"""Dump the pages of every subcategory of Category:Publish to static HTML files."""
import argparse
import os, json, sys, urllib

import html5lib
from jinja2 import Template
from mwclient import Site
from pprint import pprint

from functions import unpack_response, clean_dir, remove_nonwords
from functions import Colors

# --- command line -----------------------------------------------------------
p = argparse.ArgumentParser(description="Dump wiki files to html",
                            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument("--host", metavar='', default="hub.xpub.nl/sandbox",
               help='wiki host')
p.add_argument("--path", metavar='', default="/itchwiki/",
               help="Wiki path. Should end with /")
p.add_argument("--output", default="/var/www/html/archive",
               help="Output path for pages")
# Options kept for a future semantic-query version of this script:
# p.add_argument("--conditions", "-c", metavar='',
#                default='[[File:+]][[Title::+]][[Part::+]][[Date::+]]',
#                help='The query conditions')
# p.add_argument("--printouts", "-p", metavar='',
#                default='?Title|?Date|?Part|?Partof|?Creator|?Organization|'
#                        '?Format|?Event|?Topic|?Language',
#                help='Selection of properties to printout')
# p.add_argument("--sort", "-s", metavar='',
#                default='Date,Title,Part',
#                help='Sorting according to conditions')
# p.add_argument("--order", "-o", metavar='',
#                default='asc,asc,asc',
#                help='Order of sorting conditions. Should same amount as the '
#                     '--sort properties')
# p.add_argument('--limit', '-l',
#                help='(optional) Limit the number of returned items')
# # TODO: GET limit to work. Perhaps with a site.raw_api method
# p.add_argument('--dry', '-d', action='store_true',
#                help='dry-run: will only show the query but not run it')
args = p.parse_args()

# --- site and login ----------------------------------------------------------
site = Site(host=args.host, path=args.path)
with open('login.txt', 'r') as login:  # read login user & pwd
    # splitlines() tolerates a trailing newline at the end of login.txt,
    # where split('\n') would raise on the spurious third (empty) item.
    user, pwd = login.read().splitlines()
site.login(username=user, password=pwd)  # login to wiki

# U+2044 FRACTION SLASH stands in for '/' inside filenames, since a literal
# slash would be taken as a directory separator.
SLASH = "\u2044"


def filenameforpage(p):
    """Return a filesystem-safe .html filename for wiki page *p*."""
    return p.name.replace(' ', '_').replace('/', SLASH) + '.html'


def rewritelinks(html):
    """Parse *html* and report internal (non-external) hrefs.

    NOTE(review): work in progress -- the actual rewrite is still commented
    out; for now the parsed fragment is returned so callers can use it once
    rewriting is implemented. (Original had a SyntaxError: missing ':'.)
    """
    t = html5lib.parseFragment(html, treebuilder="etree",
                               namespaceHTMLElements=False)
    for a in t.findall(".//*[@href]"):
        linkclass = a.attrib.get("class", "")
        href = a.attrib.get("href")
        if "external" in linkclass:
            # leave external links alone
            continue
        print("LINK", href)
        # a.attrib['href'] = new_href
    return t


# --- dump every page of every subcategory of Category:Publish ----------------
publish = site.Categories['Publish']
for cat in publish.members():
    if cat.namespace != 14:  # 14 = Category namespace; skip ordinary pages
        continue
    print('dumping category {}'.format(cat.page_title))
    # Use a per-category template when one exists, else fall back to default.
    try:
        with open('templates/{}.html'.format(cat.page_title.lower())) as templatefile:
            template = Template(templatefile.read())
    except FileNotFoundError:
        with open('templates/default.html') as templatefile:
            template = Template(templatefile.read())
    for page in cat.members():  # renamed from 'p': was shadowing the parser
        print(page)
        # Rendered wikitext of the page, as returned by the MediaWiki parse API.
        htmlsrc = site.parse(page=page.name)['text']['*']
        html = template.render(page=page, body=htmlsrc)
        # utf-8 explicitly: wiki content is routinely non-ASCII and the
        # platform default encoding is not guaranteed to handle it.
        with open(os.path.join(args.output, filenameforpage(page)),
                  'w', encoding='utf-8') as f:
            print(html, file=f)
        # break