#! /usr/bin/env python
# -*- coding: utf-8 -*-

###########
# prototyping downloading and converting mw page content to html
###########

# OVERVIEW:
# * creating one single html page
# * replace {{youtube/vimeo}} with video tags
# * replace galleries with rows of images
# request all the pages
# build index
# build all pages

import xml.etree.ElementTree as ET
import html5lib, re, pprint
from mmdc_modules import api_request, api_page, api_thumb_url, pandoc2html, parse_work, api_file_url, replace_gallery, replace_video, gallery_exp, video_exp

# Load the HTML page template. Use a context manager so the file handle is
# closed deterministically (the original opened it and never closed it).
with open("web/page-template.html", "r") as template_file:
    template = template_file.read()

# download mw work page
pageid = '15965'  # Qq  #'16025' #'15986' Jozeph #'16025' Mina
work = 'Q'  # 'Mina' #'User:Joak/graduation/catalog1'
workpage_mw = api_page(pageid, 'content')

# parse workpage_mw: expand {{gallery}}/{{video}} wiki templates into markup
# the later HTML conversion understands, then split the page into fields.
workpage_mw = replace_gallery(workpage_mw)
workpage_mw = replace_video(workpage_mw)
workdict = parse_work(work, workpage_mw)  # create dictionary from workpage_mw

# Convert only the free-text fields from mediawiki markup to HTML; the other
# fields (Title, Creator, Date, ...) are inserted into the template verbatim.
for key in workdict.keys():
    if key in ['Extra', 'Description', 'Bio']:
        workdict[key] = pandoc2html(workdict[key].encode('utf-8'))

# fill template with dictionary/mw_page values
workpage_html = template.format(
    title=workdict['Title'],
    creator=workdict['Creator'],
    date=workdict['Date'],
    website=workdict['Website'],
    thumbnail=workdict['Thumbnail'],
    bio=workdict['Bio'],
    description=workdict['Description'],
    extra=workdict['Extra'],
)

# parse workpage_html
# process html: rewrite every <img src> from a bare filename to the full
# mediawiki file URL (api_file_url returns a falsy value when unresolvable,
# in which case the original src is kept).
tree = html5lib.parse(workpage_html, namespaceHTMLElements=False)
imgs = tree.findall('.//img')
for img in imgs:
    src = img.get('src')
    newsrc = api_file_url(src)
    if newsrc:
        img.set('src', newsrc)

# save workpage_html under web/<Date>-<Creator>-<pageid>.html; the Creator is
# ASCII-stripped and space-to-underscore mangled to make a safe filename.
workpage_html = ET.tostring(tree)
work_filename = 'web/{}-{}-{}.html'.format(
    workdict['Date'],
    (workdict['Creator'].encode('ascii', 'ignore')).replace(' ', '_'),
    pageid,
)
with open(work_filename, "w") as work_file:
    work_file.write(workpage_html)