#! /usr/bin/env python
# -*- coding: utf-8 -*-

###########
# prototyping downloading and converting mw page content to html
###########

# OVERVIEW:
# * create one single html page
# * replace {{youtube/vimeo}} templates with video tags
# * replace galleries with rows of images
#
# request all the pages
# build index
# build all pages

import pprint
import re
import xml.etree.ElementTree as ET

import html5lib

from mmdc_modules import (api_request, api_page, api_thumb_url, pandoc2html,
                          parse_work, api_file_url, replace_gallery,
                          replace_video)

# wiki markup patterns used when pre-processing page content
gallery_exp = re.compile(r'<gallery>.*?</gallery>')
file_exp = re.compile(r'File:(.*?)(?=File:|</gallery>)')
img_exp = re.compile(r'(File:|Image:)((.*?)\.(gif|jpg|jpeg|png))(?=\||File:|Image:|</gallery>)', re.I)
video_exp = re.compile(r'\{\{(.*?)\|(.*?)\}\}')

template = open("web/page-template.html", "r").read()

# download
pageid = '16025'  # alternatives: '15965' Qq, '15986' Jozeph, '16025' Mina
work = 'Mina'  # 'User:Joak/graduation/catalog1'
workpage_mw = api_page(pageid, 'content')

# parse workpage_mw into a dict of fields
workdict = parse_work(work, workpage_mw)
for key in workdict.keys():
    if key in ['Extra', 'Description', 'Bio']:
        # these fields contain wiki markup; convert them to html with pandoc
        workdict[key] = pandoc2html(workdict[key].encode('utf-8'))

workpage_html = template.format(title=workdict['Title'],
                                creator=workdict['Creator'],
                                date=workdict['Date'],
                                website=workdict['Website'],
                                thumbnail=workdict['Thumbnail'],
                                bio=workdict['Bio'],
                                description=workdict['Description'],
                                extra=workdict['Extra'])

# process html: rewrite <img> src attributes to the file urls served by the API
tree = html5lib.parse(workpage_html, namespaceHTMLElements=False)
imgs = tree.findall('.//img')
for img in imgs:
    src = img.get('src')
    newsrc = api_file_url(src)
    print 'new src', newsrc
    if newsrc:
        img.set('src', newsrc)
    #print 'IMG', ET.tostring(img)

workpage_html = ET.tostring(tree)
print 'TREE', workpage_html

# save
work_filename = 'web/{}-{}-{}.html'.format(workdict['Date'],
                                           workdict['Creator'].encode('ascii', 'ignore').replace(' ', '_'),
                                           pageid)
work_file = open(work_filename, "w")
work_file.write(workpage_html)
work_file.close()
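
# -----------------------------------------------------------------------------
# Illustrative sketch, not wired into the pipeline above. The OVERVIEW lists
# replacing galleries and {{youtube/vimeo}} templates, and gallery_exp /
# video_exp are compiled, but this prototype never applies them; that step
# presumably happens in mmdc_modules.replace_gallery / replace_video, whose
# implementations are not shown here. The helpers below are only a rough guess
# at what such substitutions could look like: the function names, embed urls
# and output markup are assumptions, not the project's actual behaviour.

def sketch_replace_video(wikitext):
    # turn {{youtube|<id>}} / {{vimeo|<id>}} templates into <iframe> embeds
    video_tpl = re.compile(r'\{\{(youtube|vimeo)\|(.*?)\}\}', re.I)
    def to_embed(match):
        service, video_id = match.group(1).lower(), match.group(2).strip()
        if service == 'youtube':
            src = 'https://www.youtube.com/embed/{}'.format(video_id)
        else:
            src = 'https://player.vimeo.com/video/{}'.format(video_id)
        return '<iframe src="{}" frameborder="0" allowfullscreen></iframe>'.format(src)
    return video_tpl.sub(to_embed, wikitext)

def sketch_replace_gallery(wikitext):
    # turn each <gallery>...</gallery> block into a row of <img> tags,
    # one per File:/Image: entry found inside the block
    gallery_block = re.compile(r'<gallery.*?>(.*?)</gallery>', re.I | re.S)
    image_entry = re.compile(r'(?:File|Image):(.*?\.(?:gif|jpg|jpeg|png))', re.I)
    def to_row(match):
        names = image_entry.findall(match.group(1))
        imgs = ''.join('<img src="{}" />'.format(name.strip()) for name in names)
        return '<div class="gallery-row">{}</div>'.format(imgs)
    return gallery_block.sub(to_row, wikitext)

# example:
#   sketch_replace_video('{{youtube|abc123}}')
#   -> '<iframe src="https://www.youtube.com/embed/abc123" ...></iframe>'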