@@ -17,8 +17,15 @@
 
 import xml.etree.ElementTree as ET
 import html5lib, re, pprint
-from mmdc_modules import api_request, api_page, api_thumb_url, pandoc2html, parse_work, api_file_url, replace_gallery, replace_video, gallery_exp, video_exp, api_pagesincategories, index_addwork, write_html_file
+from mmdc_modules import api_request, api_page, api_thumb_url, pandoc2html, parse_work, api_file_url, replace_gallery, replace_video, gallery_exp, video_exp, api_pagesincategories, index_addwork, write_html_file, mw_cats
+from argparse import ArgumentParser
+p = ArgumentParser()
+p.add_argument("--host", default="pzwiki.wdka.nl")
+p.add_argument("--path", default="/mw-mediadesign/", help="nb: should end with /")
+p.add_argument("--category", "-c", nargs="*", default=[], action="append", help="category to query, use -c foo -c bar to intersect multiple categories")
+args = p.parse_args()
+print args
 
 ########
 # QUERY API
 ########
@@ -29,12 +36,16 @@ endpoint = "http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&"
 ########
 # CREATE INDEX
 ########
 
-memberpages = api_pagesincategories('Graduation work', '2015') #list, containing dictionary of all pages ids. Example: [{u'ns': 0, u'pageid': 15974, u'title': u'Ahhhh'}, {u'ns': 0, u'pageid': 16005, u'title': u'Artyom-graduation-work'}]
+memberpages=mw_cats(args)
+print 'memberpages', memberpages
+#memberpages = api_pagesincategories('Graduation work', '2015') #list, containing dictionary of all pages ids. Example: [{u'ns': 0, u'pageid': 15974, u'title': u'Ahhhh'}, {u'ns': 0, u'pageid': 16005, u'title': u'Artyom-graduation-work'}]
 #memberpages = [{u'ns': 0, u'pageid': 15982, u'title': u'The Aesthetics of Ethics'}]
 #memberpages = [{u'ns': 0, u'pageid': 16005, u'title': u'Artyom-graduation-work'}]
 #memberpages = [{u'ns': 0, u'pageid': 16007, u'title': u'U ntitled'}]
 #memberpages = [{u'ns': 0, u'pageid': 15965, u'title': u'Qq'}]
+print 'memberpages', memberpages
+## output: memberpages [{u'ns': 0, u'pageid': 15982, u'title': u'The Aesthetics of Ethics'}]
 
 ########
 # Templates
@@ -53,14 +64,14 @@ index_container = index_tree.find(".//div[@class='isotope']") #maybe id is impor
 for member in memberpages:
     #print member
     # download mw work page
-    pageid=member['pageid']
-    pagetitle=(member['title'].encode('utf-8'))
-    workpage_mw = api_page(pageid, 'content')
+    # pageid=member['pageid']
+    # pagetitle=(member['title'].encode('utf-8'))
+    workpage_mw = api_page(member, 'content')
 
     # parse workpage_mw
     workpage_mw = replace_gallery(workpage_mw)
     workpage_mw = replace_video(workpage_mw)
-    workdict = parse_work(pagetitle, workpage_mw) # create dictionary workpage_mw template
+    workdict = parse_work(member, workpage_mw) # create dictionary workpage_mw template
 
     for key in workdict.keys(): # convert Extra, Description, Bio to HTML
         if key in ['Extra', 'Description', 'Bio'] and workdict[key]:
@@ -96,7 +107,7 @@ for member in memberpages:
     workpage_html = ET.tostring(tree)
     creator = workdict['Creator'].decode('ascii', 'ignore')
     creator = creator.replace(' ','_')
-    work_filename = 'web/{}-{}-{}.html'.format(workdict['Date'], creator, pageid)
+    work_filename = 'web/{}-{}.html'.format(workdict['Date'], creator)
     work_file = open(work_filename, "w")
     work_file.write(workpage_html)
     work_file.close()
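
Usage sketch (an assumption, not part of the patch): the script's filename is not shown in this diff, so it is called mmdc_index.py below purely for illustration. With the argparse options added above, it could be invoked as follows, repeating -c to intersect categories; because --category uses action="append" with nargs="*", args.category then comes out as [['Graduation work'], ['2015']].

    python mmdc_index.py --host pzwiki.wdka.nl --path /mw-mediadesign/ -c "Graduation work" -c 2015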