#! /usr/bin/env python
# -*- coding: utf-8 -*-
###########
# prototyping downloading and converting mw page content to html
###########
# OVERVIEW:
# * creating one single html page
# * replace {{youtube/vimeo}} with video tags
# * replace galleries with rows of images
# * request all the pages
# * BUILD INDEX
# * build all pages
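# illustrative examples of the wiki markup being rewritten (exact syntax is handled by
# replace_video/replace_gallery in mmdc_modules; these samples are assumptions):
#   {{youtube|dQw4w9WgXcQ}}                       -> an embedded <iframe>/<video> element
#   <gallery> File:a.jpg | File:b.jpg </gallery>  -> a row of <img> elements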
import xml.etree.ElementTree as ET
import html5lib, pprint
from mmdc_modules import api_page, pandoc2html, parse_work, api_file_url, replace_gallery, replace_video, index_addwork, write_html_file, mw_cats
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument("--host", default="pzwiki.wdka.nl")
p.add_argument("--path", default="/mw-mediadesign/", help="nb: should end with /")
p.add_argument("--category", "-c", nargs="*", default=[], action="append", help="category to query, use -c foo -c bar to intersect multiple categories")
args = p.parse_args()
print args
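# example invocation (script and category names are illustrative):
#   python prototype.py --host pzwiki.wdka.nl --path /mw-mediadesign/ -c "Graduation work" -c 2015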
########
# QUERY API
########
sid = '1234'
useragent = "Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101"
endpoint = "http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&"
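# the api.php endpoint is queried with standard MediaWiki API calls, presumably along the lines of:
#   ...api.php?format=json&action=query&list=categorymembers&cmtitle=Category:<name>   (mw_cats)
#   ...api.php?format=json&action=query&prop=revisions&rvprop=content&pageids=<id>     (api_page)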
########
# CREATE INDEX
########
memberpages=mw_cats(args)
#memberpages['Ctrl-F Reader','As We Speak']
print 'memberpages', memberpages
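# memberpages is expected to be a list of category members as returned by the API, e.g.
#   [{'pageid': 12345, 'ns': 0, 'title': u'Some Work Page'}, ...]
# (shape inferred from the member['pageid'] / member['title'] lookups below)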
########
# Templates
########
page_template = open("web/page-template.html", "r").read()  # read once: a bare file object would be exhausted after the first html5lib.parse() in the loop below
index_file = open('web/index-template.html', 'r')
index_tree = html5lib.parse(index_file, namespaceHTMLElements=False)
index_container = index_tree.find(".//div[@class='isotope']") # maybe id is important, to distinguish it
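# index_addwork() further down appends one entry per work into this container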
########
# CREATE PAGE
########
for member in memberpages:
    print ' member', member
    # download mw work page
    # pageid=member['pageid']
    # pagetitle=(member['title'].encode('utf-8'))
    workpage_mw = api_page(member, 'content')
    workpage_mw = replace_gallery(workpage_mw)
    workpage_mw = replace_video(workpage_mw)
    workdict = parse_work(member, workpage_mw) # create dictionary from the workpage_mw template
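    # workdict presumably maps the wiki template's fields to their values, e.g.
    #   {'Creator': ..., 'Title': ..., 'Date': ..., 'Website': ..., 'Thumbnail': ..., 'Bio': ..., 'Description': ..., 'Extra': ...}
    # (keys inferred from their use below)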
    # Only build pages with a Creator, Title, Description and Thumbnail
    if len(workdict['Creator']) > 1 and len(workdict['Title']) > 1 and len(workdict['Description']) > 1 and len(workdict['Thumbnail']) > 1:
        for key in workdict.keys(): # convert Extra, Description, Bio to HTML
            if key in ['Extra', 'Description', 'Bio'] and workdict[key]:
                workdict[key] = pandoc2html(workdict[key].decode('utf-8'))
            elif key in ['Creator']:
                workdict[key] = workdict[key].replace(',', '') # remove comma
        # replace empty dict values with ' ' to avoid empty tags
        for key in workdict.keys():
            if workdict[key] == '' and key != 'Thumbnail':
                workdict[key] = ' '
            elif key == 'Thumbnail' and workdict[key]:
                img = '<img id="thumbnail" src="{}" />'.format(workdict[key])
                # append img to text
                workdict[key] = workdict[key] # + img
            #print 'THUMB', workdict[key]
            if type(workdict[key]) is unicode:
                workdict[key] = workdict[key].encode('utf-8')
        # create work page
        page_tree = html5lib.parse(page_template, namespaceHTMLElements=False)
        page_title = page_tree.find('.//title')
        page_title.text = workdict['Title']
        page_creator = page_tree.find('.//h2[@id="creator"]')
        page_creator.text = workdict['Creator']
        page_title_date = page_tree.find('.//p[@id="title"]')
        page_title_date.text = "{} {}".format(workdict['Title'], workdict['Date'])
        page_description = page_tree.find('.//div[@id="description"]')
        page_description_el = ET.fromstring('<div>' + workdict['Description'] + '</div>')
        page_description.extend(page_description_el)
        page_bio = page_tree.find('.//div[@id="bio"]')
        page_bio_el = ET.fromstring('<div>' + workdict['Bio'] + '</div>')
        page_bio.extend(page_bio_el)
        page_sortArea_title = page_tree.find('.//div[@id="sortArea"]/p')
        page_sortArea_title.text = workdict['Title']
        page_extra = page_tree.find('.//div[@id="extra"]')
        page_extra_el = ET.fromstring('<div>' + workdict['Extra'] + '</div>')
        page_extra.extend(page_extra_el)
        page_website = page_tree.find('.//p[@class="hightlightSidebar"]/a')
        page_website.set('href', workdict['Website'])
        page_website.text = workdict['Website']
        page_thumb = page_tree.find('.//img[@id="thumbnail"]')
        page_thumb.set('src', workdict['Thumbnail'])
        # give work page's imgs full url
        imgs = page_tree.findall('.//img')
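        # api_file_url presumably resolves a bare wiki file name/path into a full absolute URL,
        # so images still load from the generated static pages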
        for img in imgs:
            if img.get('id') != 'logo':
                src = img.get('src')
                newsrc = api_file_url(src) ## MOVE FULL URL OPERATION TO MW CONTENT
                if newsrc:
                    img.set('src', newsrc)
        # save work page
        creator = workdict['Creator'] #.decode('ascii', 'ignore')
        creator = creator.replace(' ', '_')
        work_filename = 'web/{}-{}.html'.format(workdict['Date'], creator)
        write_html_file(page_tree, work_filename)
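        # write_html_file presumably serializes the parsed tree back to an HTML file at the given path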
        #######
        # INDEX
        #######
        # insert work to index
        index_addwork(parent=index_container,
                      workid=key,
                      href=work_filename.replace('web/', ''),
                      title=workdict['Title'], #.decode('utf-8'),
                      creator=workdict['Creator'], #.decode('utf-8'),
                      date=workdict['Date'],
                      thumbnail=workdict['Thumbnail'])
        # print '----', workdict['Title']
        # print ET.tostring(tree)
print index_tree, type(index_tree)
write_html_file(index_tree, 'web/index.html')
print
print