@@ -6,7 +6,7 @@
 #####
 import xml.etree.ElementTree as ET
 import html5lib, urllib2, json, pprint, re
-from mmdc_modules import api_thumb_url, pandoc2html, img_fullurl, api_file_url
+from mmdc_modules import api_thumb_url, pandoc2html, img_fullurl, api_file_url, write_html_file
 #import mmdc_create_json import api_thumb_url
 json_allworks_file = open('allworks_mmdc.json', 'r') # save json
 json_allworks = json.loads(json_allworks_file.read())
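For orientation: create_workpage() further down reads the keys Title, Creator, Date, Bio, Website, Description, Extra and Thumbnail from each work, so allworks_mmdc.json presumably maps work ids to dicts roughly shaped like the sketch below. The exact structure is an assumption inferred from the code, not something this diff shows.

# Hypothetical entry of allworks_mmdc.json, inferred from the keys that
# create_workpage() reads; the values are placeholders.
example_work = {
    'Title': 'Some Work',
    'Creator': 'Some Graduate',
    'Date': '2015',
    'Bio': 'Short biography text',
    'Website': 'http://example.org',
    'Description': 'Wiki text, possibly containing {{vimeo|...}} or gallery markup',
    'Extra': '',
    'Thumbnail': 'http://example.org/thumb.jpg',
}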
@@ -34,86 +34,91 @@ def replace_gallery(content):
     return content, gallery_imgs

 def replace_video(content):
+    print '-- Replacing Videos --'
     videos = []
     videos_found = re.findall(video_exp, content)
     for video in videos_found:
         video_provider = str(video[0])
         video_hash = str(video[1])
         video_src = None
+        print video_provider, type(video_provider)

         if (video_provider.lower()) == 'youtube':
             video_src="https://www.youtube.com/embed/" + video_hash
         elif (video_provider.lower()) == 'vimeo':
             video_src="https://player.vimeo.com/video/" + video_hash
+            print 'VIMEO'

         if video_src:
             iframe_el = ET.Element('iframe', attrib={'src':video_src, 'width':'600px', 'height':'450px'})
             videos.append(iframe_el)
     content = re.sub(video_exp, '', content)
     return content, videos
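Neither video_exp nor gallery_exp is defined inside this hunk; both live earlier in the script. Judging from the {{vimeo|44977056}} wiki syntax quoted in a comment removed further down, video_exp is presumably something along the lines of the sketch below, which yields the (provider, hash) tuples that replace_video() indexes. This is an assumption for readability, not the script's actual pattern.

# Hypothetical stand-in for the video_exp defined elsewhere in the script.
video_exp = re.compile(r'\{\{(youtube|vimeo)\|([^}]+)\}\}')

# re.findall() with two groups returns (provider, hash) tuples:
#   re.findall(video_exp, 'intro {{vimeo|44977056}} outro')
#   => [('vimeo', '44977056')]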
-def create_workpage( allworks_dict, work_key): # replace text content in dict with html nodes, holding the content
+def create_workpage( allworks_dict, work_key, tree): # replace text content in dict with html nodes, holding the content
     for key in allworks_dict.keys():
+        div_header = (tree.findall(".//div[@class='header']"))[0]
+        div_body = (tree.findall(".//div[@class='body']"))[0]
+        div_av = (tree.findall(".//div[@class='av']"))[0]
         if key in ['Description', 'Extra']:
             mw_content = allworks_dict[key]
             if re.search(gallery_exp, mw_content):
                 mw_content, gallery_imgs = replace_gallery(mw_content)
                 allworks_dict['Images'] = gallery_imgs
             elif re.search(video_exp, mw_content):
+                print '-- Searching for Video --'
                 mw_content, videos = replace_video(mw_content)
                 allworks_dict['Video'] = videos
+                print mw_content, videos

             allworks_dict[key] = pandoc2html( mw_content if key in allworks_dict.keys() else '' ) # convert to HTML
-            work_htmltree = html5lib.parseFragment(allworks_dict[key], namespaceHTMLElements=False)
-            # print work_htmltree
-            # print ET.tostring(work_htmltree)
-            # vimeo/youtube: {{vimeo|44977056}}
-            # External urls: [http://www.scribd.com/doc/105882261/THE-DICTATOR-S-PRACTICAL-INTERNET-GUIDE-TO-POWER-RETENTION scribd]
+            work_el = html5lib.parseFragment(allworks_dict[key], namespaceHTMLElements=False)
+            div_body.append( work_el )
+            print "****************************"
+            print ET.tostring(div_body)
+            print "****************************"
         elif key in ['Website']:
-            work_htmltree = ET.Element('a', attrib={'href': allworks_dict[key], 'id':key})
-            work_htmltree.text = allworks_dict[key]
+            work_el = ET.Element('a', attrib={'href': allworks_dict[key], 'id':key})
+            work_el.text = 'LINK'#allworks_dict[key]
         elif key in ['Title']:
-            work_htmltree = ET.Element('h1', attrib={'id': key})
-            work_htmltree.text
+            work_el = ET.Element('h1', attrib={'id': key})
+            work_el.text = allworks_dict[key]
+            # typeerror: must be Element, not Element
+            # div_header.append(work_el)
         elif key in ['Creator', 'Date', 'Bio']:
-            work_htmltree = ET.Element('p', attrib={'id': key})
-            work_htmltree.text
+            work_el = ET.Element('p', attrib={'id': key})
+            work_el.text = allworks_dict[key]
-        elif key in ['Thumbnail_url']:
-            work_htmltree = ET.Element('img', attrib={'src': allworks_dict[key], 'id': key})
-            print ET.tostring(work_htmltree)
+        elif key in ['Thumbnail']:
+            work_el = ET.Element('img', attrib={'src': allworks_dict[key], 'id': key})
+            print ET.tostring(work_el)
         else:
-            work_htmltree = None
-        # remove keys with None value?
-        # print work_htmltree
-        allworks_dict[key] = work_htmltree
-    allworks_dict.pop('Thumbnail_url', None) #remove Thumbnail_url
+            work_el = None # remove keys with None value?
+        allworks_dict[key] = work_el
+    allworks_dict.pop('Thumbnail', None) #remove thumnail
     pprint.pprint(allworks_dict)
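A note on the "# typeerror: must be Element, not Element" comment above: that message usually comes from mixing two ElementTree implementations (for example, html5lib's default etree treebuilder picking cElementTree while ET.Element() comes from xml.etree.ElementTree), so an element built by one cannot be appended to a tree built by the other. Separately, parseFragment() with the etree treebuilder returns a DOCUMENT_FRAGMENT wrapper element rather than the fragment's children. The snippet below is a minimal sketch, not part of the commit, of one way to fold the parsed fragment into div_body under those assumptions:

# Sketch only: merge a parsed fragment into div_body child by child, so no
# <DOCUMENT_FRAGMENT> wrapper ends up in the serialised page.
fragment = html5lib.parseFragment(allworks_dict[key], treebuilder="etree", namespaceHTMLElements=False)
if fragment.text:
    # text before the first child element lives on the wrapper's .text
    div_body.text = (div_body.text or '') + fragment.text
for child in list(fragment):
    div_body.append(child)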
-def edit_index(filepath, json_allworks_dict):
-    input_file = open(filepath, 'r')
-    tree = html5lib.parse(input_file, namespaceHTMLElements=False)
-    div_section02 = (tree.findall(".//div[@id='section02']"))[0]
-    for key in json_allworks_dict.keys():
-        graduation_work=json_allworks_dict[key]
-        insert_work(div_section02, 'Graduation_work thumbnail', graduation_work, key )
-    return tree
-
-for key in json_allworks.keys():
-    graduation_work=json_allworks[key]
-    print (graduation_work['Creator']).encode('utf-8')
-    # pprint.pprint(graduation_work)
-    # purge graduation_work from keys with empty vals
-    # for key in graduation_work:
-        # if graduation_work[key] in [None, '']:
-    # print graduation_work
-    create_workpage(graduation_work, key )
+worktemplate = open('web/work.html', 'r')
+for key in json_allworks.keys():
+    graduation_work=json_allworks[key]
+    graduation_work_title = (json_allworks[key]['Title']).encode('ascii', 'ignore')
+    work_file = 'web/' + key + '-' + graduation_work_title + '.html'
+    work_tree = html5lib.parse(worktemplate, namespaceHTMLElements=False)
+    create_workpage(graduation_work, key, work_tree )
+    write_html_file(work_tree, work_file)
+    print '----------'
+    # print ET.tostring(work_tree)
+    print graduation_work['Creator']
+    print graduation_work_title
     print '----------'
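Two reading notes on the loop above. create_workpage() expects the parsed web/work.html template to contain div elements with the classes header, body and av, since it takes the first findall() match of each. Also, html5lib.parse(worktemplate, ...) is called inside the loop on the same open file object, so from the second iteration on the handle is already at end of file; seeking back to 0 or re-opening the template per work would be needed to render more than one page. write_html_file() itself comes from mmdc_modules and is not shown in this diff; a hypothetical stand-in, assuming it does nothing more than serialise the tree to disk, might look like:

# Hypothetical stand-in for mmdc_modules.write_html_file (the real helper is
# not part of this diff); assumes it simply serialises the tree to a file.
def write_html_file(tree, filepath):
    out = open(filepath, 'w')
    out.write(ET.tostring(tree, method='html'))
    out.close()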