md_index.py creates a JSON dictionary of all fully documented works (those with Date, Creator, Thumbnail) in Category:Graduation work
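For reference, the resulting md_allworks.json is keyed by page id; the entry below is a hypothetical sketch of its shape:

    {"1234": {"Title": "Some_Page_Title", "Creator": "Name", "Date": "2013",
              "Thumbnail": "File.jpg", "Description": "...", "Extra": ""}}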
commit
4a8acfb6a0
@@ -0,0 +1 @@
*~
@@ -0,0 +1,148 @@
#! /usr/bin/env python
# -*- coding: utf-8 -*-

##############
# Edit the index, sorting articles according to topic, section and issue
#####

import xml.etree.ElementTree as ET
import html5lib, urllib2, json, pprint, subprocess
from urllib import quote as quote
import re

sid = '1234'
useragent = "Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101"
endpoint = "http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&"
allworks = {}
mainkeys = ['Thumbnail', 'Date', 'Creator']


def api_request(action, pagename): # get page: content, metadata, images, imageinfo
    url = endpoint + action.format(pagename)
    request = urllib2.urlopen(url)
    jsonp = json.loads(request.read())
    json_dic = jsonp.get('query').get('pages')
    page_id = json_dic.keys()[0] # Python 2: keys() returns a list
    page_content = json_dic.get(page_id)
    print 'API URL:', url
    return page_content
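# Usage sketch (the page id below is hypothetical): api_request returns the
# inner page dict of the API response, e.g.
#   api_request('action=query&pageids={}&prop=info', '1234')
#   -> {u'pageid': 1234, u'title': u'...', ...}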
def api_page(pageid, info):
    if info == 'content':
        api_response = api_request('action=query&pageids={}&prop=revisions&rvprop=content', pageid)
        page_content = api_response.get('revisions')[0]['*']
    elif info == 'metadata':
        page_content = api_request('action=query&pageids={}&prop=info', pageid)
    elif info == 'articleimgs':
        page_content = api_request('action=query&pageids={}&prop=images', pageid)
    elif info == 'imageinfo':
        page_content = api_request('action=query&pageids={}&prop=imageinfo&iiprop=url&iiurlwidth=300', pageid) # iiurlwidth determines the width of the thumbnail
    return page_content
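# Usage sketch (hypothetical page id):
#   api_page('1234', 'content') -> raw wikitext of that page's latest revision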
def api_page_content(pagename):
    page = api_request('action=query&titles={}&prop=revisions&rvprop=content', pagename)
    content = page.get('revisions')[0]['*']
    return content

# print json.dumps(revisions, sort_keys=True, indent=4) ## see response


def api_thumb_url(filename):
    '''get thumbnail url info of an image, queried by file title'''
    # query by title ('File:...') rather than by numeric page id
    page_content_dict = api_request('action=query&titles={}&prop=imageinfo&iiprop=url&iiurlwidth=300', filename)
    if 'imageinfo' in page_content_dict.keys():
        return page_content_dict # full page dict; the url sits in ['imageinfo'][0]['thumburl']
    return None
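# Usage sketch (hypothetical file title):
#   info = api_thumb_url('File:Example.jpg')
#   if info: thumb = info['imageinfo'][0]['thumburl']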
def find_imgs(article):
    '''get all internal images from published articles:
    * query all images in an article
    * for each image request a thumbnail
    * get the thumburl
    '''
    page_content_dict = api_page(article, 'articleimgs')
    thumbs_list = [] # initialized up front, so pages without images return an empty list
    if 'images' in page_content_dict.keys():
        images_list = page_content_dict.get('images')
        # print 'images_list', images_list
        for img in images_list: # all images in the article
            title = img.get('title')
            thumburl_json = api_thumb_url(title)
            if thumburl_json is not None:
                thumburl = thumburl_json.get('imageinfo')[0].get('thumburl')
                thumbs_list.append(thumburl)
    return thumbs_list
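# Usage sketch (hypothetical page id):
#   find_imgs('1234') -> list of thumbnail urls for the images embedded in that article
#   (empty list when the page has none)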
def parse_work_page(title, content):
    content = content.encode('utf-8')
    if re.match('\{\{Graduation work', content):
        work_dict = {}
        work_dict['Title'] = title
        template, extra = re.findall('\{\{Graduation work\n(.*?)\}\}(.*)', content, re.DOTALL)[0]
        # template's key/value pairs
        keyval = re.findall('\|(.*?)\=(.*?\n)', template, re.DOTALL)
        keyval.append(('Extra', extra))
        for key, val in keyval:
            val = val.replace('\n', '')
            if 'Creator' in key:
                val = val.replace(', ', '')
            work_dict[key] = val
        return work_dict
'''
TEMPLATE

|Description=
|Creator=
|Date=
|Thumbnail=
|Website=

Description=Based on her written thesis: The Web Cheated on Me, Marie is trying to figure out where her disappointment with the web comes from. She analyzed her webbrowser history for half a year to find out what kind of information she is looking up. Her graduation work is an audio installation based on this research.\n|Creator=Marie Wocher,\n|Date=2013\n|Thumbnail=4 FromHypertextToApplePie.jpg\n
'''
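# For the example above, parse_work_page would return roughly (a sketch; the
# page title passed in is hypothetical, and exact whitespace handling may differ):
#   {'Title': 'From_Hypertext_To_Apple_Pie', 'Description': 'Based on her written thesis: ...',
#    'Creator': 'Marie Wocher,', 'Date': '2013',
#    'Thumbnail': '4 FromHypertextToApplePie.jpg', 'Extra': ''}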
def api_category(category, year):
    '''Find all pages within the category and add fully documented works to the global allworks dict'''
    category = category.replace(' ', '_')
    if year:
        api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}&cmtitle=Category:{}'.format(category, year) # BUG: the API only honours the last cmtitle given, i.e. Category:YEAR
        # (possible fix: query each category separately and intersect the pageid sets)
    else:
        api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}'.format(category)

    request = urllib2.urlopen(api_url)
    jsonp = json.loads(request.read())
    for page in jsonp['query']['categorymembers']:
        print 'Page:', page
        title = page['title'].encode('utf-8').replace(" ", "_") # snake_case for page titles
        pageid = page['pageid']
        print 'Pageid:', pageid
        ## NOTE: instead of using the page name to query a page, use its PAGE ID
        article = api_page(pageid, 'content')
        print 'Content:'
        pprint.pprint(article)
        print
        work = parse_work_page(title, article)
        if work and set(mainkeys).issubset(work.keys()) and len([work[key] for key in mainkeys if work[key]]) == 3: # the work must exist and have non-empty values for all mainkeys
            allworks[pageid] = work
api_category('Graduation work', '2013')
# pprint.pprint(allworks)

# save json
json_allworks = open('md_allworks.json', 'w')
json.dump(allworks, json_allworks)
json_allworks.close()
print "wrote json dictionary to:", 'md_allworks.json'

## TO DO
# How to handle the work['Extra'] value?
# For some titles work['Extra'] contains: <gallery>, [[Pages]], text, etc.
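# A possible starting point for the Extra value (a sketch, untested assumptions
# about the markup actually found on the wiki):
#   galleries = re.findall('<gallery>(.*?)</gallery>', work['Extra'], re.DOTALL)
#   links = re.findall('\[\[(.*?)\]\]', work['Extra'])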
@@ -0,0 +1,26 @@
<!DOCTYPE HTML>
<html>
<head>
    <meta charset="utf-8" />
    <script type="text/javascript" src="jquery-1.10.2.js"></script>
    <script type="text/javascript">

        var myjson;

        function readJSON(){
            $.getJSON("md_allworks.json", function(data){
                myjson = data;
                console.log(data);
            });
        }
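        // Note: $.getJSON is asynchronous, so myjson stays undefined until the
        // request completes; code that needs the data should run inside the callback.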
    </script>
</head>

<body onload="javascript:readJSON();">
    Testing JSON é
</body>
</html>