You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
114 lines
4.8 KiB
Python
114 lines
4.8 KiB
Python
#! /usr/bin/env python
|
|
# -*- coding: utf-8 -*-
|
|
|
|
##############
|
|
# CREATE JSON DICTIONARY WITH AN ENTRY FOR EACH WORK
|
|
#####
|
|
|
|
import urllib2, json, pprint, re
|
|
|
|
# NOTE(review): unused in the visible code -- possibly a leftover session id; confirm before removing
sid = '1234'

# Spoofed browser User-Agent string; NOTE(review): never attached to the
# urllib2 requests below -- confirm whether it was meant to be sent
useragent = "Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101"

# Base URL of the MediaWiki API; each request appends its query string to this
endpoint = "http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&"

# Accumulates one entry per graduation work, keyed by wiki page id
# (filled in by api_category, dumped to md_allworks.json at the end)
allworks = {}

# Template fields that must all be present with non-trivial values
# for a work page to be accepted by parse_work_page
mainkeys = ['Thumbnail','Date','Creator']
|
def api_request(action, pagename): #get page: content, metadata, images, imageifnp
|
|
url = endpoint + action.format(pagename)
|
|
request = urllib2.urlopen(url)
|
|
jsonp = json.loads(request.read() )
|
|
json_dic= (jsonp.get('query').get('pages'))
|
|
# pprint.pprint( json_dic )
|
|
page_id = json_dic.keys()[0]
|
|
page_content = json_dic.get(page_id)
|
|
print 'API Resquest URL:', url
|
|
return page_content
|
|
|
|
def api_page(pageid, query):
|
|
print 'API query:', query
|
|
if query == 'content':
|
|
api_response = api_request('action=query&pageids={}&prop=revisions&rvprop=content', pageid)
|
|
response = ((api_response.get('revisions'))[0])['*']
|
|
elif query == 'metadata':
|
|
response = api_request('action=query&pageids={}&prop=info', pageid)
|
|
elif query == 'articleimgs':
|
|
response = api_request('action=query&pageids={}&prop=images', pageid)
|
|
elif query == 'imageinfo':
|
|
pagename = pageid # in imageinfo titles are used instead of id
|
|
response = api_request('action=query&titles=File:{}&prop=imageinfo&iiprop=url&iiurlwidth=500', pagename) # iiurlwidht dermines with of thumbnail
|
|
return response
|
|
|
|
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&titles=File:2x2 905.jpg&prop=imageinfo&iiprop=url&iiurlwidth=300
|
|
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&pageids=10603&prop=revisions&rvprop=content
|
|
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&titles=Graduation_Website_Braindump&prop=revisions&rvprop=content
|
|
|
|
def api_thumb_url(filename):
    '''Return the thumbnail URL of an image file, or None when the wiki
    reports no imageinfo for it (e.g. a missing/unuploaded file).'''
    page_content_dict = api_page(filename, 'imageinfo')
    # membership test directly on the dict -- no .keys() list needed
    if 'imageinfo' in page_content_dict:
        return page_content_dict['imageinfo'][0].get('thumburl')
    return None
|
|
|
|
|
|
def parse_work_page(title, content):
|
|
content = content.encode('utf-8')
|
|
if re.match('\{\{\Graduation work', content):
|
|
work_dict = {}
|
|
work_dict['Title']=title
|
|
template, extra = (re.findall('\{\{Graduation work\n(.*?)\}\}(.*)', content, re.DOTALL))[0]
|
|
# template's key/value pair
|
|
keyval = re.findall('\|(.*?)\=(.*?\n)', template, re.DOTALL)
|
|
extra = ('Extra', extra)
|
|
keyval.append(extra)
|
|
checkkeys = [keyval[i] for i in range(len(keyval)) if keyval[i][0] in mainkeys and len(keyval[i][1])>3] #list mainkeys present, w/ values, in tuples [(key, val),(key, val)...]
|
|
if len(checkkeys) == 3 : # checkkeys contains all mainkeys and values
|
|
for pair in keyval:
|
|
key = pair[0]
|
|
val = pair[1]
|
|
val = val.replace('\n','')
|
|
if 'Creator' in key:
|
|
val = val.replace(', ', '')
|
|
elif 'Thumbnail' in key:
|
|
thumburl = api_thumb_url(val)
|
|
work_dict['Thumbnail_url']=thumburl
|
|
print 'THUMB:', thumburl
|
|
work_dict[key]=val
|
|
return work_dict
|
|
|
|
def api_category(category, year): #Find all pages incategory and add to allworks dictionary
|
|
''' TODO: category intersection; With SirrusSearch'''
|
|
category = category.replace(' ', '_')
|
|
if year:
|
|
api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}&cmtitle=Category:{}'.format(category, year)
|
|
else:
|
|
api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}'.format(category)
|
|
request = urllib2.urlopen(api_url)
|
|
jsonp = json.loads(request.read())
|
|
for page in jsonp['query']['categorymembers']:
|
|
title = ((page['title']).encode('utf-8') ).replace(" ", "_") #snakecase for page titles
|
|
pageid = page['pageid']
|
|
article = api_page(pageid, 'content')
|
|
# print title
|
|
# pprint.pprint(article)
|
|
work = parse_work_page(title, article)
|
|
if work:
|
|
allworks[pageid] = work #dictionary(allworks) entry
|
|
print pprint.pprint( work )
|
|
# Create work page
|
|
else:
|
|
print 'WORK DOES NOT CONTAIN REQUIRED CONTENT'
|
|
print '-------------'
|
|
print
|
|
|
|
# Scrape all 2013 graduation works, then persist the collected dictionary.
api_category('Graduation work', '2013')

# with-block guarantees the JSON file is flushed and closed
# (the original left the handle open)
with open('md_allworks.json', 'w') as json_allworks:
    json.dump(allworks, json_allworks)
|
|
|
|
## TO DO

# How to handle the work['Extra'] value? For some titles work['Extra'] contains: <gallery>, [[Pages]], text, etc.
|
|
|
|
|
|
|
|
|
|
|