# -*- coding: utf-8 -*-
##############
# CREATE JSON DICTIONARY WITH AN ENTRY FOR EACH WORK
# Edit the index, sorting articles according to topic, section and issue
#####
#
import urllib2, json, pprint, re

sid = '1234'
useragent = "Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101"
def api_request(action, pagename):  # get page: content, metadata, images, imageinfo
    # url construction (assumption): endpoint holds the api.php base url
    # (".../api.php?format=json&") and is defined earlier in the script
    url = endpoint + action.format(pagename)
    request = urllib2.urlopen(url)
    jsonp = json.loads(request.read())
    json_dic = jsonp.get('query').get('pages')
    # pprint.pprint(json_dic)
    page_id = json_dic.keys()[0]
    page_content = json_dic.get(page_id)
    print 'API request URL:', url
    return page_content
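# Usage sketch for api_request (commented out; a hedged example, not part of the script's
# flow -- 10603 is simply the pageid from the example URL further down):
#   page = api_request('action=query&pageids={}&prop=info', 10603)
#   print page['title'], page['touched']   # api_request returns the inner page dict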
def api_page(pageid, info):
    print 'API query:', info
    if info == 'content':
        api_response = api_request('action=query&pageids={}&prop=revisions&rvprop=content', pageid)
        response = ((api_response.get('revisions'))[0])['*']
    elif info == 'metadata':
        response = api_request('action=query&pageids={}&prop=info', pageid)
    elif info == 'articleimgs':
        response = api_request('action=query&pageids={}&prop=images', pageid)
    elif info == 'imageinfo':
        pagename = pageid  # for imageinfo, titles are used instead of page ids
        response = api_request('action=query&titles=File:{}&prop=imageinfo&iiprop=url&iiurlwidth=500', pagename)  # iiurlwidth determines the width of the thumbnail
    return response
# example API requests:
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&pageids=10603&prop=revisions&rvprop=content
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&titles=Graduation_Website_Braindump&prop=revisions&rvprop=content
# http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&titles=File:2x2 905.jpg&prop=imageinfo&iiprop=url&iiurlwidth=300
# print json.dumps(response, sort_keys=True, indent=4)  ## see response
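# Usage sketch for api_page (commented out; hedged example, pageid 10603 taken from the
# example URL above):
#   wikitext = api_page(10603, 'content')      # raw wikitext of the latest revision
#   metadata = api_page(10603, 'metadata')     # page info dict (title, touched, length, ...)
#   images   = api_page(10603, 'articleimgs')  # page dict with an 'images' list, if any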
def api_thumb_url(filename):
    '''get thumbnail url of image'''
    page_content_dict = api_page(filename, 'imageinfo')
    thumburl = None  # stays None if the file has no imageinfo
    if 'imageinfo' in page_content_dict.keys():
        thumburl = (page_content_dict.get('imageinfo'))[0].get('thumburl')
    return thumburl
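# Usage sketch (commented out; '2x2_905.jpg' is the file from the example URL above,
# with the space written as an underscore so the request url stays valid):
#   print api_thumb_url('2x2_905.jpg')
#   # -> a thumbnail url such as .../images/thumb/.../500px-2x2_905.jpg (exact path
#   #    depends on the wiki's file layout), or None if the file has no imageinfo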
def find_imgs(article):
    '''get all internal images from published articles:
       * query all images in an article
       * for each image request a thumbnail
       * get the thumburl
    '''
    page_content_dict = api_page(article, 'articleimgs')
    thumbs_list = []
    if 'images' in page_content_dict.keys():
        images_list = page_content_dict.get('images')
        # print 'images_list', images_list
        for img in images_list:  # all images in article
            title = img.get('title').replace('File:', '', 1)  # api_page prepends "File:" itself
            thumburl = api_thumb_url(title)
            if thumburl is not None:
                thumbs_list.append(thumburl)
    return thumbs_list
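# Usage sketch (commented out; hedged example, assumes pageid 10603 exists and embeds images):
#   for thumb in find_imgs(10603):
#       print thumb  # one thumbnail url per image found in the article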
def parse_work_page(title, content):
    keyval = re.findall('\|(.*?)\=(.*?\n)', template, re.DOTALL)
    extra = ('Extra', extra)
    keyval.append(extra)
    # checkkeys: list of mainkeys present, w/ values, in tuples [(key, val), (key, val)...]
    checkkeys = [keyval[i] for i in range(len(keyval)) if keyval[i][0] in mainkeys and len(keyval[i][1]) > 3]
    if len(checkkeys) == 3:  # checkkeys contains all mainkeys and values
        for pair in keyval:
            key = pair[0]
            val = pair[1]
            val = val.replace('\n', '')
            if 'Creator' in key:
                val = val.replace(',', '')
            elif 'Thumbnail' in key:
                thumburl = api_thumb_url(val)
                work_dict['Thumbnail_url'] = thumburl
                print 'THUMB:', thumburl
            work_dict[key] = val
    return work_dict
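# Sketch of the kind of wikitext the regex above digests (a made-up example; the real
# template, work_dict and mainkeys are set up earlier in this function):
#   {{Graduation work
#   |Creator=Some, Student
#   |Title=Some work
#   |Thumbnail=somework.jpg
#   }}
# re.findall('\|(.*?)\=(.*?\n)', ...) then yields pairs such as
#   [('Creator', 'Some, Student\n'), ('Title', 'Some work\n'), ('Thumbnail', 'somework.jpg\n')]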
def api_category(category, year):
    '''Find all pages within a category (and year) and add each to the allworks dictionary.
       TODO: category intersection, e.g. with CirrusSearch'''
    category = category.replace(' ', '_')
    if year:
        api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}&cmtitle=Category:{}'.format(category, year)  # BUG: API only queries the last cmtitle: YEAR
    else:
        api_url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}'.format(category)
    request = urllib2.urlopen(api_url)
    jsonp = json.loads(request.read())
    for page in jsonp['query']['categorymembers']:
        print 'Page:', page
        title = ((page['title']).encode('utf-8')).replace(" ", "_")  # snakecase for page titles
        pageid = page['pageid']
        ## NOTE: instead of using the page name to query the page, use the PAGE ID
        article = api_page(pageid, 'content')
        # print title
        # pprint.pprint(article)
        work = parse_work_page(title, article)
        if work:
            allworks[pageid] = work  # dictionary (allworks) entry
            pprint.pprint(work)
            # Create work page
        else:
            print 'WORK DOES NOT CONTAIN REQUIRED CONTENT'
        print '-------------'
        print
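# Re: the BUG note above -- the API ignores a repeated cmtitle parameter, so passing both
# the category and the year only queries the year category. A possible workaround (a sketch,
# not used below; no continuation handling beyond cmlimit=500) is to fetch both member lists
# and intersect them on pageid:
def api_category_intersection(category, year):
    '''sketch: pageids of pages that are members of both Category:<category> and Category:<year>'''
    def members(cat):
        cat = cat.replace(' ', '_')
        url = endpoint + 'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}'.format(cat)
        data = json.loads(urllib2.urlopen(url).read())
        return set(p['pageid'] for p in data['query']['categorymembers'])
    return members(category) & members(year)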
api_category('Graduation work', '2013')
# pprint.pprint(allworks)

# save json
json_allworks = open('md_allworks.json', 'w')
json.dump(allworks, json_allworks)
json_allworks.close()
# print "wrote json dictionary to:", 'md_allworks.json'
## TO DO
# How to handle the work['Extra'] value? For some titles work['Extra'] contains: <gallery>, [[Pages]], text, etc.
# Do template values need to be converted to html?
# Thumbnails need a full url
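# Re: "Do template values need to be converted to html?" -- one possible approach (a sketch,
# untested against this wiki; wikitext_to_html is a hypothetical helper) is to let MediaWiki
# render the snippet via action=parse:
#
#   import urllib
#   def wikitext_to_html(wikitext):
#       '''sketch: render a wikitext snippet (e.g. work['Extra']) to HTML through the API'''
#       url = endpoint + 'action=parse&contentmodel=wikitext&' + urllib.urlencode({'text': wikitext})
#       data = json.loads(urllib2.urlopen(url).read())
#       return data['parse']['text']['*']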