#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############
# Edit the index sorting articles according to topic, section and issue
#####
"""Build an index of graduation works from a MediaWiki instance.

Queries the pzwiki MediaWiki API for pages in a category, parses each
page's ``{{Graduation work}}`` template, and dumps the collected works
to ``md_allworks.json``.
"""

import json
import re
import urllib.request

sid = '1234'
useragent = "Mozilla/5.001 (windows; U; NT4.0; en-US; rv:1.0) Gecko/25250101"
endpoint = "http://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&"

# pageid -> parsed work dict, filled in by api_category()
allworks = {}

# Template fields that must be present (with non-trivial values) for a
# page to be accepted as a complete work.
mainkeys = ['Thumbnail', 'Date', 'Creator']


def api_request(action, pagename):
    """Perform one API query and return the single page object.

    *action* is a query-string fragment with a ``{}`` placeholder that is
    filled with *pagename* (content, metadata, images, imageinfo ...).
    Returns the inner page dict of the first (only) entry in
    ``query.pages``.
    """
    url = endpoint + action.format(pagename)
    with urllib.request.urlopen(url) as request:
        jsonp = json.loads(request.read())
    json_dic = jsonp.get('query').get('pages')
    # The response keys pages by page id; we always query one page,
    # so take the first key.  (py2's keys()[0] is not valid in py3.)
    page_id = next(iter(json_dic))
    page_content = json_dic.get(page_id)
    print('API URL:', url)
    return page_content


def api_page(pageid, info):
    """Fetch one aspect of a page by page id.

    *info* selects what to fetch: 'content' (wikitext of the latest
    revision), 'metadata', 'articleimgs' (images used on the page) or
    'imageinfo' (thumbnail info; iiurlwidth determines thumbnail width).
    """
    if info == 'content':
        api_response = api_request(
            'action=query&pageids={}&prop=revisions&rvprop=content', pageid)
        page_content = api_response.get('revisions')[0]['*']
    elif info == 'metadata':
        page_content = api_request('action=query&pageids={}&prop=info', pageid)
    elif info == 'articleimgs':
        page_content = api_request('action=query&pageids={}&prop=images', pageid)
    elif info == 'imageinfo':
        # iiurlwidth determines the width of the generated thumbnail
        page_content = api_request(
            'action=query&pageids={}&prop=imageinfo&iiprop=url&iiurlwidth=300',
            pageid)
    return page_content


def api_page_content(pagename):
    """Return the wikitext of *pagename* (queried by title, not id).

    Fixed: the original referenced an undefined name ``page`` and would
    always raise NameError.
    """
    page = api_request(
        'action=query&titles={}&prop=revisions&rvprop=content', pagename)
    return page.get('revisions')[0]['*']
    # print json.dumps(revisions, sort_keys=True, indent=4)  ## see response


def api_thumb_url(filename):
    """Return the thumbnail URL of an image page, or None.

    Fixed: the original promised a URL but returned the whole page dict,
    which made the ``is None`` check in find_imgs() dead code.
    """
    page_content_dict = api_page(filename, 'imageinfo')
    if 'imageinfo' in page_content_dict:
        return page_content_dict['imageinfo'][0].get('thumburl')
    return None


def find_imgs(article):
    """Get thumbnail URLs for all internal images of an article.

    * query all images in an article
    * for each image request a thumbnail
    * collect the thumburl

    Returns a (possibly empty) list of URLs.  Fixed: the original left
    ``thumbs_list`` undefined when the page had no images.
    """
    thumbs_list = []  # initialize up front so imageless pages return []
    page_content_dict = api_page(article, 'articleimgs')
    if 'images' in page_content_dict:
        for img in page_content_dict['images']:  # all images in article
            thumburl = api_thumb_url(img.get('title'))
            if thumburl is not None:
                thumbs_list.append(thumburl)
    return thumbs_list


def parse_work_page(title, content):
    """Parse a ``{{Graduation work}}`` template out of page wikitext.

    Returns a dict of template key/value pairs (plus 'Title' and
    'Extra') when all ``mainkeys`` are present with non-trivial values,
    else None.  Fixed: regexes are now raw strings — the original
    pattern contained the invalid escape ``\\G`` which raises re.error
    on modern Python.
    """
    if not re.match(r'\{\{Graduation work', content):
        return None
    work_dict = {'Title': title}
    template, extra = re.findall(
        r'\{\{Graduation work\n(.*?)\}\}(.*)', content, re.DOTALL)[0]
    # template's key/value pairs: "|Key=value\n"
    keyval = re.findall(r'\|(.*?)\=(.*?\n)', template, re.DOTALL)
    keyval.append(('Extra', extra))
    # checkkeys: mainkeys that are present with a non-trivial value
    checkkeys = [(k, v) for k, v in keyval
                 if k in mainkeys and len(v) > 3]
    if len(checkkeys) != len(mainkeys):
        return None  # work does not contain all required fields
    for key, val in keyval:
        val = val.replace('\n', '')
        if 'Creator' in key:
            val = val.replace(', ', '')
        work_dict[key] = val
    return work_dict


def api_category(category, year):
    """Find all pages within a category and add each work to allworks.

    BUG (unchanged behavior): the MediaWiki categorymembers API does not
    intersect categories — when *year* is given, only Category:<year> is
    actually queried, exactly as the original's duplicated cmtitle
    parameter behaved.
    """
    category = category.replace(' ', '_')
    cmtitle = year if year else category
    api_url = endpoint + (
        'action=query&list=categorymembers&cmlimit=500&cmtitle=Category:{}'
        .format(cmtitle))
    with urllib.request.urlopen(api_url) as request:
        jsonp = json.loads(request.read())
    for page in jsonp['query']['categorymembers']:
        print('Page:', page)
        title = page['title'].replace(' ', '_')  # snakecase for page titles
        # NOTE: query pages by PAGE ID instead of page name
        pageid = page['pageid']
        article = api_page(pageid, 'content')
        print(title)
        work = parse_work_page(title, article)
        if work:
            allworks[pageid] = work  # dictionary(allworks) entry
            print(work)
        else:
            print('WORK DOES NOT CONTAIN REQUIRED CONTENT')
        print('-------------')


def main():
    """Crawl the 2013 graduation works and dump them to JSON."""
    api_category('Graduation work', '2013')
    # pprint.pprint(allworks)
    with open('md_allworks.json', 'w') as json_allworks:  # save json
        json.dump(allworks, json_allworks)
    print("wrote json dictionary to:", 'md_allworks.json')


if __name__ == '__main__':
    main()

## TO DO
# How to handle work['Extra'] value? some titles' work['Extra'] contains: , [[Pages]], text, etc
# Do template values need to be converted to html?
# Thumbnails need a full url