From fc83652e3b04f7b7bfcc05ad497cf7ae4d86f2cc Mon Sep 17 00:00:00 2001
From: Michael Murtaugh
Date: Thu, 8 Feb 2018 11:53:21 +0100
Subject: [PATCH] updates to leaflet and mediawiki for recentchanges
---
scripts/leaflet.py | 16 ++++++++++------
scripts/mediawiki.py | 25 +++++++++++++++++--------
2 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/scripts/leaflet.py b/scripts/leaflet.py
index d842163..21c69f3 100755
--- a/scripts/leaflet.py
+++ b/scripts/leaflet.py
@@ -622,7 +622,7 @@ def make_gallery(args):
items.append(item)
# Ensure / Generate tiles per image
- items.sort(key=lambda x: x['url'])
+ # items.sort(key=lambda x: x['url'])
tiles = []
for item in items:
n = item['url']
@@ -637,11 +637,15 @@ def make_gallery(args):
if 'date' in item:
dt = parse8601(item['date'], "%d %b %Y")
caption += u'{0}'.format(dt)
- if 'url' in item:
- ext = os.path.splitext(urlparse.urlparse(item['url']).path)[1]
- if ext:
- ext = ext[1:].upper()
- caption += u'<a href="{0}">{1}</a>'.format(item['url'], ext)
+
+ if 'src' in item:
+ caption += u'<a href="{0}">{1}</a>'.format(item['src'], "SRC")
+ elif 'url' in item:
+ # ext = os.path.splitext(urlparse.urlparse(item['url']).path)[1]
+ # if ext:
+ # ext = ext[1:].upper()
+ caption += u'<a href="{0}">{1}</a>'.format(item['url'], "SRC")
+
if 'text' or 'date' in item:
caption += u'<br>';
diff --git a/scripts/mediawiki.py b/scripts/mediawiki.py
index 3c6f539..ec01f23 100644
--- a/scripts/mediawiki.py
+++ b/scripts/mediawiki.py
@@ -35,7 +35,11 @@ def wget (url, path, blocksize=4*1000):
def page_url (site, page):
# print ("[page_url]", page.name, file=sys.stderr)
base = os.path.split(site.site['base'])[0]
- uret = os.path.join(base, urlquote(page.normalize_title(page.name)))
+ path = page.normalize_title(page.name)
+ if type(path) == unicode:
+ path = path.encode("utf-8")
+ path = urlquote(path)
+ uret = os.path.join(base, path)
# assert type(uret) == str
return uret
@@ -377,12 +381,15 @@ def recentfiles (args):
# imageinfo = filepage.imageinfo
imageinfo = imageinfo_with_thumbnail(wiki, r['title'])
if not imageinfo['mime'].startswith("image/"):
- print ("Skipping non image ({0}) {1}".format(imageinfo['mime'], r['title']))
+ print (u"Skipping non image ({0}) {1}".format(imageinfo['mime'], r['title']).encode("utf-8"), file=sys.stderr)
+ continue
+ if 'thumburl' not in imageinfo:
+ print (u"Skipping item with no thumburl {0}".format(r['title']).encode("utf-8"), file=sys.stderr)
continue
# Deal with edge case at items == aiend are returned
if last_date and r['timestamp'] == last_date:
- print ("SKIPPING AIEND item", file=sys.stderr)
+ # print ("SKIPPING AIEND item", file=sys.stderr)
break
# Construct an item for output
@@ -391,7 +398,8 @@ def recentfiles (args):
for usagepage in filepage.imageusage():
break # just grab the first usage page
# url : local path to file
- imageurl = imageinfo['url']
+ # imageurl = imageinfo['url']
+ imageurl = imageinfo['thumburl']
localpath = imageurl.replace("https://pzwiki.wdka.nl/mw-mediadesign/images/", "wiki/")
# wget image from wiki to local folder
if not os.path.exists(localpath):
@@ -405,19 +413,20 @@ def recentfiles (args):
item = {}
item['url'] = localpath
item['date'] = r['timestamp']
+ item['src'] = page_url(wiki, filepage)
userpage = wiki.pages.get('User:'+r['user'])
if usagepage:
- item['text'] = '<a href="{0}">{1}</a><br>Uploaded by <a href="{2}">{3}</a>'.format(
+ item['text'] = u'<a href="{0}">{1}</a><br>Uploaded by <a href="{2}">{3}</a>'.format(
page_url(wiki, usagepage),
usagepage.page_title,
page_url(wiki, userpage),
- r['user'])
+ r['user']).encode("utf-8")
else:
- item['text'] = '<a href="{0}">{1}</a><br>Uploaded by <a href="{2}">{3}</a>'.format(
+ item['text'] = u'<a href="{0}">{1}</a><br>Uploaded by <a href="{2}">{3}</a>'.format(
page_url(wiki, filepage),
filepage.page_title,
page_url(wiki, userpage),
- r['user'])
+ r['user']).encode("utf-8")
# print (json.dumps(item))
items_to_output.append(item)