Merge branch 'master' of https://git.xpub.nl/XPUB/special-issue-11-wiki2html
commit 61b4fe977e
@@ -0,0 +1,26 @@
import os, argparse, sys, re, json
from mwclient import (Site,
                      errors)

# API CALL
# https://hub.xpub.nl/sandbox/itchwiki/api.php?action=smwbrowse&browse=pvalue&params={ "limit": 1500, "offset": 0, "property" : "Title", "search": "" }&format=json
# generated orgs.json

# login
site = Site(host='hub.xpub.nl/sandbox', path='/itchwiki/')

wd = os.path.dirname(os.path.abspath(__file__))  # parent working directory
with open(os.path.join(wd, 'login.txt'), 'r') as login:  # read login user & pwd
    loginlines = login.read()
    user, pwd = loginlines.strip().split('\n')  # strip() tolerates a trailing newline
site.login(username=user, password=pwd)  # login to wiki


with open('titles.json', 'r') as f:
    titles = json.load(f)

for pagename in titles['query']:
    page = site.pages[pagename]
    if not page.text():  # if page has no text yet
        print(pagename)
        page.save('{{Publication}}\n[[Category:Title]]')
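The API CALL comment above points at the smwbrowse request that produced the titles file. As a rough sketch (not part of this commit), the same request can be issued through mwclient's generic api() method; the parameter values are copied from that URL, the output filename is an assumption, and the exact shape of the returned JSON (including the 'query' key the loop above iterates) should be checked against the wiki's SMW version:

# Hedged sketch, not part of the commit: regenerate titles.json via smwbrowse.
# Parameter values copied from the URL in the comment above; filename assumed.
import json
from mwclient import Site

site = Site(host='hub.xpub.nl/sandbox', path='/itchwiki/')
browse_params = json.dumps({'limit': 1500, 'offset': 0,
                            'property': 'Title', 'search': ''})
result = site.api('smwbrowse', browse='pvalue', params=browse_params)
with open('titles.json', 'w') as f:
    json.dump(result, f, indent=2)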
@@ -0,0 +1,33 @@
import os, argparse, sys, re, json
from mwclient import (Site,
                      errors)

# API CALL
# https://hub.xpub.nl/sandbox/itchwiki/api.php?action=smwbrowse&browse=pvalue&params={ "limit": 1500, "offset": 0, "property" : "Title", "search": "" }&format=json
# generated orgs.json
# >>> result = site.api('query', prop='coordinates', titles='Oslo|Copenhagen')
# login
site = Site(host='hub.xpub.nl/sandbox', path='/itchwiki/')

wd = os.path.dirname(os.path.abspath(__file__))  # parent working directory
with open(os.path.join(wd, 'login.txt'), 'r') as login:  # read login user & pwd
    loginlines = login.read()
    user, pwd = loginlines.strip().split('\n')  # strip() tolerates a trailing newline
site.login(username=user, password=pwd)  # login to wiki

# To query a large number of items:

for i in range(0, 1500, 100):
    # variable i increases by 100 at each iteration,
    # between 0 and 1400,
    # and makes the offset parameter change
    print('\n', f'Querying from {i} to {i+100}', '\n')

    ask_query = f'[[Category:Title]]|format=json|limit=100|offset={i}'

    response = site.api(action='ask', query=ask_query)
    for pagetitle in response['query']['results']:
        print(pagetitle)
        page = site.pages[pagetitle]
        # text = page.text()
        page.save('{{Publication}}\n[[Category:Title]]')
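One possible refinement (an assumption, not part of the commit): rather than hard-coding 1500 as the upper bound, the loop could follow the continuation marker that Semantic MediaWiki's ask API returns while more results remain. The 'query-continue-offset' key used below is how current SMW versions signal this; verify it against the wiki before relying on it. The sketch assumes the logged-in site object from the script above:

# Hedged sketch: page through [[Category:Title]] until SMW stops returning
# a continuation offset, instead of assuming 1500 results exist.
offset = 0
while True:
    ask_query = f'[[Category:Title]]|format=json|limit=100|offset={offset}'
    response = site.api(action='ask', query=ask_query)
    for pagetitle in response['query']['results']:
        print(pagetitle)
    if 'query-continue-offset' not in response:
        break  # no more results to fetch
    offset = response['query-continue-offset']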
File diff suppressed because one or more lines are too long