
{
"cells": [
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"import random\n",
"import nltk\n",
"from newsapi import NewsApiClient\n",
"import pprint\n",
"\n",
"pp = pprint.PrettyPrinter(indent=3)"
]
},
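{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Added note, not part of the original run: nltk.pos_tag relies on the\n",
"#'averaged_perceptron_tagger' model. If it is not installed yet, uncomment the\n",
"#download below and run it once.\n",
"#nltk.download('averaged_perceptron_tagger')"
]
},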
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [],
"source": [
"newsapi = NewsApiClient(api_key='0c00356f65df431ab394d179292075bd')\n",
"top0 = newsapi.get_everything(q='translation', language='en') #get json from NewsAPI\n",
"top1 = newsapi.get_everything(q='futuro', language='it')\n",
"top2 = newsapi.get_everything(q='futuro', language='es')\n",
"top3 = newsapi.get_everything(q='future', language='fr')\n",
"\n",
"#pp.pprint(top0)"
]
},
{
"cell_type": "code",
"execution_count": 256,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'NNP': ['Appl…'], 'NN': ['story:'], 'VBZ': ['is'], 'VBN': ['been'], 'JJ': ['big'], 'NNS': ['employees'], 'IN': ['for'], 'CC': ['and'], 'JJR': ['more'], 'VBG': ['closing'], 'TO': ['to'], 'VB': ['fight'], 'DT': ['The'], 'PRP': ['it'], 'MD': ['can'], 'WDT': ['which'], 'VBD': ['used'], 'PRP$': ['your'], 'CD': ['2020.'], 'RB': ['embarrassingly'], 'RP': ['out'], 'VBP': ['come'], 'RBR': ['earlier'], 'WRB': ['when'], 'PDT': ['all'], 'WP': ['who'], 'EX': ['There'], 'NNPS': ['Republicans']}\n"
]
}
],
"source": [
"articles0 = top0['articles'] #get articles summary from NewsAPI\n",
"articles1 = top1['articles']\n",
"articles2 = top2['articles']\n",
"articles3 = top3['articles']\n",
"\n",
"dtot0 = ''\n",
"dtot1 = ''\n",
"dtot2 = ''\n",
"dtot3 = ''\n",
"\n",
"for x in range(20):\n",
" a0 = articles0[x] #get articles' descriptions and store them to dtot\n",
" a1 = articles1[x]\n",
" a2 = articles2[x]\n",
" a3 = articles3[x]\n",
" d0 = a0['description']\n",
" d1 = a1['description']\n",
" d2 = a2['description']\n",
" d3 = a3['description']\n",
" dtot0 += d0\n",
" dtot1 += d1\n",
" dtot2 += d2\n",
" dtot3 += d3\n",
" \n",
"dtot0 = dtot0.split()\n",
"dtot1 = dtot1.split()\n",
"dtot2 = dtot2.split()\n",
"dtot3 = dtot3.split()\n",
"\n",
"tagged0 = nltk.pos_tag(dtot0) #POSing the descriptions\n",
"tagged1 = nltk.pos_tag(dtot1) #POSing the descriptions\n",
"tagged2 = nltk.pos_tag(dtot2) #POSing the descriptions\n",
"tagged3 = nltk.pos_tag(dtot3) #POSing the descriptions\n",
"\n",
"\n",
"\n",
"#HERE THE WTF\n",
"#HOW CAN I APPEND TO THE DICTIONARY (i0) _ALL_ THE WORDS? IT APPENDS ONLY THE FIRSTS ONES\n",
"#tried in different ways but :(\n",
"\n",
"t0 = []\n",
"i0 = {}\n",
" \n",
"for a, b in tagged0:\n",
" if a not in b:\n",
" i0.update({b:[a]})\n",
" \n",
" \n",
"\n",
"for q, k in tagged0:\n",
" if k not in i0:\n",
" t0.append((k, s))\n",
" i0[k] = '[]'\n",
" \n",
" \n",
"print(i0)"
]
},
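{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Added illustration, not part of the original run: the same tag -> words grouping as i0,\n",
"#written with collections.defaultdict instead of setdefault().\n",
"from collections import defaultdict\n",
"\n",
"grouped = defaultdict(list)\n",
"for word, tag in tagged0:\n",
"    grouped[tag].append(word)\n",
"\n",
"#dict(grouped) == i0  #should hold for the English corpus"
]
},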
{
"cell_type": "code",
"execution_count": 270,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'NNP': ['Appl…'],\n",
" 'NN': ['story:'],\n",
" 'VBZ': ['is'],\n",
" 'VBN': ['been'],\n",
" 'JJ': ['big'],\n",
" 'NNS': ['employees'],\n",
" 'IN': ['for'],\n",
" 'CC': ['and'],\n",
" 'JJR': ['more'],\n",
" 'VBG': ['closing'],\n",
" 'TO': ['to'],\n",
" 'VB': ['fight'],\n",
" 'DT': ['The'],\n",
" 'PRP': ['it'],\n",
" 'MD': ['can'],\n",
" 'WDT': ['which'],\n",
" 'VBD': ['used'],\n",
" 'PRP$': ['your'],\n",
" 'CD': ['2020.'],\n",
" 'RB': ['embarrassingly'],\n",
" 'RP': ['out'],\n",
" 'VBP': ['come'],\n",
" 'RBR': ['earlier'],\n",
" 'WRB': ['when'],\n",
" 'PDT': ['all'],\n",
" 'WP': ['who'],\n",
" 'EX': ['There'],\n",
" 'NNPS': ['Republicans']}"
]
},
"execution_count": 270,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"d1 = ''\n",
"for a,b in tagged0:\n",
" d1 = {b : [a] for a,b in tagged0}\n",
" if a not in b:\n",
" d1.update({b:[a]})\n",
" \n",
" \n",
" \n",
" \n",
"d1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 271,
"metadata": {},
"outputs": [],
"source": [
"#This is for prepare a grammar constructio based on a picked random description from the original NewsAPI json\n",
"s = ' '\n",
"r = random.randrange(0,19)\n",
"a_pos = articles[r]\n",
"cont_pos= a_pos['description']\n",
"cont_pos = cont_pos.split()\n",
"tag_cont = nltk.pos_tag(cont_pos)\n",
"\n",
"dat = {}\n",
"\n",
"for word, tag in tag_cont:\n",
" dat[tag] = word\n",
" \n",
"keys = dat.keys()\n",
"\n",
"output = \" + s + \".join([pos for pos in keys])"
]
},
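{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Added sketch (an assumption about how the POS template is meant to be used, not code from\n",
"#the original run): fill each tag slot in `keys` with a random word of the same tag drawn\n",
"#from the i0 dictionary built above; tags that never occurred in i0 are skipped.\n",
"filled = ' '.join(random.choice(i0[tag]) for tag in keys if tag in i0)\n",
"print(filled)"
]
},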
{
"cell_type": "code",
"execution_count": 272,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'NNP + s + NN + s + VBZ + s + VBN + s + JJ + s + NNS + s + IN + s + CC + s + JJR + s + VBG + s + TO + s + VB + s + DT + s + PRP + s + MD'"
]
},
"execution_count": 272,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"output"
]
},
{
"cell_type": "code",
"execution_count": 273,
"metadata": {},
"outputs": [],
"source": [
"####################################################################################################################################################################################"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 293,
"metadata": {},
"outputs": [],
"source": [
"res = ['The cable who is of technologies are going to be new Read also and','''no world's who tells from rules are figuring to use near Broomstick, ahead or''',\n",
"'the mechanism who seeks as owners are figuring to control new Science, again and',\n",
"'the presidency, who has in works carry flying to build hard Harry ever and',\n",
"'a repository. who PlantsPhysicists about microswimmers deliver... going to act it. Harry likely But',\n",
"'the lecture who PlantsPhysicists of issues deliver... flailing to be different Fluora, up and',\n",
"'a cable who has in poets think averting to be electric CEO half-jokingly and',\n",
"'the male who fits of submissions are helping to use fellow Texas half-jokingly and',\n",
"'the toy who is of hed its helping to get electric Black much and',\n",
"'a unease who represents orgasm. sensors think averting to act free PS5 also and',\n",
"'the more...Jennifer who fits of reveals believe offering to streaming. major Fluora, actually and',\n",
"'the sanitizer who represents in reorganizations are figuring to perform adjustable Tech, likely or']\n"
]
},
{
"cell_type": "code",
"execution_count": 294,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['The cable who is of technologies are going to be new Read also and',\n",
" \"no world's who tells from rules are figuring to use near Broomstick, ahead or\",\n",
" 'the mechanism who seeks as owners are figuring to control new Science, again and',\n",
" 'the presidency, who has in works carry flying to build hard Harry ever and',\n",
" 'a repository. who PlantsPhysicists about microswimmers deliver... going to act it. Harry likely But',\n",
" 'the lecture who PlantsPhysicists of issues deliver... flailing to be different Fluora, up and',\n",
" 'a cable who has in poets think averting to be electric CEO half-jokingly and',\n",
" 'the male who fits of submissions are helping to use fellow Texas half-jokingly and',\n",
" 'the toy who is of hed its helping to get electric Black much and',\n",
" 'a unease who represents orgasm. sensors think averting to act free PS5 also and',\n",
" 'the more...Jennifer who fits of reveals believe offering to streaming. major Fluora, actually and',\n",
" 'the sanitizer who represents in reorganizations are figuring to perform adjustable Tech, likely or']"
]
},
"execution_count": 294,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res"
]
},
{
"cell_type": "code",
"execution_count": 447,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<head>harset=utf-8</head>\n"
]
}
],
"source": [
"export = 'news.txt'\n",
"with open(export, 'w') as export:\n",
" print('<head>harset=utf-8</head>')\n",
" print('<h1>News from the future</h1>', file=export)\n",
" print('<br><br><br><br><br><br><br><br><br><br><br><br>', file = export)\n",
" for x in range(len(res)):\n",
" print(f'''{res[x].lower().capitalize()}.\n",
" ''',file=export)\n",
" print('<br><br><br><br><br><br><br><br><br><br><br><br><br><br>', file = export)"
]
},
{
"cell_type": "code",
"execution_count": 448,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Fontconfig warning: ignoring UTF-8: not a valid region tag\n"
]
}
],
"source": [
"!pandoc news.txt | weasyprint -s css.css - newsfromthefuture.pdf"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 411,
"metadata": {},
"outputs": [],
"source": [
"a_pos = open('language.txt').read()\n",
"cont_pos = a_pos.split()\n",
"tag_cont = nltk.pos_tag(cont_pos)"
]
},
{
"cell_type": "code",
"execution_count": 321,
"metadata": {},
"outputs": [],
"source": [
"dat = {}\n",
"\n",
"for word, tag in tag_cont:\n",
" dat[tag] = word\n",
" \n",
"keys = dat.keys()\n",
"\n",
"output = \" + s + \".join([pos for pos in keys])"
]
},
{
"cell_type": "code",
"execution_count": 330,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Led and broom believe likely American of can test has called sensors the which There smallest 300 selected They who They to'"
]
},
"execution_count": 330,
"metadata": {},
"output_type": "execute_result"
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}