from __future__ import division
import base64
import codecs

import nltk
from nltk import word_tokenize, pos_tag
from nltk.probability import FreqDist
from nltk.corpus import stopwords

# NLTK resources: the tokenizer and tagger need their data files too,
# not just the stopword list
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

# open the txt files, read them
with open('faceapp.txt', 'r') as file:
    text = file.read()

with open('russia-estonia.txt', 'r') as t_file:
    t_text = t_file.read()

# stopwords: NLTK's default English list merged with a custom list.
# Tokens are lowercased before the lookup below, so stopwords.txt is
# expected to hold lowercase words.
default_stopwords = set(stopwords.words('english'))
custom_stopwords = set(codecs.open('stopwords.txt', 'r', 'utf-8').read().splitlines())
all_stopwords = default_stopwords | custom_stopwords

# multi-line string: HTML head. The original markup was stripped from this
# file, so the tags (and all class names below) are a reconstruction.
print('''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Noun</title>
</head>
<body>''')

# wrapper
print('<div class="wrapper">')
# insert an image, embedded as a base64 data URI so the page is self-contained
FaceApp_img_url = base64.b64encode(open('img/faceapp_logo.png', 'rb').read()).decode('utf-8')
FaceApp_image = '''<div class="img">
<img src="data:image/png;base64,{0}" alt="FaceApp">
<p class="caption">FaceApp</p>
</div>'''.format(FaceApp_img_url)
print(FaceApp_image)

# info box
print('<div class="info">')
infotext = [('Service', 'FaceApp'),
            ('Type', 'Image editing'),
            ('Initial release', 'December 31, 2016'),
            ('source', 'link'),
            ('Description', 'FaceApp is a mobile application for iOS and Android developed by the Russian company Wireless Lab. The app generates highly realistic transformations of human faces in photographs by using neural networks based on artificial intelligence. The app can transform a face to make it smile, look younger, look older, or change gender.')]

for title, info in infotext:
    print('''<dl>
<dt class="title">{0}</dt>
<dd class="info">{1}</dd>
</dl>'''.format(title, info))

print('</div>')
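# each loop iteration above emits one definition-list entry, roughly
# (a sketch, using the reconstructed class names):
#   <dl>
#   <dt class="title">Service</dt>
#   <dd class="info">FaceApp</dd>
#   </dl>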
# ToS text
print('<div class="paragraph">')
tokenized = word_tokenize(text)
tagged = pos_tag(tokenized)

# wrap every token in a span whose class is its part-of-speech tag,
# so the stylesheet can colour words by word class
for word, pos in tagged:
    print('<span class="{0}">{1}</span>'.format(pos, word))

print('</div>')
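# a sketch of the kind of tagging this produces (Penn Treebank tags),
# which is where CSS classes like NNP, VBZ, NNS come from:
#   pos_tag(word_tokenize('FaceApp transforms faces'))
#   -> [('FaceApp', 'NNP'), ('transforms', 'VBZ'), ('faces', 'NNS')]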
# colonial words list
print('''<div class="colonial">
<h2>colonial words:</h2>''')

# frequency of the remaining words, once stopwords are filtered out
tokens_without_stopwords = nltk.FreqDist(words.lower() for words in tokenized
                                         if words.lower() not in all_stopwords)
top_words = tokens_without_stopwords.most_common(20)

for chosen_words, frequency in top_words:
    print('<span class="frequency">{}({})</span> '.format(chosen_words, frequency))

print('</div>')
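# most_common(20) returns (token, count) pairs in descending order of count,
# for example (hypothetical tokens and counts, for illustration only):
#   tokens_without_stopwords.most_common(3)
#   -> [('content', 36), ('faceapp', 29), ('user', 24)]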
# close the first wrapper
print('</div>')

# t_wrapper (second wrapper)
print('<div class="t_wrapper">')
# insert an image
# https://upload.wikimedia.org/wikipedia/commons/1/15/Joffe_signing_the_Treaty_of_Tartu.jpg
img_url = base64.b64encode(open('img/tartu.jpeg', 'rb').read()).decode('utf-8')
t_image = '''<div class="img">
<img src="data:image/jpeg;base64,{0}" alt="Peace Treaty of Tartu">
<p class="caption">Peace Treaty of Tartu</p>
</div>'''.format(img_url)
print(t_image)

# t_info box
print('<div class="info">')
t_infotext = [('Name of Treaty', 'Peace Treaty of Tartu'),
              ('Date', 'February 2, 1920'),
              ('Location', 'Tartu, Estonia'),
              ('Signed', 'February 2, 1920'),
              ('Type', 'bilateral peace treaty'),
              ('source', 'link'),
              ('Description', 'The Tartu Peace Treaty, or Treaty of Tartu, is a peace treaty between Estonia and the Russian Soviet Federative Socialist Republic, signed on 2 February 1920, ending the Estonian War of Independence.')]

for t_title, t_info in t_infotext:
    print('''<dl>
<dt class="title">{0}</dt>
<dd class="info">{1}</dd>
</dl>'''.format(t_title, t_info))

print('</div>')

# treaty text
print('<div class="paragraph">')
t_tokenized = word_tokenize(t_text)
t_tagged = pos_tag(t_tokenized)

for t_word, t_pos in t_tagged:
    print('<span class="{0}">{1}</span>'.format(t_pos, t_word))

print('</div>')
# treaty colonial words list
print('''<div class="colonial">
<h2>colonial words:</h2>''')

t_tokens_without_stopwords = nltk.FreqDist(words.lower() for words in t_tokenized
                                           if words.lower() not in all_stopwords)
t_top_words = t_tokens_without_stopwords.most_common(20)

for t_chosen_words, t_frequency in t_top_words:
    print('<span class="frequency">{}({})</span> '.format(t_chosen_words, t_frequency))

print('</div>')
# close the second wrapper
print('</div>')

# close the page (the original closing markup was stripped; reconstructed)
print('''</body>
</html>''')
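# usage (an assumption about how the script is run: it writes the whole
# page to stdout, so redirect the output into a file):
#   python faceapp.py > faceapp.html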