# from __future__ import division  # only needed when run under Python 2

from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.probability import FreqDist
from nltk.corpus import stopwords
import nltk
import codecs
import base64

# word_tokenize() and pos_tag() need their own NLTK data, not just the stopword list
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')

# Treaty text, split into paragraphs on blank lines
with open('treaty_file/russia-estonia.txt', 'r') as russia_file:
    russia_text = russia_file.read()
russia_text_list = russia_text.split("\n\n")

# NLTK's English stopwords, extended with a project list (one word per line)
t_default_stopwords = set(stopwords.words('english'))
t_custom_stopwords = set(codecs.open('t_stopwords.txt', 'r', encoding='utf-8').read().splitlines())
t_all_stopwords = t_default_stopwords | t_custom_stopwords

# NOTE: the HTML inside the print() calls was stripped when this script was
# extracted; all tags and class names below are reconstructions (assumed),
# not the original markup.
print('''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" href="treaty.css">
</head>
<body>''')

# t_wrapper (second wrapper)
print('<div class="t_wrapper">')
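# Overall shape of the generated page (class names reconstructed, see note above):
#   t_wrapper
#   ├── t_image      treaty photograph with caption
#   ├── t_infobox    key facts about the treaty
#   ├── t_text       full treaty text, one <span> per POS-tagged word
#   └── t_topwords   the twenty most frequent non-stopword words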
# Treaty image, embedded directly in the page as a base64 data URI
with open('img/tartu.jpeg', 'rb') as img_file:
    img_url = base64.b64encode(img_file.read()).decode('utf-8')
t_image = '''<div class="t_image">
<img src="data:image/jpeg;base64,{0}" alt="Peace Treaty of Tartu, Estonia">
<p class="t_caption">Peace Treaty of Tartu, Estonia</p>
</div>'''.format(img_url)
print(t_image)
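# For reference, the data URI above inlines the JPEG bytes, so the generated
# HTML needs no separate image file next to it; the tag renders roughly like
# (illustrative, base64 of a JPEG always starts with "/9j/"):
#   <img src="data:image/jpeg;base64,/9j/4AAQSkZJRg..." alt="...">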
# t_info box
print('<div class="t_infobox">')
t_infotext = [('Name of Treaty', 'Peace Treaty of Tartu'),
              ('Country of Origin', 'Russia'),
              ('Signed', 'February 2, 1920'),
              ('Location', 'Tartu, Estonia'),
              ('Word Count', '2,104'),
              ('Type', 'bilateral peace treaty'),
              ('Original Source', 'link'),
              ('Description', 'The Tartu Peace Treaty or Treaty of Tartu is a peace treaty between Estonia and the Russian Soviet Federative Socialist Republic, signed on 2 February 1920, ending the Estonian War of Independence.')]
for t_title, t_info in t_infotext:
    print('''<div class="t_info_row">
<span class="t_info_title">{0}</span>
<span class="t_info_value">{1}</span>
</div>'''.format(t_title, t_info))
print('</div>')
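# With the reconstructed markup above, a single rendered infobox row looks like:
#   <div class="t_info_row"><span class="t_info_title">Signed</span>
#   <span class="t_info_value">February 2, 1920</span></div>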
# Treaty text: POS-tag every word and wrap it in a span whose class names
# encode the tag and the word, so a stylesheet can style words by part of speech
print('<div class="t_text">')
all_tokens = []  # tokens from every paragraph, reused for the frequency list below
for t_paragraph in russia_text_list:
    t_tokenized = word_tokenize(t_paragraph)
    all_tokens.extend(t_tokenized)
    t_tagged = pos_tag(t_tokenized)
    print('<p>')
    for t_word, t_pos in t_tagged:
        # punctuation is not valid inside a CSS class name, so rename it
        t_pos_class = (t_pos.replace('.', 'dot').replace(',', 'comma')
                       .replace('(', 'marks').replace(')', 'marks')
                       .replace(':', 'marks').replace(';', 'marks'))
        t_word_class = (t_word.replace('.', 'dot').replace(',', 'comma')
                        .replace('(', 'marks').replace(')', 'marks')
                        .replace(':', 'marks').replace(';', 'marks').lower())
        print('<span class="{0} {1}">{2}</span>'.format(t_pos_class, t_word_class, t_word))
    print('</p>')
print('</div>')
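# pos_tag() returns (token, Penn Treebank tag) pairs; for instance,
#   pos_tag(word_tokenize('Peace was signed in Tartu.'))
# yields pairs along the lines of
#   [('Peace', 'NN'), ('was', 'VBD'), ('signed', 'VBN'),
#    ('in', 'IN'), ('Tartu', 'NNP'), ('.', '.')]
# (exact tags depend on the tagger model), so the loop above turns 'Tartu'
# into <span class="NNP tartu">Tartu</span> and '.' into
# <span class="dot dot">.</span>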
# treaty top words list
print('<div class="t_topwords">')
print('<h2>Frequent words</h2>')
t_tokens_without_stopwords = FreqDist(
    word.lower() for word in all_tokens if word.lower() not in t_all_stopwords)
t_top_words = t_tokens_without_stopwords.most_common(20)
for t_chosen_words, t_frequency in t_top_words:
    print('<div class="t_frequentword">{} ({})</div>'.format(t_chosen_words, t_frequency))
print('</div>')

# close the wrapper and the page
print('</div>')
print('''</body>
</html>''')
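# Usage sketch (script name assumed): the script writes the whole page to
# stdout, so redirect it into a file and open that in a browser:
#   python treaty_tartu.py > tartu.html
# It expects treaty_file/russia-estonia.txt, t_stopwords.txt (one stopword
# per line) and img/tartu.jpeg to exist relative to the working directory.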