')
+tokenized_all = []
for paragraph in faceapp_text_list:
tokenized = word_tokenize(paragraph)
+ tokenized_all += tokenized # accumulate tokens from every paragraph for corpus-wide frequency counts
tagged = pos_tag(tokenized)
print('
')
for word, pos in tagged:
@@ -93,7 +95,7 @@ print('
')
#tos top words list
print('
Frequent words
')
-tokens_without_stopwords = nltk.FreqDist(words.lower() for words in tokenized if words.lower() not in tos_all_stopwords)
+tokens_without_stopwords = nltk.FreqDist(words.lower() for words in tokenized_all if words.lower() not in tos_all_stopwords)
frequency_word = FreqDist(tokens_without_stopwords)
top_words = tokens_without_stopwords.most_common(30)