# Append each filtered token to a file. Opening the file once with a
# context manager avoids reopening it on every iteration:
# with open('tokenized_words.txt', 'a') as append_file:
#     for new_file in tokens_without_stopwords:
#         append_file.write(" " + new_file)
#
# Show only stopwords: keep each stopword, mask every other token with '*'.
# processed_word_list = []
# for word in tokenized:
#     if word not in all_stopwords:
#         processed_word_list.append('*')
#     else:
#         processed_word_list.append(word)
# print(processed_word_list)
#
# Plot the ten most frequent words in a graph; FreqDist.plot() renders the
# chart itself, so there is nothing useful to print afterwards.
# top_words_plot = frequency_word.plot(10)
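# A minimal, self-contained sketch of what the commented-out block above
# appears to do, assuming NLTK is the library in use (FreqDist.plot(10)
# matches nltk.FreqDist). The sample text and the reconstructions of
# `tokenized`, `all_stopwords`, and `frequency_word` are illustrative
# stand-ins, not taken from the original script; plotting also requires
# matplotlib to be installed.
import nltk
from nltk.corpus import stopwords

nltk.download('stopwords', quiet=True)

text = "this is a small sample sentence for the stopword demo"
tokenized = text.split()
all_stopwords = set(stopwords.words('english'))

# Mask every non-stopword with '*' so only the stopwords stay visible.
processed_word_list = [w if w in all_stopwords else '*' for w in tokenized]
print(processed_word_list)

# Frequency distribution over the non-stopword tokens; .plot(10) draws
# up to the ten most common words as a line chart.
tokens_without_stopwords = [w for w in tokenized if w not in all_stopwords]
frequency_word = nltk.FreqDist(tokens_without_stopwords)
frequency_word.plot(10)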