@@ -1,7 +1,13 @@
from mastodon import Mastodon
from pprint import pprint
import os
import time
import datetime
today = datetime.date.today()
text_file = open("answers_results.txt", "a+")  # results log, opened in append mode
text_file.write("Data collected on : "+str(today)+"\n"+"\n")
# toot ids and instances at the same position refer to the same post
#FOR EXAMPLE
@@ -12,14 +18,6 @@ import datetime
toot_id = [101767654564328802, 101767613341391125]
instances = ["https://todon.nl/", "https://meow.social/"]
# ok, I see the problem now. Each token corresponds to one instance,
# so the instances list and the tokens in token.txt have to be in the same order.
# For that we can use enumerate() in the for loop,
# using the variable n (the iteration number) to determine which item of instances
# is the instance for that token.
# Another problem: token.readlines() leaves a line break (\n) on each token,
# so we have to remove it with access_token=token_line.replace('\n','')
# the token order is the same as well
#FOR EXAMPLE
# toot_id[0] goes with instances[0] and now with line 1 of the txt file
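Below is a minimal sketch of the pairing described above, assuming the standard Mastodon.py calls Mastodon(access_token=..., api_base_url=...) and status(); the variable names are illustrative and may differ from the script's own.

# sketch: pair each token line with the instance and toot id at the same index
with open('token.txt', 'r') as token:
    for n, token_line in enumerate(token.readlines()):
        access_token = token_line.replace('\n', '')   # strip the trailing line break
        mastodon = Mastodon(access_token=access_token,
                            api_base_url=instances[n])  # instance matching this token
        status = mastodon.status(toot_id[n])            # toot at the same position
        avatar = status['account']['avatar']
        name = status['account']['display_name']
        bot = status['account']['bot']
        content = status['content']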
@@ -50,4 +48,9 @@ with open('token.txt', 'r') as token:
pprint("Bot:" + "\n" + str(bot) + "\n" + "\n")
pprint("Content:" + "\n" + str(content) + "\n" + "\n")
text_file.write("Avatar:" + "\n" + str(avatar) + "\n" + "\n")
text_file.write("Name:" + "\n" + str(name) + "\n" + "\n")
text_file.write("Bot:" + "\n" + str(bot) + "\n" + "\n")
text_file.write("Content:" + "\n" + str(content) + "\n" + "\n" + "\n")
time.sleep(2)  # brief pause between API requests