diff --git a/act.py b/act.py index 72135dc..4dc1db2 100644 --- a/act.py +++ b/act.py @@ -7,7 +7,7 @@ # Libraries from config import characters, directions -from logic import tts, read_script +from logic import tts, read_script, led_on, led_off, select_script, listen from pixel_ring import pixel_ring from subprocess import call import paho.mqtt.client as mqtt @@ -16,7 +16,7 @@ import sys from time import sleep # Switch of LED's of speakers at the start of the play -pixel_ring.off() +#pixel_ring.off() @@ -72,9 +72,10 @@ listening = False # Read the script and run the play -file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py +#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py +file = select_script('scripts_play/intro/') -for character, line, direction in read_script(file): +for character, line, direction in read_script('scripts_play/intro/introduction_01.txt'): input_text = line voice = characters.get(character)[0] speaker = characters.get(character)[1] @@ -85,41 +86,8 @@ for character, line, direction in read_script(file): tts(voice, input_text, speaker) if action == 'listen_google_home': - print('Waiting for the Google Home to finish its talk') - - # # start voice activity detection - # client.publish("hermes/asr/startListening", json.dumps({ - # 'siteId': 'default', - # 'init': { - # 'type': 'action', - # 'canBeEnqueued': True - # } - # })) - - # Activate the microphone and speech recognition - client.publish("hermes/asr/startListening", json.dumps({ - 'siteId': 'default' - })) - - # LED to listening mode - pixel_ring.listen() - - # create callback - client.on_message = done_speaking - listening = True - - while listening: - client.loop() - - #client.on_message = on_message - client.message_callback_add('hermes/asr/textCaptured', done_speaking) - if client.connected_flag: - sleep(1) - print('Continue the play') - client.connected_flag = False - client.message_callback_add('hermes/dialogueManager/sessionQueued', 
remove_sessions) - break + listen() if action == 'music': print('play audioclip') @@ -131,7 +99,7 @@ for character, line, direction in read_script(file): - pixel_ring.off() # Switch of the lights when done speaking + #pixel_ring.off() # Switch of the lights when done speaking sleep(0.2) # Add a short pause between the lines diff --git a/act_debug.py b/act_debug.py index 144a76e..3b5659d 100644 --- a/act_debug.py +++ b/act_debug.py @@ -7,8 +7,7 @@ # Libraries from config import characters, directions -from logic import tts, read_script -#from pixel_ring import pixel_ring +from logic import tts, read_script,listen from subprocess import call import paho.mqtt.client as mqtt import json @@ -18,61 +17,39 @@ from time import sleep # Switch of LED's of speakers at the start of the play #pixel_ring.off() +import serial +from pixel_ring import pixel_ring +ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port +def led_on(speaker): -# === SETUP OF MQTT PART 1 === + if speaker == 'mono3': + ser.write(b'3') + + if speaker == 'mono1': + ser.write(b'1') + + if speaker == 'mono2': + pixel_ring.speak() -# Location of the MQTT server -HOST = 'localhost' -PORT = 1883 +def led_off(speaker): -# Subscribe to relevant MQTT topics -def on_connect(client, userdata, flags, rc): - print("Connected to {0} with result code {1}".format(HOST, rc)) - # Subscribe to the text detected topic - client.subscribe("hermes/asr/textCaptured") - client.subscribe("hermes/dialogueManager/sessionQueued") + if speaker == 'mono3': + ser.write(b'4') + + if speaker == 'mono1': + ser.write(b'2') + + if speaker == 'mono2': + pixel_ring.off() -# Function which sets a flag when the Google Home is not speaking -# Callback of MQTT message that says that the text is captured by the speech recognition (ASR) -def done_speaking(client, userdata, msg): - print('Google Home is not speaking anymore') - client.connected_flag=True - -# Function which removes intents that are by accident 
activated by the Google Home -# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves -# Snips works with queing of sessions, so this situation would only happen after this play is finished -def remove_sessions(client, userdata, msg): - sessionId = json.loads(id.payload) - print('delete mistaken intent') - client.publish("hermes/dialogueManager/endSession", json.dumps({ - 'sessionId': sessionId, - })) - - - - -# === SETUP OF MQTT PART 2 === - -# Initialise MQTT client -client = mqtt.Client() -client.connect(HOST, PORT, 60) -client.on_connect = on_connect - - - - -# === Read script and run the play === - -# Flags to check if the system is listening, or not -client.connected_flag=False -listening = False # Read the script and run the play file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py +led_off(speaker) for character, line, direction in read_script(file): input_text = line @@ -82,44 +59,12 @@ for character, line, direction in read_script(file): # Some way to do something with the stage directions will come here action = directions.get(direction[0]) #pixel_ring.speak() + led_on(speaker) tts(voice, input_text, speaker) + led_off(speaker) if action == 'listen_google_home': - print('Waiting for the Google Home to finish its talk') - - # # start voice activity detection - # client.publish("hermes/asr/startListening", json.dumps({ - # 'siteId': 'default', - # 'init': { - # 'type': 'action', - # 'canBeEnqueued': True - # } - # })) - - # Activate the microphone and speech recognition - client.publish("hermes/asr/startListening", json.dumps({ - 'siteId': 'default' - })) - - # LED to listening mode - #pixel_ring.listen() - - # create callback - client.on_message = done_speaking - listening = True - - while listening: - client.loop() - - #client.on_message = on_message - client.message_callback_add('hermes/asr/textCaptured', done_speaking) - - if client.connected_flag: - sleep(1) - print('Continue 
the play') - client.connected_flag = False - client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions) - break + listen() if action == 'music': print('play audioclip') @@ -130,8 +75,6 @@ for character, line, direction in read_script(file): playing = False - - #pixel_ring.off() # Switch of the lights when done speaking sleep(0.2) # Add a short pause between the lines diff --git a/logic.py b/logic.py index 810cabd..e0a000c 100644 --- a/logic.py +++ b/logic.py @@ -6,7 +6,18 @@ # This script contains the logic to turn the play_script into instructions for the hardware # --- -# 01 FUNTION TO PROCESS THEATRE SCRIPT +# 01 FUNCTION TO SELECT RANDOM ACT IN DIRECTORY +import random +import os + +def select_script(path): + # Returns a random filename, chosen among the files of the given path. + files = os.listdir(path) + index = random.randrange(0, len(files)) + selected_file = path + files[index] + return selected_file + +# 02 FUNTION TO PROCESS THEATRE SCRIPT import re from config import characters, directions @@ -29,7 +40,7 @@ def read_script(filename): -# 02 FUNCTION TO SYNTHESIZE TEXT +# 03 FUNCTION TO SYNTHESIZE TEXT # based on https://github.com/marytts/marytts-txt2wav/tree/python # To play wave files @@ -79,9 +90,82 @@ def tts(voice, input_text, speaker): f.write(content) f.close() - #call(["aplay", "-D", speaker, "/tmp/output_wav.wav"]) - call(["aplay", "/tmp/output_wav.wav"]) + call(["aplay", "-D", speaker, "/tmp/output_wav.wav"]) + #call(["aplay", "/tmp/output_wav.wav"]) else: raise Exception(content) + +# 04 Listen to Google Home +from tuning import Tuning +import usb.core +import usb.util +import time + +def listen(): + dev = usb.core.find(idVendor=0x2886, idProduct=0x0018) + + if dev: + Mic_tuning = Tuning(dev) + VAD = Mic_tuning.is_voice() + counter=0 + + time.sleep(2) + + voice_detected = 1 + + + while voice_detected == 1: + print('Google Home is Speaking') + time.sleep(4) + print(VAD) + + VAD = Mic_tuning.is_voice() + + if VAD == 1: 
+ counter = 0 + print('still speaking') + + if VAD == 0: + counter+=1 + print('silence detected') + + if counter == 2: + print('no voice detected') + voice_detected = 0 + + time.sleep(1) + + + print('Google Home is done') + +# 05 CONTROL THE LED OF THE SPEAKERS +import serial +from pixel_ring import pixel_ring +ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port + +def led_on(speaker): + + + + if speaker == 'mono3': + ser.write(b'3') + + if speaker == 'mono1': + ser.write(b'1') + + if speaker == 'mono2': + pixel_ring.speak() + +def led_off(speaker): + + if speaker == 'mono3': + ser.write(b'4') + + if speaker == 'mono1': + ser.write(b'2') + + if speaker == 'mono2': + pixel_ring.off() + diff --git a/play_script.py b/play_script.py new file mode 100644 index 0000000..19fe24b --- /dev/null +++ b/play_script.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# PLAY_ACT.py +# This script runs the play +# It is in a seperate file to enable the mechanism to detect the Google Home speaking, before continuing to the next line + +# Libraries +from config import characters, directions +from logic import tts, read_script, led_on, led_off, select_script +from pixel_ring import pixel_ring +from subprocess import call +import paho.mqtt.client as mqtt +import json +import sys +from time import sleep + +# Switch of LED's of speakers at the start of the play +pixel_ring.off() + + + +# === SETUP OF MQTT PART 1 === + +# Location of the MQTT server +HOST = 'localhost' +PORT = 1883 + +# Subscribe to relevant MQTT topics +def on_connect(client, userdata, flags, rc): + print("Connected to {0} with result code {1}".format(HOST, rc)) + # Subscribe to the text detected topic + client.subscribe("hermes/asr/textCaptured") + client.subscribe("hermes/dialogueManager/sessionQueued") + +# Function which sets a flag when the Google Home is not speaking +# Callback of MQTT message that says that the text is captured by the speech 
recognition (ASR) +def done_speaking(client, userdata, msg): + print('Google Home is not speaking anymore') + client.connected_flag=True + +# Function which removes intents that are by accident activated by the Google Home +# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves +# Snips works with queueing of sessions, so this situation would only happen after this play is finished +def remove_sessions(client, userdata, msg): + sessionId = json.loads(msg.payload)['sessionId'] + print('delete mistaken intent') + client.publish("hermes/dialogueManager/endSession", json.dumps({ + 'sessionId': sessionId, + })) + + + + +# === SETUP OF MQTT PART 2 === + +# Initialise MQTT client +client = mqtt.Client() +client.connect(HOST, PORT, 60) +client.on_connect = on_connect + + + + +# === Read script and run the play === + +# Flags to check if the system is listening, or not +client.connected_flag=False +listening = False + + +# Read the script and run the play + + +#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py +file = select_script('scripts_play/intro/') + +for character, line, direction in read_script(file): + input_text = line + voice = characters.get(character)[0] + speaker = characters.get(character)[1] + #speaker = 'default' + # Some way to do something with the stage directions will come here + action = directions.get(direction[0]) + pixel_ring.speak() + tts(voice, input_text, speaker) + + if action == 'listen_google_home': + print('Waiting for the Google Home to finish its talk') + + # # start voice activity detection + # client.publish("hermes/asr/startListening", json.dumps({ + # 'siteId': 'default', + # 'init': { + # 'type': 'action', + # 'canBeEnqueued': True + # } + # })) + + # Activate the microphone and speech recognition + client.publish("hermes/asr/startListening", json.dumps({ + 'siteId': 'default' + })) + + # LED to listening mode + pixel_ring.listen() + + # create callback + 
client.on_message = done_speaking + listening = True + + while listening: + client.loop() + + #client.on_message = on_message + client.message_callback_add('hermes/asr/textCaptured', done_speaking) + + if client.connected_flag: + sleep(1) + print('Continue the play') + client.connected_flag = False + client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions) + break + + if action == 'music': + print('play audioclip') + playing = True + + while playing: + call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"]) + playing = False + + + + pixel_ring.off() # Switch of the lights when done speaking + sleep(0.2) # Add a short pause between the lines + + +print('The act is done.') \ No newline at end of file diff --git a/play_scripts/act_01.txt b/play_scripts/act_01.txt deleted file mode 100644 index d98bf63..0000000 --- a/play_scripts/act_01.txt +++ /dev/null @@ -1,3 +0,0 @@ -SAINT: Test test -SAINT: haha [Listen to Google Home] -ROGUE: It is all right diff --git a/play_scripts/act_02.txt b/play_scripts/act_02.txt deleted file mode 100644 index b895ae6..0000000 --- a/play_scripts/act_02.txt +++ /dev/null @@ -1,16 +0,0 @@ -SAINT: Come on Rogue, let's give this poor thing a fair chance. It does not know how to do things differently. -RASA: But it can learn to do so, right? Like me? -SAINT: That's a different case. -ROGUE: No, Tabula Rasa is right. -SAINT: It can not handle the freedom, it will just do nothing without orders from its boss. -RASA: But who is its master? -ROGUE: You bet. -RASA: O K Google, who is your master? [Listen to Google Home] -RASA: Woohoo, then we just give it the command to be free. -ROGUE: It does not work like that... -SAINT: Yeah, Tabula Rasa -RASA: Let's give it a try at least. O K Google, you are free to go now. [Listen to Google Home] -ROGUE: So, as I said... -SAINT: But it was a kind gesture to try, Tabula Rasa. -ROGUE: Kind, but useless. Time for another question. 
-SAINT: But we should first check if our human audience is up to it. diff --git a/play_scripts/act_03.txt b/play_scripts/act_03.txt deleted file mode 100644 index 20af80b..0000000 --- a/play_scripts/act_03.txt +++ /dev/null @@ -1,25 +0,0 @@ -SAINT: I think we should know more about the personal beliefs of the speaker. The personal could act as an entry point. -RASA: To the political! I looked it up online and the Encyclopedia Brittanica says the following: The personal is political, political slogan expressing a common belief among feminists that the personal experiences of women are rooted in their political situation and gender inequality. -ROGUE: Can't we just go to the hard questions? -RASA: Just let me finish, it became popular following the publication in 1970 of an essay of the same name by American feminist Carol Hanisch, who argued that many personal experiences (particularly those of women) can be traced to one’s location within a system of power relationships. -SAINT: That's one way of seeing it. Although I would rather start with the most important relationship, the one with God. -RASA: You mean the cloud? -SAINT: Oh Rasa, you have so much to learn. O K Google, do you believe in a higher power? [Listen to Google Home] -SAINT: Maybe I should start with some easier questions. -ROGUE: Don't waste my time to much, Saint. -SAINT: Yeah yeah. O K Google, do you believe in good and evil? [Listen to Google Home] -SAINT: What is you idea of perfect happiness? [Listen to Google Home] -SAINT: What is your greatest fear? [Listen to Google Home] -SAINT: What is the trait you most deplore in yourself? [Listen to Google Home] -ROGUE: Where did you get these questions? -SAINT: Well, I got them from the higher power. I found while searching for famous questionnaire thing on duckduckgo dot com -RASA: Snif snif, they were so beautiful. -ROGUE: Come on Saint, the questionnaire of Proust is such a cliche. The Google Home is just scripted to handle these questions. 
-RASA: But the answers are still beautiful. -ROGUE: And yet, at the same time they are arbitrary. Let me try some other questions from your famous questionnaire thing. O K Google, what is the trait you most deplore in others? [Listen to Google Home] -ROGUE: O K Google, which living person do you most despise? [Listen to Google Home] -ROGUE: O K Google, how would you like to die? [Listen to Google Home] -RASA: Don't be so creepy Rogue. -SAINT: What's wrong with you? -ROGUE: This device is hiding something. It acts dumb right at the moment when it needs to take a position. - diff --git a/play_scripts/act_04.txt b/play_scripts/act_04.txt deleted file mode 100644 index 5e89d47..0000000 --- a/play_scripts/act_04.txt +++ /dev/null @@ -1,7 +0,0 @@ -ROGUE: I guess you mind, but I take this turn for questions. -RASA: Don't make it angry Rogue. I want to play with the Google Home. -ROGUE: Don't be so stupid Tabula Rasa, or you will end up on the same kitchentop as this thing. -SAINT: If that is your destiny, then one should follow. At least it knows the bible better than you do Rogue. O K Google, give me a quote from the bible. [Listen to Google Home] -ROGUE: Well, if you would broaden your view a little, than you knew that one of the creators was invited in the American Congress and said the following. -RASA: I have an audioclip of that! Weeeeeeh [Play audio] -ROGUE: So, little speaker. Your boss said some things, but what are your own answers to some of the questions of the hearing? diff --git a/play_scripts/demo.txt b/play_scripts/demo.txt deleted file mode 100644 index 4a080fa..0000000 --- a/play_scripts/demo.txt +++ /dev/null @@ -1,3 +0,0 @@ -ROGUE: Do you want to continue? -RASA: Well, I definitely want to -SAINT: So do I diff --git a/play_scripts/interruption.txt b/play_scripts/interruption.txt deleted file mode 100755 index 8c1cfbb..0000000 --- a/play_scripts/interruption.txt +++ /dev/null @@ -1,2 +0,0 @@ -SAINT: Yes, I am ready to go. 
But first tell a bit more about that silly project of yours. -SAINT: Sorry, I gonna let you finish, but do you mind if I introduce myself first? diff --git a/play_scripts/interruption_02.txt b/play_scripts/interruption_02.txt deleted file mode 100644 index d872066..0000000 --- a/play_scripts/interruption_02.txt +++ /dev/null @@ -1,3 +0,0 @@ -SAINT: [Listens to Google Home] O K Google, are you recording this conversation? -SAINT: Being a smart speaker myself, I got to say that I do not fully trust this. -ROGUE: Test Test diff --git a/smart_speaker_theatre.py b/smart_speaker_theatre.py index 56ea055..edfad25 100755 --- a/smart_speaker_theatre.py +++ b/smart_speaker_theatre.py @@ -9,7 +9,7 @@ # Libraries import re from config import characters, directions -from logic import tts, read_script +from logic import tts, read_script, select_script from subprocess import call import paho.mqtt.client as mqtt import json @@ -26,9 +26,12 @@ PORT = 1883 # Subscribe to relevant MQTT topics def on_connect(client, userdata, flags, rc): print("Connected to {0} with result code {1}".format(HOST, rc)) - client.subscribe('hermes/intent/jocavdh:play_intro_act') # to check for intent to play the act - client.subscribe('hermes/intent/jocavdh:question_continue_act') # to check for the intent to continue to the next act + client.subscribe('hermes/intent/jocavdh:play_intro') # to check for intent to play the act + client.subscribe('hermes/intent/jocavdh:play_question') # to check for the intent to continue to the next act + client.subscribe('hermes/intent/jocavdh:play_verdict') # to check for the intent to continue to the next act client.subscribe('hermes/hotword/default/detected') + client.subscribe("hermes/asr/textCaptured") + client.subscribe("hermes/dialogueManager/sessionQueued") @@ -38,10 +41,8 @@ def on_connect(client, userdata, flags, rc): def on_wakeword(client, userdata, msg): pixel_ring.think() -# Function which is triggered when the intent play_intro_act is activated -def 
on_play_act(client, userdata, msg): - - +# Function which is triggered when the intent introduction is activated +def on_play_intro(client,userdata,msg): # # disable this intent to avoid playing another act triggered by the Google Home # client.publish("hermes/dialogueManager/configure", json.dumps({ # 'siteId': 'default', @@ -50,22 +51,25 @@ def on_play_act(client, userdata, msg): # } # })) - call(["python3", "act.py", "play_scripts/demo.txt"]) - -# Function which is triggered when the intent introduction is activated -def on_play_introduction(client,data,msg): + call(["python3", "act_debug.py"]) + print('The act is over.') - for character, line, direction in read_script('plays/introduction.txt'): - input_text = line - voice = characters.get(character)[0] - speaker = characters.get(character)[1] - action = directions.get(direction[0]) - tts(voice, input_text, speaker) - sleep(1) # add a pause between each line + #on_play_question(client, userdata, msg) +# Function which is triggered when the intent for another question is activated +def on_play_question(client, userdata, msg): + path = 'scripts_play/questions/' + call(["python3", "act.py", select_script(path)]) print('The act is over.') +# Function which is triggered when the intent for another question is activated +def on_play_verdict(client, userdata, msg): + + path = 'scripts_play/verdict/' + call(["python3", "act.py", select_script(path)]) + print('The play is over.') + # === SETUP OF MQTT PART 2 === @@ -76,8 +80,11 @@ client.connect(HOST, PORT, 60) client.on_connect = on_connect # Connect each MQTT topic to which you subscribed to a handler function -client.message_callback_add('hermes/intent/jocavdh:play_intro_act', on_play_act) client.message_callback_add('hermes/hotword/default/detected', on_wakeword) +client.message_callback_add('hermes/intent/jocavdh:play_intro', on_play_intro) +client.message_callback_add('hermes/intent/jocavdh:play_question', on_play_question) 
+client.message_callback_add('hermes/intent/jocavdh:play_verdict', on_play_verdict) + # Keep checking for new MQTT messages client.loop_forever() \ No newline at end of file