diff --git a/README.md b/README.md
old mode 100755
new mode 100644
diff --git a/act.py b/act.py
old mode 100755
new mode 100644
index 72135dc..4dc1db2
--- a/act.py
+++ b/act.py
@@ -7,7 +7,7 @@
 # Libraries
 from config import characters, directions
-from logic import tts, read_script
+from logic import tts, read_script, led_on, led_off, select_script, listen
 from pixel_ring import pixel_ring
 from subprocess import call
 import paho.mqtt.client as mqtt
@@ -16,7 +16,7 @@ import sys
 from time import sleep
 # Switch of LED's of speakers at the start of the play
-pixel_ring.off()
+#pixel_ring.off()
@@ -72,9 +72,10 @@ listening = False
 # Read the script and run the play
-file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+file = select_script('scripts_play/intro/')
-for character, line, direction in read_script(file):
+for character, line, direction in read_script('scripts_play/intro/introduction_01.txt'):
     input_text = line
     voice = characters.get(character)[0]
     speaker = characters.get(character)[1]
@@ -85,41 +86,8 @@ for character, line, direction in read_script(file):
     tts(voice, input_text, speaker)
     if action == 'listen_google_home':
-        print('Waiting for the Google Home to finish its talk')
-
-        # # start voice activity detection
-        # client.publish("hermes/asr/startListening", json.dumps({
-        #     'siteId': 'default',
-        #     'init': {
-        #         'type': 'action',
-        #         'canBeEnqueued': True
-        #     }
-        # }))
-
-        # Activate the microphone and speech recognition
-        client.publish("hermes/asr/startListening", json.dumps({
-            'siteId': 'default'
-        }))
-
-        # LED to listening mode
-        pixel_ring.listen()
-
-        # create callback
-        client.on_message = done_speaking
-        listening = True
-
-        while listening:
-            client.loop()
-
-            #client.on_message = on_message
-            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
-            if client.connected_flag:
-                sleep(1)
-                print('Continue the play')
-                client.connected_flag = False
-                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
-                break
+        listen()
     if action == 'music':
         print('play audioclip')
@@ -131,7 +99,7 @@ for character, line, direction in read_script(file):
-    pixel_ring.off() # Switch of the lights when done speaking
+    #pixel_ring.off() # Switch of the lights when done speaking
     sleep(0.2) # Add a short pause between the lines
diff --git a/act_debug.py b/act_debug.py
old mode 100755
new mode 100644
index 144a76e..ed65e16
--- a/act_debug.py
+++ b/act_debug.py
@@ -7,8 +7,7 @@
 # Libraries
 from config import characters, directions
-from logic import tts, read_script
-#from pixel_ring import pixel_ring
+from logic import tts, read_script,listen
 from subprocess import call
 import paho.mqtt.client as mqtt
 import json
@@ -18,61 +17,17 @@ from time import sleep
 # Switch of LED's of speakers at the start of the play
 #pixel_ring.off()
+import serial
+from pixel_ring import pixel_ring
+#ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
-# === SETUP OF MQTT PART 1 ===
-
-# Location of the MQTT server
-HOST = 'localhost'
-PORT = 1883
-
-# Subscribe to relevant MQTT topics
-def on_connect(client, userdata, flags, rc):
-    print("Connected to {0} with result code {1}".format(HOST, rc))
-    # Subscribe to the text detected topic
-    client.subscribe("hermes/asr/textCaptured")
-    client.subscribe("hermes/dialogueManager/sessionQueued")
-# Function which sets a flag when the Google Home is not speaking
-# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
-def done_speaking(client, userdata, msg):
-    print('Google Home is not speaking anymore')
-    client.connected_flag=True
-
-# Function which removes intents that are by accident activated by the Google Home
-# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
-# Snips works with queing of sessions, so this situation would only happen after this play is finished
-def remove_sessions(client, userdata, msg):
-    sessionId = json.loads(id.payload)
-    print('delete mistaken intent')
-    client.publish("hermes/dialogueManager/endSession", json.dumps({
-        'sessionId': sessionId,
-    }))
-
-
-
-
-# === SETUP OF MQTT PART 2 ===
-
-# Initialise MQTT client
-client = mqtt.Client()
-client.connect(HOST, PORT, 60)
-client.on_connect = on_connect
-
-
-
-
-# === Read script and run the play ===
-
-# Flags to check if the system is listening, or not
-client.connected_flag=False
-listening = False
-
 # Read the script and run the play
 file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+#led_off(speaker)
 for character, line, direction in read_script(file):
     input_text = line
@@ -82,44 +37,12 @@ for character, line, direction in read_script(file):
     # Some way to do something with the stage directions will come here
     action = directions.get(direction[0])
     #pixel_ring.speak()
+    #led_on(speaker)
     tts(voice, input_text, speaker)
+    #led_off(speaker)
     if action == 'listen_google_home':
-        print('Waiting for the Google Home to finish its talk')
-
-        # # start voice activity detection
-        # client.publish("hermes/asr/startListening", json.dumps({
-        #     'siteId': 'default',
-        #     'init': {
-        #         'type': 'action',
-        #         'canBeEnqueued': True
-        #     }
-        # }))
-
-        # Activate the microphone and speech recognition
-        client.publish("hermes/asr/startListening", json.dumps({
-            'siteId': 'default'
-        }))
-
-        # LED to listening mode
-        #pixel_ring.listen()
-
-        # create callback
-        client.on_message = done_speaking
-        listening = True
-
-        while listening:
-            client.loop()
-
-            #client.on_message = on_message
-            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
-
-            if client.connected_flag:
-                sleep(1)
-                print('Continue the play')
-                client.connected_flag = False
-                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
-                break
+        listen()
     if action == 'music':
         print('play audioclip')
@@ -130,9 +53,7 @@ for character, line, direction in read_script(file):
         playing = False
-
-
     #pixel_ring.off() # Switch of the lights when done speaking
     sleep(0.2) # Add a short pause between the lines
-print('The act is done.')
\ No newline at end of file
+print('The act is done.')
diff --git a/config.py b/config.py
old mode 100755
new mode 100644
diff --git a/logic.py b/logic.py
old mode 100755
new mode 100644
index 810cabd..e0a000c
--- a/logic.py
+++ b/logic.py
@@ -6,7 +6,18 @@
 # This script contains the logic to turn the play_script into instructions for the hardware
 # ---
-# 01 FUNTION TO PROCESS THEATRE SCRIPT
+# 01 FUNCTION TO SELECT RANDOM ACT IN DIRECTORY
+import random
+import os
+
+def select_script(path):
+    # Returns a random filename, chosen among the files of the given path.
+    files = os.listdir(path)
+    index = random.randrange(0, len(files))
+    selected_file = path + files[index]
+    return selected_file
+
+# 02 FUNTION TO PROCESS THEATRE SCRIPT
 import re
 from config import characters, directions
@@ -29,7 +40,7 @@ def read_script(filename):
-# 02 FUNCTION TO SYNTHESIZE TEXT
+# 03 FUNCTION TO SYNTHESIZE TEXT
 # based on https://github.com/marytts/marytts-txt2wav/tree/python
 # To play wave files
@@ -79,9 +90,82 @@ def tts(voice, input_text, speaker):
         f.write(content)
         f.close()
-        #call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
-        call(["aplay", "/tmp/output_wav.wav"])
+        call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
+        #call(["aplay", "/tmp/output_wav.wav"])
     else:
         raise Exception(content)
+
+# 04 Listen to Google Home
+from tuning import Tuning
+import usb.core
+import usb.util
+import time
+
+def listen():
+    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
+
+    if dev:
+        Mic_tuning = Tuning(dev)
+        VAD = Mic_tuning.is_voice()
+        counter=0
+
+        time.sleep(2)
+
+        voice_detected = 1
+
+
+        while voice_detected == 1:
+            print('Google Home is Speaking')
+            time.sleep(4)
+            print(VAD)
+
+            VAD = Mic_tuning.is_voice()
+
+            if VAD == 1:
+                counter = 0
+                print('still speaking')
+
+            if VAD == 0:
+                counter+=1
+                print('silence detected')
+
+            if counter == 2:
+                print('no voice detected')
+                voice_detected = 0
+
+            time.sleep(1)
+
+
+        print('Google Home is done')
+
+# 05 CONTROL THE LED OF THE SPEAKERS
+import serial
+from pixel_ring import pixel_ring
+ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
+
+def led_on(speaker):
+
+
+
+    if speaker == 'mono3':
+        ser.write(b'3')
+
+    if speaker == 'mono1':
+        ser.write(b'1')
+
+    if speaker == 'mono2':
+        pixel_ring.speak()
+
+def led_off(speaker):
+
+    if speaker == 'mono3':
+        ser.write(b'4')
+
+    if speaker == 'mono1':
+        ser.write(b'2')
+
+    if speaker == 'mono2':
+        pixel_ring.off()
+
diff --git a/play_script.py b/play_script.py
new file mode 100644
index 0000000..19fe24b
--- /dev/null
+++ b/play_script.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# PLAY_ACT.py
+# This script runs the play
+# It is in a seperate file to enable the mechanism to detect the Google Home speaking, before continuing to the next line
+
+# Libraries
+from config import characters, directions
+from logic import tts, read_script, led_on, led_off, select_script
+from pixel_ring import pixel_ring
+from subprocess import call
+import paho.mqtt.client as mqtt
+import json
+import sys
+from time import sleep
+
+# Switch of LED's of speakers at the start of the play
+pixel_ring.off()
+
+
+
+# === SETUP OF MQTT PART 1 ===
+
+# Location of the MQTT server
+HOST = 'localhost'
+PORT = 1883
+
+# Subscribe to relevant MQTT topics
+def on_connect(client, userdata, flags, rc):
+    print("Connected to {0} with result code {1}".format(HOST, rc))
+    # Subscribe to the text detected topic
+    client.subscribe("hermes/asr/textCaptured")
+    client.subscribe("hermes/dialogueManager/sessionQueued")
+
+# Function which sets a flag when the Google Home is not speaking
+# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
+def done_speaking(client, userdata, msg):
+    print('Google Home is not speaking anymore')
+    client.connected_flag=True
+
+# Function which removes intents that are by accident activated by the Google Home
+# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
+# Snips works with queing of sessions, so this situation would only happen after this play is finished
+def remove_sessions(client, userdata, msg):
+    sessionId = json.loads(id.payload)
+    print('delete mistaken intent')
+    client.publish("hermes/dialogueManager/endSession", json.dumps({
+        'sessionId': sessionId,
+    }))
+
+
+
+
+# === SETUP OF MQTT PART 2 ===
+
+# Initialise MQTT client
+client = mqtt.Client()
+client.connect(HOST, PORT, 60)
+client.on_connect = on_connect
+
+
+
+
+# === Read script and run the play ===
+
+# Flags to check if the system is listening, or not
+client.connected_flag=False
+listening = False
+
+
+# Read the script and run the play
+
+
+#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+file = select_script('scripts_play/intro/')
+
+for character, line, direction in read_script(file):
+    input_text = line
+    voice = characters.get(character)[0]
+    speaker = characters.get(character)[1]
+    #speaker = 'default'
+    # Some way to do something with the stage directions will come here
+    action = directions.get(direction[0])
+    pixel_ring.speak()
+    tts(voice, input_text, speaker)
+
+    if action == 'listen_google_home':
+        print('Waiting for the Google Home to finish its talk')
+
+        # # start voice activity detection
+        # client.publish("hermes/asr/startListening", json.dumps({
+        #     'siteId': 'default',
+        #     'init': {
+        #         'type': 'action',
+        #         'canBeEnqueued': True
+        #     }
+        # }))
+
+        # Activate the microphone and speech recognition
+        client.publish("hermes/asr/startListening", json.dumps({
+            'siteId': 'default'
+        }))
+
+        # LED to listening mode
+        pixel_ring.listen()
+
+        # create callback
+        client.on_message = done_speaking
+        listening = True
+
+        while listening:
+            client.loop()
+
+            #client.on_message = on_message
+            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
+
+            if client.connected_flag:
+                sleep(1)
+                print('Continue the play')
+                client.connected_flag = False
+                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
+                break
+
+    if action == 'music':
+        print('play audioclip')
+        playing = True
+
+        while playing:
+            call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"])
+            playing = False
+
+
+
+    pixel_ring.off() # Switch of the lights when done speaking
+    sleep(0.2) # Add a short pause between the lines
+
+
+print('The act is done.')
\ No newline at end of file
diff --git a/scripts_play/debug/debug_01.txt b/scripts_play/debug/debug_01.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/debug/debug_02.txt b/scripts_play/debug/debug_02.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/debug/demo.txt b/scripts_play/debug/demo.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/debug/interruption_02.txt b/scripts_play/debug/interruption_02.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/intro/introduction_01.txt b/scripts_play/intro/introduction_01.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/questions/act_01.txt b/scripts_play/questions/act_01.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/questions/act_02.txt b/scripts_play/questions/act_02.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/questions/act_03.txt b/scripts_play/questions/act_03.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/questions/act_04.txt b/scripts_play/questions/act_04.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/verdict/verdict_01.txt b/scripts_play/verdict/verdict_01.txt
old mode 100755
new mode 100644
diff --git a/scripts_play/verdict/verdict_02.txt b/scripts_play/verdict/verdict_02.txt
old mode 100755
new mode 100644
diff --git a/smart_speaker_theatre.py b/smart_speaker_theatre.py
index 4bfc61f..edfad25 100755
--- a/smart_speaker_theatre.py
+++ b/smart_speaker_theatre.py
@@ -9,7 +9,7 @@
 # Libraries
 import re
 from config import characters, directions
-from logic import tts, read_script
+from logic import tts, read_script, select_script
 from subprocess import call
 import paho.mqtt.client as mqtt
 import json
@@ -26,23 +26,23 @@ PORT = 1883
 # Subscribe to relevant MQTT topics
 def on_connect(client, userdata, flags, rc):
     print("Connected to {0} with result code {1}".format(HOST, rc))
-    client.subscribe('hermes/intent/jocavdh:play_intro_act') # to check for intent to play the act
-    client.subscribe('hermes/intent/jocavdh:question_continue_act') # to check for the intent to continue to the next act
+    client.subscribe('hermes/intent/jocavdh:play_intro') # to check for intent to play the act
+    client.subscribe('hermes/intent/jocavdh:play_question') # to check for the intent to continue to the next act
+    client.subscribe('hermes/intent/jocavdh:play_verdict') # to check for the intent to continue to the next act
     client.subscribe('hermes/hotword/default/detected')
+    client.subscribe("hermes/asr/textCaptured")
+    client.subscribe("hermes/dialogueManager/sessionQueued")
 # === FUNCTIONS THAT ARE TRIGGERED WHEN AN INTENT IS DETECTED ===
-#def on_wakeword(client, userdata, msg):
-    #pixel_ring.think()
-
-
-# Function which is triggered when the intent play_intro_act is activated
-def on_play_act(client, userdata, msg):
-
+def on_wakeword(client, userdata, msg):
+    pixel_ring.think()
+# Function which is triggered when the intent introduction is activated
+def on_play_intro(client,userdata,msg):
     # # disable this intent to avoid playing another act triggered by the Google Home
     # client.publish("hermes/dialogueManager/configure", json.dumps({
     #     'siteId': 'default',
@@ -51,22 +51,25 @@ def on_play_act(client, userdata, msg):
     #     }
     # }))
-    call(["python3", "act.py", "play_scripts/demo.txt"])
-
-# Function which is triggered when the intent introduction is activated
-def on_play_introduction(client,data,msg):
+    call(["python3", "act_debug.py"])
+    print('The act is over.')
-    for character, line, direction in read_script('plays/introduction.txt'):
-        input_text = line
-        voice = characters.get(character)[0]
-        speaker = characters.get(character)[1]
-        action = directions.get(direction[0])
-        tts(voice, input_text, speaker)
-        sleep(1) # add a pause between each line
+    #on_play_question(client, userdata, msg)
+# Function which is triggered when the intent for another question is activated
+def on_play_question(client, userdata, msg):
+    path = 'scripts_play/questions/'
+    call(["python3", "act.py", select_script(path)])
     print('The act is over.')
+# Function which is triggered when the intent for another question is activated
+def on_play_verdict(client, userdata, msg):
+
+    path = 'scripts_play/verdict/'
+    call(["python3", "act.py", select_script(path)])
+    print('The play is over.')
+
 # === SETUP OF MQTT PART 2 ===
@@ -77,8 +80,11 @@ client.connect(HOST, PORT, 60)
 client.on_connect = on_connect
 # Connect each MQTT topic to which you subscribed to a handler function
-client.message_callback_add('hermes/intent/jocavdh:play_intro_act', on_play_act)
-#client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
+client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
+client.message_callback_add('hermes/intent/jocavdh:play_intro', on_play_intro)
+client.message_callback_add('hermes/intent/jocavdh:play_question', on_play_question)
+client.message_callback_add('hermes/intent/jocavdh:play_verdict', on_play_verdict)
+
 # Keep checking for new MQTT messages
-client.loop_forever()
+client.loop_forever()
\ No newline at end of file
diff --git a/sounds/congress.wav b/sounds/congress.wav
old mode 100755
new mode 100644