commit on mac pro

new_voices
joca 6 years ago
commit 4591346ddc

@@ -7,7 +7,7 @@
 # Libraries
 from config import characters, directions
-from logic import tts, read_script
+from logic import tts, read_script, led_on, led_off, select_script, listen
 from pixel_ring import pixel_ring
 from subprocess import call
 import paho.mqtt.client as mqtt
@@ -16,7 +16,7 @@ import sys
 from time import sleep
 # Switch of LED's of speakers at the start of the play
-pixel_ring.off()
+#pixel_ring.off()
@@ -72,9 +72,10 @@ listening = False
 # Read the script and run the play
-file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
-for character, line, direction in read_script(file):
+#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+file = select_script('scripts_play/intro/')
+for character, line, direction in read_script('scripts_play/intro/introduction_01.txt'):
     input_text = line
     voice = characters.get(character)[0]
     speaker = characters.get(character)[1]
@@ -85,41 +86,8 @@ for character, line, direction in read_script(file):
     tts(voice, input_text, speaker)
     if action == 'listen_google_home':
-        print('Waiting for the Google Home to finish its talk')
-        # # start voice activity detection
-        # client.publish("hermes/asr/startListening", json.dumps({
-        # 'siteId': 'default',
-        # 'init': {
-        # 'type': 'action',
-        # 'canBeEnqueued': True
-        # }
-        # }))
-        # Activate the microphone and speech recognition
-        client.publish("hermes/asr/startListening", json.dumps({
-            'siteId': 'default'
-        }))
-        # LED to listening mode
-        pixel_ring.listen()
-        # create callback
-        client.on_message = done_speaking
-        listening = True
-        while listening:
-            client.loop()
-            #client.on_message = on_message
-            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
-            if client.connected_flag:
-                sleep(1)
-                print('Continue the play')
-                client.connected_flag = False
-                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
-                break
+        listen()
     if action == 'music':
         print('play audioclip')
@@ -131,7 +99,7 @@ for character, line, direction in read_script(file):
-    pixel_ring.off() # Switch of the lights when done speaking
+    #pixel_ring.off() # Switch of the lights when done speaking
     sleep(0.2) # Add a short pause between the lines

@@ -7,8 +7,7 @@
 # Libraries
 from config import characters, directions
-from logic import tts, read_script
-#from pixel_ring import pixel_ring
+from logic import tts, read_script,listen
 from subprocess import call
 import paho.mqtt.client as mqtt
 import json
@@ -18,61 +17,17 @@ from time import sleep
 # Switch of LED's of speakers at the start of the play
 #pixel_ring.off()
-import serial
-from pixel_ring import pixel_ring
-#ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
-# === SETUP OF MQTT PART 1 ===
-# Location of the MQTT server
-HOST = 'localhost'
-PORT = 1883
-# Subscribe to relevant MQTT topics
-def on_connect(client, userdata, flags, rc):
-    print("Connected to {0} with result code {1}".format(HOST, rc))
-    # Subscribe to the text detected topic
-    client.subscribe("hermes/asr/textCaptured")
-    client.subscribe("hermes/dialogueManager/sessionQueued")
-# Function which sets a flag when the Google Home is not speaking
-# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
-def done_speaking(client, userdata, msg):
-    print('Google Home is not speaking anymore')
-    client.connected_flag=True
-# Function which removes intents that are by accident activated by the Google Home
-# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
-# Snips works with queing of sessions, so this situation would only happen after this play is finished
-def remove_sessions(client, userdata, msg):
-    sessionId = json.loads(id.payload)
-    print('delete mistaken intent')
-    client.publish("hermes/dialogueManager/endSession", json.dumps({
-        'sessionId': sessionId,
-    }))
-# === SETUP OF MQTT PART 2 ===
-# Initialise MQTT client
-client = mqtt.Client()
-client.connect(HOST, PORT, 60)
-client.on_connect = on_connect
-# === Read script and run the play ===
-# Flags to check if the system is listening, or not
-client.connected_flag=False
-listening = False
 # Read the script and run the play
 file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+#led_off(speaker)
 for character, line, direction in read_script(file):
     input_text = line
@@ -82,44 +37,12 @@ for character, line, direction in read_script(file):
     # Some way to do something with the stage directions will come here
     action = directions.get(direction[0])
     #pixel_ring.speak()
+    #led_on(speaker)
     tts(voice, input_text, speaker)
+    #led_off(speaker)
     if action == 'listen_google_home':
-        print('Waiting for the Google Home to finish its talk')
-        # # start voice activity detection
-        # client.publish("hermes/asr/startListening", json.dumps({
-        # 'siteId': 'default',
-        # 'init': {
-        # 'type': 'action',
-        # 'canBeEnqueued': True
-        # }
-        # }))
-        # Activate the microphone and speech recognition
-        client.publish("hermes/asr/startListening", json.dumps({
-            'siteId': 'default'
-        }))
-        # LED to listening mode
-        #pixel_ring.listen()
-        # create callback
-        client.on_message = done_speaking
-        listening = True
-        while listening:
-            client.loop()
-            #client.on_message = on_message
-            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
-            if client.connected_flag:
-                sleep(1)
-                print('Continue the play')
-                client.connected_flag = False
-                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
-                break
+        listen()
     if action == 'music':
         print('play audioclip')
@@ -130,9 +53,7 @@ for character, line, direction in read_script(file):
             playing = False
-    #pixel_ring.off() # Switch of the lights when done speaking
     sleep(0.2) # Add a short pause between the lines
 print('The act is done.')

@@ -6,7 +6,18 @@
 # This script contains the logic to turn the play_script into instructions for the hardware
 # ---
-# 01 FUNTION TO PROCESS THEATRE SCRIPT
+# 01 FUNCTION TO SELECT RANDOM ACT IN DIRECTORY
+import random
+import os
+def select_script(path):
+    # Returns a random filename, chosen among the files of the given path.
+    files = os.listdir(path)
+    index = random.randrange(0, len(files))
+    selected_file = path + files[index]
+    return selected_file
+# 02 FUNTION TO PROCESS THEATRE SCRIPT
 import re
 from config import characters, directions
@@ -29,7 +40,7 @@ def read_script(filename):
-# 02 FUNCTION TO SYNTHESIZE TEXT
+# 03 FUNCTION TO SYNTHESIZE TEXT
 # based on https://github.com/marytts/marytts-txt2wav/tree/python
 # To play wave files
@@ -79,9 +90,82 @@ def tts(voice, input_text, speaker):
         f.write(content)
         f.close()
-        #call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
-        call(["aplay", "/tmp/output_wav.wav"])
+        call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
+        #call(["aplay", "/tmp/output_wav.wav"])
     else:
         raise Exception(content)
+# 04 Listen to Google Home
+from tuning import Tuning
+import usb.core
+import usb.util
+import time
+def listen():
+    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
+    if dev:
+        Mic_tuning = Tuning(dev)
+        VAD = Mic_tuning.is_voice()
+        counter=0
+        time.sleep(2)
+        voice_detected = 1
+        while voice_detected == 1:
+            print('Google Home is Speaking')
+            time.sleep(4)
+            print(VAD)
+            VAD = Mic_tuning.is_voice()
+            if VAD == 1:
+                counter = 0
+                print('still speaking')
+            if VAD == 0:
+                counter+=1
+                print('silence detected')
+                if counter == 2:
+                    print('no voice detected')
+                    voice_detected = 0
+            time.sleep(1)
+    print('Google Home is done')
+# 05 CONTROL THE LED OF THE SPEAKERS
+import serial
+from pixel_ring import pixel_ring
+ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
+def led_on(speaker):
+    if speaker == 'mono3':
+        ser.write(b'3')
+    if speaker == 'mono1':
+        ser.write(b'1')
+    if speaker == 'mono2':
+        pixel_ring.speak()
+def led_off(speaker):
+    if speaker == 'mono3':
+        ser.write(b'4')
+    if speaker == 'mono1':
+        ser.write(b'2')
+    if speaker == 'mono2':
+        pixel_ring.off()

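Note: the helpers this commit adds to logic.py (select_script, listen, led_on/led_off) can be sanity-checked on their own before any act uses them. The following is a minimal sketch of such a manual test, not part of the repository: it assumes the Pi has the ReSpeaker 4-mic array attached and the Arduino on /dev/ttyACM0, because importing logic opens the serial port and loads the tuning module at import time, and it assumes scripts_play/intro/ contains at least one script file.

# manual smoke test for the new logic.py helpers (assumed environment, see note above)
from logic import select_script, read_script, listen, led_on, led_off

chosen = select_script('scripts_play/intro/')   # random act file from the folder
print('selected act:', chosen)
for character, line, direction in read_script(chosen):
    print(character, direction, line)           # inspect the parsed lines without playing audio

led_on('mono2')    # 'mono2' drives the pixel_ring; 'mono1' and 'mono3' go over the serial link
led_off('mono2')
listen()           # blocks until the VAD reports roughly two consecutive silent checks
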
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# PLAY_ACT.py
+# This script runs the play
+# It is in a seperate file to enable the mechanism to detect the Google Home speaking, before continuing to the next line
+# Libraries
+from config import characters, directions
+from logic import tts, read_script, led_on, led_off, select_script
+from pixel_ring import pixel_ring
+from subprocess import call
+import paho.mqtt.client as mqtt
+import json
+import sys
+from time import sleep
+# Switch of LED's of speakers at the start of the play
+pixel_ring.off()
+# === SETUP OF MQTT PART 1 ===
+# Location of the MQTT server
+HOST = 'localhost'
+PORT = 1883
+# Subscribe to relevant MQTT topics
+def on_connect(client, userdata, flags, rc):
+    print("Connected to {0} with result code {1}".format(HOST, rc))
+    # Subscribe to the text detected topic
+    client.subscribe("hermes/asr/textCaptured")
+    client.subscribe("hermes/dialogueManager/sessionQueued")
+# Function which sets a flag when the Google Home is not speaking
+# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
+def done_speaking(client, userdata, msg):
+    print('Google Home is not speaking anymore')
+    client.connected_flag=True
+# Function which removes intents that are by accident activated by the Google Home
+# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
+# Snips works with queing of sessions, so this situation would only happen after this play is finished
+def remove_sessions(client, userdata, msg):
+    sessionId = json.loads(id.payload)
+    print('delete mistaken intent')
+    client.publish("hermes/dialogueManager/endSession", json.dumps({
+        'sessionId': sessionId,
+    }))
+# === SETUP OF MQTT PART 2 ===
+# Initialise MQTT client
+client = mqtt.Client()
+client.connect(HOST, PORT, 60)
+client.on_connect = on_connect
+# === Read script and run the play ===
+# Flags to check if the system is listening, or not
+client.connected_flag=False
+listening = False
+# Read the script and run the play
+#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
+file = select_script('scripts_play/intro/')
+for character, line, direction in read_script(file):
+    input_text = line
+    voice = characters.get(character)[0]
+    speaker = characters.get(character)[1]
+    #speaker = 'default'
+    # Some way to do something with the stage directions will come here
+    action = directions.get(direction[0])
+    pixel_ring.speak()
+    tts(voice, input_text, speaker)
+    if action == 'listen_google_home':
+        print('Waiting for the Google Home to finish its talk')
+        # # start voice activity detection
+        # client.publish("hermes/asr/startListening", json.dumps({
+        # 'siteId': 'default',
+        # 'init': {
+        # 'type': 'action',
+        # 'canBeEnqueued': True
+        # }
+        # }))
+        # Activate the microphone and speech recognition
+        client.publish("hermes/asr/startListening", json.dumps({
+            'siteId': 'default'
+        }))
+        # LED to listening mode
+        pixel_ring.listen()
+        # create callback
+        client.on_message = done_speaking
+        listening = True
+        while listening:
+            client.loop()
+            #client.on_message = on_message
+            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
+            if client.connected_flag:
+                sleep(1)
+                print('Continue the play')
+                client.connected_flag = False
+                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
+                break
+    if action == 'music':
+        print('play audioclip')
+        playing = True
+        while playing:
+            call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"])
+            playing = False
+    pixel_ring.off() # Switch of the lights when done speaking
+    sleep(0.2) # Add a short pause between the lines
+print('The act is done.')

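For reference, the waiting mechanism in PLAY_ACT.py above boils down to: publish hermes/asr/startListening, then keep calling client.loop() until the hermes/asr/textCaptured callback flips a flag. The same idea condensed into one helper, as a sketch only; wait_for_google_home is not part of the repository, and it assumes the client is already connected and subscribed to hermes/asr/textCaptured, as done in on_connect above.

import json
import paho.mqtt.client as mqtt

def wait_for_google_home(client):
    # ask the Snips ASR to start listening on the default site
    client.publish("hermes/asr/startListening", json.dumps({'siteId': 'default'}))
    client.connected_flag = False
    # fires once text is captured, i.e. the Google Home has stopped talking
    client.message_callback_add('hermes/asr/textCaptured',
                                lambda c, userdata, msg: setattr(c, 'connected_flag', True))
    while not client.connected_flag:
        client.loop()   # process incoming MQTT traffic until the flag flips
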
@@ -9,7 +9,7 @@
 # Libraries
 import re
 from config import characters, directions
-from logic import tts, read_script
+from logic import tts, read_script, select_script
 from subprocess import call
 import paho.mqtt.client as mqtt
 import json
@@ -26,23 +26,23 @@ PORT = 1883
 # Subscribe to relevant MQTT topics
 def on_connect(client, userdata, flags, rc):
     print("Connected to {0} with result code {1}".format(HOST, rc))
-    client.subscribe('hermes/intent/jocavdh:play_intro_act') # to check for intent to play the act
-    client.subscribe('hermes/intent/jocavdh:question_continue_act') # to check for the intent to continue to the next act
+    client.subscribe('hermes/intent/jocavdh:play_intro') # to check for intent to play the act
+    client.subscribe('hermes/intent/jocavdh:play_question') # to check for the intent to continue to the next act
+    client.subscribe('hermes/intent/jocavdh:play_verdict') # to check for the intent to continue to the next act
     client.subscribe('hermes/hotword/default/detected')
+    client.subscribe("hermes/asr/textCaptured")
+    client.subscribe("hermes/dialogueManager/sessionQueued")
 # === FUNCTIONS THAT ARE TRIGGERED WHEN AN INTENT IS DETECTED ===
-#def on_wakeword(client, userdata, msg):
-    #pixel_ring.think()
-# Function which is triggered when the intent play_intro_act is activated
-def on_play_act(client, userdata, msg):
+def on_wakeword(client, userdata, msg):
+    pixel_ring.think()
+# Function which is triggered when the intent introduction is activated
+def on_play_intro(client,userdata,msg):
     # # disable this intent to avoid playing another act triggered by the Google Home
     # client.publish("hermes/dialogueManager/configure", json.dumps({
     # 'siteId': 'default',
@@ -51,22 +51,25 @@ def on_play_act(client, userdata, msg):
     # }
     # }))
-    call(["python3", "act.py", "play_scripts/demo.txt"])
+    call(["python3", "act_debug.py"])
+    print('The act is over.')
+    #on_play_question(client, userdata, msg)
-# Function which is triggered when the intent introduction is activated
-def on_play_introduction(client,data,msg):
-    for character, line, direction in read_script('plays/introduction.txt'):
-        input_text = line
-        voice = characters.get(character)[0]
-        speaker = characters.get(character)[1]
-        action = directions.get(direction[0])
-        tts(voice, input_text, speaker)
-        sleep(1) # add a pause between each line
+# Function which is triggered when the intent for another question is activated
+def on_play_question(client, userdata, msg):
+    path = 'scripts_play/questions/'
+    call(["python3", "act.py", select_script(path)])
     print('The act is over.')
+# Function which is triggered when the intent for another question is activated
+def on_play_verdict(client, userdata, msg):
+    path = 'scripts_play/verdict/'
+    call(["python3", "act.py", select_script(path)])
+    print('The play is over.')
 # === SETUP OF MQTT PART 2 ===
@@ -77,8 +80,11 @@ client.connect(HOST, PORT, 60)
 client.on_connect = on_connect
 # Connect each MQTT topic to which you subscribed to a handler function
-client.message_callback_add('hermes/intent/jocavdh:play_intro_act', on_play_act)
-#client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
+client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
+client.message_callback_add('hermes/intent/jocavdh:play_intro', on_play_intro)
+client.message_callback_add('hermes/intent/jocavdh:play_question', on_play_question)
+client.message_callback_add('hermes/intent/jocavdh:play_verdict', on_play_verdict)
 # Keep checking for new MQTT messages
 client.loop_forever()
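
With the callbacks registered above, each intent can be smoke-tested without the wakeword by publishing to its topic directly. A sketch, assuming the MQTT broker runs on localhost:1883 as configured above; the payload is a minimal stand-in rather than a full Hermes intent message, which these handlers do not inspect anyway.

import json
import paho.mqtt.publish as publish

# triggers on_play_intro, which in turn runs act_debug.py
publish.single('hermes/intent/jocavdh:play_intro',
               payload=json.dumps({'siteId': 'default'}),
               hostname='localhost', port=1883)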