#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# PLAY_ACT.py
# This script runs the play.
# It is in a separate file to enable the mechanism that detects whether the
# Google Home is still speaking before continuing to the next line.

# Libraries
from config import characters, directions
from logic import tts, read_script, led_on, led_off, select_script
from pixel_ring import pixel_ring
from subprocess import call
import paho.mqtt.client as mqtt
import json
import sys
from time import sleep

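# pixel_ring drives the LEDs of the speakers (used for the off/speak/listen states below)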
# Switch off the LEDs of the speakers at the start of the play
pixel_ring.off()


# === SETUP OF MQTT PART 1 ===

# Location of the MQTT server
HOST = 'localhost'
PORT = 1883

# Subscribe to the relevant MQTT topics
def on_connect(client, userdata, flags, rc):
    print("Connected to {0} with result code {1}".format(HOST, rc))
    # Subscribe to the topic that signals captured text
    client.subscribe("hermes/asr/textCaptured")
    client.subscribe("hermes/dialogueManager/sessionQueued")
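
# Note: because the subscriptions are made inside on_connect, they are made again
# whenever the client reconnects (the standard paho-mqtt pattern).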

# Callback for the MQTT message that says the text has been captured by the
# speech recognition (ASR), i.e. the Google Home is not speaking anymore.
# It sets a flag so the play can continue.
def done_speaking(client, userdata, msg):
    print('Google Home is not speaking anymore')
    client.connected_flag = True
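
# connected_flag is not a built-in paho-mqtt attribute; it is a custom flag set on
# the client object and polled in the loop below as a simple "done speaking" signal.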

# Remove intents that are accidentally activated by the Google Home,
# e.g. the Google Home says "introduce yourself", which could trigger the
# other speakers to introduce themselves.
# Snips queues sessions, so this situation would only happen after this play is finished.
def remove_sessions(client, userdata, msg):
    sessionId = json.loads(msg.payload)['sessionId']
    print('delete mistaken intent')
    client.publish("hermes/dialogueManager/endSession", json.dumps({
        'sessionId': sessionId,
    }))
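
# For reference, the sessionQueued payload handled above is JSON roughly of this
# shape (field names per the Snips Hermes protocol; only sessionId is used here):
#   {"sessionId": "...", "siteId": "default", "customData": null}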


# === SETUP OF MQTT PART 2 ===

# Initialise the MQTT client; the callback is registered before connecting
client = mqtt.Client()
client.on_connect = on_connect
client.connect(HOST, PORT, 60)


# === Read script and run the play ===

# Flags that track whether the system is listening or not
client.connected_flag = False
listening = False


# Read the script and run the play
#file = sys.argv[1]   # get the chosen act passed by smart_speaker_theatre.py
file = select_script('scripts_play/intro/')

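# read_script() yields (character, line, direction) tuples; the characters dict
# from config maps each character to a (voice, speaker) pair, and the directions
# dict maps a stage direction to the action handled further down.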
for character, line, direction in read_script(file):
    input_text = line
    voice = characters.get(character)[0]
    speaker = characters.get(character)[1]
    #speaker = 'default'
    # Look up the stage direction; for now only 'listen_google_home' and 'music' are handled below
    action = directions.get(direction[0])
    pixel_ring.speak()
    tts(voice, input_text, speaker)

    if action == 'listen_google_home':
        print('Waiting for the Google Home to finish its talk')

        # # start voice activity detection
        # client.publish("hermes/asr/startListening", json.dumps({
        #     'siteId': 'default',
        #     'init': {
        #         'type': 'action',
        #         'canBeEnqueued': True
        #     }
        # }))

        # Activate the microphone and speech recognition
        client.publish("hermes/asr/startListening", json.dumps({
            'siteId': 'default'
        }))

        # Set the LED ring to listening mode
        pixel_ring.listen()

        # Create the callback for incoming messages
        client.on_message = done_speaking
        listening = True

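        # client.loop() processes incoming MQTT messages; once the ASR publishes
        # textCaptured, done_speaking() sets connected_flag and the play continues.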
        while listening:
            client.loop()

            #client.on_message = on_message
            client.message_callback_add('hermes/asr/textCaptured', done_speaking)

            if client.connected_flag:
                sleep(1)
                print('Continue the play')
                client.connected_flag = False
                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
                break

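    # 'music' plays a fixed audio clip on the character's speaker via aplay
    # (the -D option selects the ALSA output device)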
    if action == 'music':
        print('play audioclip')
        playing = True

        while playing:
            call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"])
            playing = False

    pixel_ring.off()   # Switch off the lights when done speaking
    sleep(0.2)         # Add a short pause between the lines


print('The act is done.')