Compare commits
6 Commits
1603262c1f
...
be4a79c3e9
Author | SHA1 | Date |
---|---|---|
jocavdh | be4a79c3e9 | 5 years ago |
jocavdh | eb666374d6 | 5 years ago |
jocavdh | c6b775229e | 5 years ago |
jocavdh | 902048bf63 | 5 years ago |
jocavdh | da58fc7026 | 5 years ago |
jocavdh | 81d8e9317b | 5 years ago |
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
marytts/bin/marytts-server
|
@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo ./smart_speaker_theatre.py
|
@ -1,5 +1,20 @@
|
||||
# Smart speaker theatre backend
|
||||
# Script reader
|
||||
|
||||
Scripts which turn detected user intents into actions performed by the speakers.
|
||||
This programme reads out a theatre script, using the speech synthesizer Mary TTS.
|
||||
|
||||
Work in progress.
|
||||
Install mary-tts following the instructions in the pdf and start it
|
||||
then run the play_script.py
|
||||
|
||||
|
||||
----
|
||||
Writing new plays
|
||||
|
||||
As an input, write a play in the following format:
|
||||
|
||||
CHARACTERNAME: [ stage directions ] text to say
|
||||
|
||||
Put the script in the plays directory, and put the filename in play_script.py.
|
||||
|
||||
Use instructions.py to set up the characters and the voices.
|
||||
|
||||
Stage directions (for example lights, silences etc.) are still in development
|
||||
|
@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# PLAY_ACT.py
|
||||
# This script runs the play
|
||||
# It is in a separate file to enable the mechanism that detects when the Google Home is speaking, before continuing to the next line
|
||||
|
||||
# Libraries
|
||||
from config import characters, directions
|
||||
from logic import tts, read_script
|
||||
from pixel_ring import pixel_ring
|
||||
from subprocess import call
|
||||
import paho.mqtt.client as mqtt
|
||||
import json
|
||||
import sys
|
||||
from time import sleep
|
||||
|
||||
# Switch off the speakers' LEDs at the start of the play
|
||||
pixel_ring.off()
|
||||
|
||||
|
||||
|
||||
# === SETUP OF MQTT PART 1 ===
|
||||
|
||||
# Location of the MQTT server
|
||||
HOST = 'localhost'
|
||||
PORT = 1883
|
||||
|
||||
# Subscribe to relevant MQTT topics
|
||||
def on_connect(client, userdata, flags, rc):
|
||||
print("Connected to {0} with result code {1}".format(HOST, rc))
|
||||
# Subscribe to the text detected topic
|
||||
client.subscribe("hermes/asr/textCaptured")
|
||||
client.subscribe("hermes/dialogueManager/sessionQueued")
|
||||
|
||||
# Function which sets a flag when the Google Home is not speaking
|
||||
# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
|
||||
def done_speaking(client, userdata, msg):
|
||||
print('Google Home is not speaking anymore')
|
||||
client.connected_flag=True
|
||||
|
||||
# Function which removes intents that are by accident activated by the Google Home
|
||||
# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
|
||||
# Snips works with queing of sessions, so this situation would only happen after this play is finished
|
||||
def remove_sessions(client, userdata, msg):
|
||||
sessionId = json.loads(id.payload)
|
||||
print('delete mistaken intent')
|
||||
client.publish("hermes/dialogueManager/endSession", json.dumps({
|
||||
'sessionId': sessionId,
|
||||
}))
|
||||
|
||||
|
||||
|
||||
|
||||
# === SETUP OF MQTT PART 2 ===
|
||||
|
||||
# Initialise MQTT client
|
||||
client = mqtt.Client()
|
||||
client.connect(HOST, PORT, 60)
|
||||
client.on_connect = on_connect
|
||||
|
||||
|
||||
|
||||
|
||||
# === Read script and run the play ===
|
||||
|
||||
# Flags to check if the system is listening, or not
|
||||
client.connected_flag=False
|
||||
listening = False
|
||||
|
||||
|
||||
# Read the script and run the play
|
||||
|
||||
|
||||
file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
|
||||
|
||||
for character, line, direction in read_script(file):
|
||||
input_text = line
|
||||
voice = characters.get(character)[0]
|
||||
speaker = characters.get(character)[1]
|
||||
#speaker = 'default'
|
||||
# Some way to do something with the stage directions will come here
|
||||
action = directions.get(direction[0])
|
||||
pixel_ring.speak()
|
||||
tts(voice, input_text, speaker)
|
||||
|
||||
if action == 'listen_google_home':
|
||||
print('Waiting for the Google Home to finish its talk')
|
||||
|
||||
# # start voice activity detection
|
||||
# client.publish("hermes/asr/startListening", json.dumps({
|
||||
# 'siteId': 'default',
|
||||
# 'init': {
|
||||
# 'type': 'action',
|
||||
# 'canBeEnqueued': True
|
||||
# }
|
||||
# }))
|
||||
|
||||
# Activate the microphone and speech recognition
|
||||
client.publish("hermes/asr/startListening", json.dumps({
|
||||
'siteId': 'default'
|
||||
}))
|
||||
|
||||
# LED to listening mode
|
||||
pixel_ring.listen()
|
||||
|
||||
# create callback
|
||||
client.on_message = done_speaking
|
||||
listening = True
|
||||
|
||||
while listening:
|
||||
client.loop()
|
||||
|
||||
#client.on_message = on_message
|
||||
client.message_callback_add('hermes/asr/textCaptured', done_speaking)
|
||||
|
||||
if client.connected_flag:
|
||||
sleep(1)
|
||||
print('Continue the play')
|
||||
client.connected_flag = False
|
||||
client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
|
||||
break
|
||||
|
||||
if action == 'music':
|
||||
print('play audioclip')
|
||||
playing = True
|
||||
|
||||
while playing:
|
||||
call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"])
|
||||
playing = False
|
||||
|
||||
|
||||
|
||||
pixel_ring.off() # Switch of the lights when done speaking
|
||||
sleep(1) # Add a short pause between the lines
|
||||
|
||||
|
||||
print('The act is done.')
|
@ -1,106 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# Fuction recognize intents and connect them to the right actions
|
||||
import paho.mqtt.client as mqtt
|
||||
import json
|
||||
from time import sleep
|
||||
from logic import tts, read_script
|
||||
from config import characters, directions
|
||||
|
||||
HOST = 'localhost'
|
||||
PORT = 1883
|
||||
|
||||
|
||||
def on_connect(client, userdata, flags, rc):
|
||||
print("Connected to {0} with result code {1}".format(HOST, rc))
|
||||
# Subscribe to the text detected topic
|
||||
client.subscribe("hermes/nlu/intentNotRecognized")
|
||||
client.subscribe('hermes/intent/jocavdh:ask')
|
||||
client.subscribe('hermes/intent/jocavdh:answer_yes')
|
||||
|
||||
def on_introduce(client,data,msg):
|
||||
data = json.loads(msg.payload)
|
||||
sessionId = data['sessionId']
|
||||
|
||||
for character, line, direction in read_script('play_scripts/demo.txt'):
|
||||
input_text = line
|
||||
voice = characters.get(character)[0]
|
||||
speaker = characters.get(character)[1]
|
||||
#speaker = 'default'
|
||||
# Some way to do something with the stage directions will come here
|
||||
action = directions.get(direction[0])
|
||||
tts(voice, input_text, speaker)
|
||||
print('say this sentence')
|
||||
|
||||
if action == 'listen_audience':
|
||||
print('listen to the audience')
|
||||
|
||||
client.publish('hermes/dialogueManager/endSession', json.dumps({
|
||||
'sessionId': sessionId
|
||||
}))
|
||||
|
||||
client.publish('hermes/dialogueManager/startSession', json.dumps({
|
||||
'siteId': 'default',
|
||||
'init': {'type': 'action', 'canBeEnqueued': True, 'intentFilter':['jocavdh:answer_yes']}
|
||||
}))
|
||||
|
||||
break
|
||||
|
||||
if action == 'listen_google_home':
|
||||
print('ok google')
|
||||
client.publish('hermes/dialogueManager/endSession', json.dumps({
|
||||
'sessionId': sessionId
|
||||
}))
|
||||
|
||||
client.publish('hermes/dialogueManager/startSession', json.dumps({
|
||||
'siteId': 'default',
|
||||
'init': {'type': 'action', 'canBeEnqueued': True, 'sendIntentNotRecognized': True}
|
||||
}))
|
||||
|
||||
# client.publish("hermes/asr/toggleOn")
|
||||
# client.publish('hermes/asr/startListening', json.dumps({
|
||||
# 'siteId': 'default'
|
||||
# }))
|
||||
|
||||
break
|
||||
|
||||
|
||||
def on_answer(client,data,msg):
|
||||
|
||||
data = json.loads(msg.payload)
|
||||
answer_value = data['slots'][0]['value']['value']
|
||||
print(answer_value)
|
||||
|
||||
voice = "dfki-obadiah"
|
||||
speaker = 'default'
|
||||
|
||||
if answer_value == 'yes':
|
||||
input_text = 'Lorem ipsum'
|
||||
tts(voice, input_text, speaker)
|
||||
#on_introduce(client,data,msg)
|
||||
|
||||
if answer_value == 'no':
|
||||
input_text = 'nope nope nope'
|
||||
tts(voice, input_text, speaker)
|
||||
|
||||
print('The play is over.')
|
||||
|
||||
def onIntentNotRecognized(client, data, msg):
|
||||
data = json.loads(msg.payload)
|
||||
|
||||
print('not recognized')
|
||||
|
||||
on_introduce(client,data,msg)
|
||||
|
||||
|
||||
client = mqtt.Client()
|
||||
client.connect(HOST, PORT, 60)
|
||||
client.on_connect = on_connect
|
||||
client.message_callback_add('hermes/intent/jocavdh:ask', on_introduce)
|
||||
client.message_callback_add('hermes/intent/jocavdh:answer_yes', on_answer)
|
||||
client.message_callback_add("hermes/nlu/intentNotRecognized", onIntentNotRecognized)
|
||||
print('main')
|
||||
|
||||
client.loop_forever()
|
@ -1,21 +1,14 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# play_config.py
|
||||
# config.py
|
||||
# This script contains the basic configuration of the play
|
||||
# A list of the characters and their voices
|
||||
# A list of stage directions which connect to formal instructions for the hardware
|
||||
# ---
|
||||
|
||||
# Define sound output device here
|
||||
# sound_output_device = sc.get_speaker('Scarlett')
|
||||
|
||||
# Dictionary to link characters to the right voice
|
||||
characters = {"ROGUE":["dfki-prudence", "mono1"], "SAINT":["dfki-obadiah", "mono2"], "RASA":["dfki-poppy-hsmm", "mono3"] }
|
||||
characters = {"ROGUE":["cmu-slt-hsmm", "mono2"], "SAINT":["dfki-obadiah-hsmm", "mono3"], "RASA":["dfki-poppy-hsmm", "mono1"] }
|
||||
|
||||
# Dictionary to link stage directions to a particular formal action
|
||||
directions = {
|
||||
'Wait for audience':'listen_audience',
|
||||
'Listens to Google Home':'listen_google_home',
|
||||
'Music':'audio'
|
||||
}
|
||||
directions = {"Listen to Google Home":'listen_google_home','Music':'music'}
|
||||
|
@ -0,0 +1 @@
|
||||
Subproject commit 30de55966fdf0d0f1ee6e02cf356c56ba76b577b
|
@ -1,2 +1,3 @@
|
||||
ROGUE: [Listens to Google Home] Give me
|
||||
SAINT: Do that.
|
||||
ROGUE: Do you want to continue?
|
||||
RASA: Well, I definitely want to
|
||||
SAINT: So do I
|
||||
|
@ -0,0 +1,2 @@
|
||||
SAINT: Yes, I am ready to go. But first tell a bit more about that silly project of yours.
|
||||
SAINT: Sorry, I gonna let you finish, but do you mind if I introduce myself first?
|
@ -0,0 +1,3 @@
|
||||
SAINT: [Listens to Google Home] O K Google, are you recording this conversation?
|
||||
SAINT: Being a smart speaker myself, I got to say that I do not fully trust this.
|
||||
ROGUE: Test Test
|
@ -1 +0,0 @@
|
||||
ROGUE: Hi, I am rogue. Are you Joca?
|
@ -1,3 +0,0 @@
|
||||
# Needed for Snips platform 1.1.0 (0.61.1)
|
||||
hermes-python>=0.3.3
|
||||
toml
|
@ -1,34 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# Copy config.ini.default if it exists and config.ini doesn't exist.
|
||||
if [ -e config.ini.default ] && [ ! -e config.ini ]; then
|
||||
cp config.ini.default config.ini
|
||||
chmod a+w config.ini
|
||||
fi
|
||||
|
||||
PYTHON=$(command -v python3)
|
||||
VENV=venv
|
||||
|
||||
if [ -f "$PYTHON" ]; then
|
||||
|
||||
if [ ! -d $VENV ]; then
|
||||
# Create a virtual environment if it doesn't exist.
|
||||
$PYTHON -m venv $VENV
|
||||
else
|
||||
if [ -e $VENV/bin/python2 ]; then
|
||||
# If a Python2 environment exists, delete it first
|
||||
# before creating a new Python 3 virtual environment.
|
||||
rm -r $VENV
|
||||
$PYTHON -m venv $VENV
|
||||
fi
|
||||
fi
|
||||
|
||||
# Activate the virtual environment and install requirements.
|
||||
# shellcheck disable=SC1090
|
||||
. $VENV/bin/activate
|
||||
pip3 install -r requirements.txt
|
||||
|
||||
else
|
||||
>&2 echo "Cannot find Python 3. Please install it."
|
||||
fi
|
@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# SMART SPEAKER THEATRE
|
||||
# This script reads the triggers for activated user intents sent by Snips over MQTT
|
||||
# Using this triggers, it will start particular actions and scripts
|
||||
|
||||
|
||||
# Libraries
|
||||
import re
|
||||
from config import characters, directions
|
||||
from logic import tts, read_script
|
||||
from subprocess import call
|
||||
import paho.mqtt.client as mqtt
|
||||
import json
|
||||
from time import sleep
|
||||
from pixel_ring import pixel_ring
|
||||
|
||||
|
||||
# === SETUP OF MQTT PART 1 ===
|
||||
|
||||
# Location of the MQTT server
|
||||
HOST = 'localhost'
|
||||
PORT = 1883
|
||||
|
||||
# Subscribe to relevant MQTT topics
|
||||
def on_connect(client, userdata, flags, rc):
|
||||
print("Connected to {0} with result code {1}".format(HOST, rc))
|
||||
client.subscribe('hermes/intent/jocavdh:play_intro_act') # to check for intent to play the act
|
||||
client.subscribe('hermes/intent/jocavdh:question_continue_act') # to check for the intent to continue to the next act
|
||||
client.subscribe('hermes/hotword/default/detected')
|
||||
|
||||
|
||||
|
||||
|
||||
# === FUNCTIONS THAT ARE TRIGGERED WHEN AN INTENT IS DETECTED ===
|
||||
|
||||
def on_wakeword(client, userdata, msg):
|
||||
pixel_ring.think()
|
||||
|
||||
# Function which is triggered when the intent play_intro_act is activated
|
||||
def on_play_act(client, userdata, msg):
|
||||
|
||||
|
||||
# # disable this intent to avoid playing another act triggered by the Google Home
|
||||
# client.publish("hermes/dialogueManager/configure", json.dumps({
|
||||
# 'siteId': 'default',
|
||||
# 'intents': {
|
||||
# 'jocavdh:play': False
|
||||
# }
|
||||
# }))
|
||||
|
||||
call(["python3", "act.py", "play_scripts/demo.txt"])
|
||||
|
||||
# Function which is triggered when the intent introduction is activated
|
||||
def on_play_introduction(client,data,msg):
|
||||
|
||||
for character, line, direction in read_script('plays/introduction.txt'):
|
||||
input_text = line
|
||||
voice = characters.get(character)[0]
|
||||
speaker = characters.get(character)[1]
|
||||
action = directions.get(direction[0])
|
||||
tts(voice, input_text, speaker)
|
||||
sleep(1) # add a pause between each line
|
||||
|
||||
|
||||
print('The act is over.')
|
||||
|
||||
|
||||
|
||||
# === SETUP OF MQTT PART 2 ===
|
||||
|
||||
# Initialise MQTT client
|
||||
client = mqtt.Client()
|
||||
client.connect(HOST, PORT, 60)
|
||||
client.on_connect = on_connect
|
||||
|
||||
# Connect each MQTT topic to which you subscribed to a handler function
|
||||
client.message_callback_add('hermes/intent/jocavdh:play_intro_act', on_play_act)
|
||||
client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
|
||||
|
||||
# Keep checking for new MQTT messages
|
||||
client.loop_forever()
|
Binary file not shown.
Loading…
Reference in New Issue