make it work on mac pro

new_voices
jocavdh 6 years ago
parent 0414af685f
commit 3ad3e3e22f

@ -7,7 +7,7 @@
# Libraries # Libraries
from config import characters, directions from config import characters, directions
from logic import tts, read_script from logic import tts, read_script, led_on, led_off, select_script, listen
from pixel_ring import pixel_ring from pixel_ring import pixel_ring
from subprocess import call from subprocess import call
import paho.mqtt.client as mqtt import paho.mqtt.client as mqtt
@ -16,7 +16,7 @@ import sys
from time import sleep from time import sleep
# Switch of LED's of speakers at the start of the play # Switch of LED's of speakers at the start of the play
pixel_ring.off() #pixel_ring.off()
@ -72,9 +72,10 @@ listening = False
# Read the script and run the play # Read the script and run the play
file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py #file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
file = select_script('scripts_play/intro/')
for character, line, direction in read_script(file): for character, line, direction in read_script('scripts_play/intro/introduction_01.txt'):
input_text = line input_text = line
voice = characters.get(character)[0] voice = characters.get(character)[0]
speaker = characters.get(character)[1] speaker = characters.get(character)[1]
@ -85,41 +86,8 @@ for character, line, direction in read_script(file):
tts(voice, input_text, speaker) tts(voice, input_text, speaker)
if action == 'listen_google_home': if action == 'listen_google_home':
print('Waiting for the Google Home to finish its talk')
# # start voice activity detection
# client.publish("hermes/asr/startListening", json.dumps({
# 'siteId': 'default',
# 'init': {
# 'type': 'action',
# 'canBeEnqueued': True
# }
# }))
# Activate the microphone and speech recognition
client.publish("hermes/asr/startListening", json.dumps({
'siteId': 'default'
}))
# LED to listening mode
pixel_ring.listen()
# create callback
client.on_message = done_speaking
listening = True
while listening: listen()
client.loop()
#client.on_message = on_message
client.message_callback_add('hermes/asr/textCaptured', done_speaking)
if client.connected_flag:
sleep(1)
print('Continue the play')
client.connected_flag = False
client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
break
if action == 'music': if action == 'music':
print('play audioclip') print('play audioclip')
@ -131,7 +99,7 @@ for character, line, direction in read_script(file):
pixel_ring.off() # Switch of the lights when done speaking #pixel_ring.off() # Switch of the lights when done speaking
sleep(0.2) # Add a short pause between the lines sleep(0.2) # Add a short pause between the lines

@ -7,8 +7,7 @@
# Libraries # Libraries
from config import characters, directions from config import characters, directions
from logic import tts, read_script from logic import tts, read_script,listen
#from pixel_ring import pixel_ring
from subprocess import call from subprocess import call
import paho.mqtt.client as mqtt import paho.mqtt.client as mqtt
import json import json
@ -18,61 +17,39 @@ from time import sleep
# Switch of LED's of speakers at the start of the play # Switch of LED's of speakers at the start of the play
#pixel_ring.off() #pixel_ring.off()
import serial
from pixel_ring import pixel_ring
ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
def led_on(speaker):
# === SETUP OF MQTT PART 1 === if speaker == 'mono3':
ser.write(b'3')
# Location of the MQTT server if speaker == 'mono1':
HOST = 'localhost' ser.write(b'1')
PORT = 1883
# Subscribe to relevant MQTT topics if speaker == 'mono2':
def on_connect(client, userdata, flags, rc): pixel_ring.speak()
print("Connected to {0} with result code {1}".format(HOST, rc))
# Subscribe to the text detected topic
client.subscribe("hermes/asr/textCaptured")
client.subscribe("hermes/dialogueManager/sessionQueued")
# Function which sets a flag when the Google Home is not speaking def led_off(speaker):
# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
def done_speaking(client, userdata, msg):
print('Google Home is not speaking anymore')
client.connected_flag=True
# Function which removes intents that are by accident activated by the Google Home if speaker == 'mono3':
# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves ser.write(b'4')
# Snips works with queing of sessions, so this situation would only happen after this play is finished
def remove_sessions(client, userdata, msg):
sessionId = json.loads(id.payload)
print('delete mistaken intent')
client.publish("hermes/dialogueManager/endSession", json.dumps({
'sessionId': sessionId,
}))
if speaker == 'mono1':
ser.write(b'2')
if speaker == 'mono2':
pixel_ring.off()
# === SETUP OF MQTT PART 2 ===
# Initialise MQTT client
client = mqtt.Client()
client.connect(HOST, PORT, 60)
client.on_connect = on_connect
# === Read script and run the play ===
# Flags to check if the system is listening, or not
client.connected_flag=False
listening = False
# Read the script and run the play # Read the script and run the play
file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
led_off(speaker)
for character, line, direction in read_script(file): for character, line, direction in read_script(file):
input_text = line input_text = line
@ -82,44 +59,12 @@ for character, line, direction in read_script(file):
# Some way to do something with the stage directions will come here # Some way to do something with the stage directions will come here
action = directions.get(direction[0]) action = directions.get(direction[0])
#pixel_ring.speak() #pixel_ring.speak()
led_on(speaker)
tts(voice, input_text, speaker) tts(voice, input_text, speaker)
led_off(speaker)
if action == 'listen_google_home': if action == 'listen_google_home':
print('Waiting for the Google Home to finish its talk') listen()
# # start voice activity detection
# client.publish("hermes/asr/startListening", json.dumps({
# 'siteId': 'default',
# 'init': {
# 'type': 'action',
# 'canBeEnqueued': True
# }
# }))
# Activate the microphone and speech recognition
client.publish("hermes/asr/startListening", json.dumps({
'siteId': 'default'
}))
# LED to listening mode
#pixel_ring.listen()
# create callback
client.on_message = done_speaking
listening = True
while listening:
client.loop()
#client.on_message = on_message
client.message_callback_add('hermes/asr/textCaptured', done_speaking)
if client.connected_flag:
sleep(1)
print('Continue the play')
client.connected_flag = False
client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
break
if action == 'music': if action == 'music':
print('play audioclip') print('play audioclip')
@ -130,8 +75,6 @@ for character, line, direction in read_script(file):
playing = False playing = False
#pixel_ring.off() # Switch of the lights when done speaking
sleep(0.2) # Add a short pause between the lines sleep(0.2) # Add a short pause between the lines

@ -6,7 +6,18 @@
# This script contains the logic to turn the play_script into instructions for the hardware # This script contains the logic to turn the play_script into instructions for the hardware
# --- # ---
# 01 FUNTION TO PROCESS THEATRE SCRIPT # 01 FUNCTION TO SELECT RANDOM ACT IN DIRECTORY
import random
import os
def select_script(path):
    """Return the full path of a randomly chosen script file from *path*.

    Args:
        path: Directory containing the play-script text files. Works with
            or without a trailing slash (os.path.join handles both).

    Returns:
        The directory path joined with one randomly selected filename.

    Raises:
        IndexError: If the directory contains no files.
    """
    files = os.listdir(path)
    # random.choice replaces the manual randrange + index lookup, and
    # os.path.join fixes the bare string concatenation that silently
    # produced a broken path when *path* had no trailing slash.
    return os.path.join(path, random.choice(files))
# 02 FUNTION TO PROCESS THEATRE SCRIPT
import re import re
from config import characters, directions from config import characters, directions
@ -29,7 +40,7 @@ def read_script(filename):
# 02 FUNCTION TO SYNTHESIZE TEXT # 03 FUNCTION TO SYNTHESIZE TEXT
# based on https://github.com/marytts/marytts-txt2wav/tree/python # based on https://github.com/marytts/marytts-txt2wav/tree/python
# To play wave files # To play wave files
@ -79,9 +90,82 @@ def tts(voice, input_text, speaker):
f.write(content) f.write(content)
f.close() f.close()
#call(["aplay", "-D", speaker, "/tmp/output_wav.wav"]) call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
call(["aplay", "/tmp/output_wav.wav"]) #call(["aplay", "/tmp/output_wav.wav"])
else: else:
raise Exception(content) raise Exception(content)
# 04 Listen to Google Home
from tuning import Tuning
import usb.core
import usb.util
import time
def listen():
    """Block until the voice picked up by the USB mic array goes silent.

    Polls the mic's voice-activity-detection (VAD) flag and returns once
    two consecutive polls report silence. Used to wait for the Google
    Home to finish its spoken answer before the play continues.

    NOTE(review): idVendor 0x2886 / idProduct 0x0018 looks like the Seeed
    ReSpeaker mic array — confirm against the hardware in use.
    Silently does nothing if the device is not found.
    """
    dev = usb.core.find(idVendor=0x2886, idProduct=0x0018)
    if dev:
        Mic_tuning = Tuning(dev)
        VAD = Mic_tuning.is_voice()  # 1 while voice activity is detected
        counter=0
        # presumably gives the Google Home time to start answering — TODO confirm
        time.sleep(2)
        voice_detected = 1
        while voice_detected == 1:
            print('Google Home is Speaking')
            time.sleep(4)  # poll interval between VAD reads
            print(VAD)
            VAD = Mic_tuning.is_voice()
            if VAD == 1:
                # Voice again: restart the silence count from zero
                counter = 0
                print('still speaking')
            if VAD == 0:
                counter+=1
                print('silence detected')
                # Two silent polls in a row -> assume the answer is over
                if counter == 2:
                    print('no voice detected')
                    voice_detected = 0
        time.sleep(1)
        print('Google Home is done')
# 05 CONTROL THE LED OF THE SPEAKERS
import serial
from pixel_ring import pixel_ring
ser = serial.Serial('/dev/ttyACM0', 1000000) # Establish the connection on a specific port
def led_on(speaker):
    """Switch on the light for *speaker*.

    'mono1' and 'mono3' are driven over the serial link (single-byte
    commands), 'mono2' through the pixel_ring library.
    """
    serial_codes = {'mono3': b'3', 'mono1': b'1'}
    code = serial_codes.get(speaker)
    if code is not None:
        ser.write(code)
    elif speaker == 'mono2':
        pixel_ring.speak()
def led_off(speaker):
    """Switch off the light for *speaker*.

    Mirror image of led_on(): serial command bytes for 'mono1'/'mono3',
    pixel_ring for 'mono2'.
    """
    serial_codes = {'mono3': b'4', 'mono1': b'2'}
    code = serial_codes.get(speaker)
    if code is not None:
        ser.write(code)
    elif speaker == 'mono2':
        pixel_ring.off()

@ -0,0 +1,139 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PLAY_ACT.py
# This script runs the play
# It is in a separate file to enable the mechanism to detect the Google Home speaking, before continuing to the next line
# Libraries
from config import characters, directions
from logic import tts, read_script, led_on, led_off, select_script
from pixel_ring import pixel_ring
from subprocess import call
import paho.mqtt.client as mqtt
import json
import sys
from time import sleep
# Switch off the speakers' LEDs at the start of the play
pixel_ring.off()
# === SETUP OF MQTT PART 1 ===
# Location of the MQTT server (Snips/Hermes broker on this machine)
HOST = 'localhost'
PORT = 1883
# Subscribe to relevant MQTT topics
def on_connect(client, userdata, flags, rc):
    """MQTT on_connect callback: report the connection result and
    subscribe to the Hermes topics the play loop depends on."""
    print("Connected to {0} with result code {1}".format(HOST, rc))
    # Fired when the ASR has captured text (Google Home finished talking)
    client.subscribe("hermes/asr/textCaptured")
    # Fired when a dialogue session gets queued (possibly by accident)
    client.subscribe("hermes/dialogueManager/sessionQueued")
# Function which sets a flag when the Google Home is not speaking
# Callback of MQTT message that says that the text is captured by the speech recognition (ASR)
def done_speaking(client, userdata, msg):
    """MQTT callback for 'hermes/asr/textCaptured'.

    Captured text means the Google Home has stopped talking, so set the
    flag the main play loop polls to continue with the next line.
    """
    print('Google Home is not speaking anymore')
    client.connected_flag=True
# Function which removes intents that are by accident activated by the Google Home
# e.g. The google home says introduce yourself, which could trigger the other speakers to introduce themselves
# Snips works with queing of sessions, so this situation would only happen after this play is finished
def remove_sessions(client, userdata, msg):
    """End a dialogue session that was queued by mistake.

    Callback for 'hermes/dialogueManager/sessionQueued': extracts the
    session id from the MQTT message payload and asks the dialogue
    manager to end that session, so an intent triggered by accident
    (e.g. by the Google Home's own speech) never runs.
    """
    # Bug fix: the original read `json.loads(id.payload)` — `id` is the
    # Python builtin, not the callback's `msg` argument — which raised
    # AttributeError and never extracted the session id. The queued
    # payload carries the id under the 'sessionId' key (Hermes protocol).
    sessionId = json.loads(msg.payload)['sessionId']
    print('delete mistaken intent')
    client.publish("hermes/dialogueManager/endSession", json.dumps({
        'sessionId': sessionId,
    }))
# === SETUP OF MQTT PART 2 ===
# Initialise MQTT client and connect to the local broker
client = mqtt.Client()
client.connect(HOST, PORT, 60)  # 60 s keepalive
client.on_connect = on_connect
# === Read script and run the play ===
# Flags to check if the system is listening, or not
client.connected_flag=False
listening = False
# Pick the act to perform: a random script from the intro directory
#file = sys.argv[1] # get the chosen act passed by smart_speaker_theatre.py
file = select_script('scripts_play/intro/')
# Run the play: speak each scripted line, then act on the stage
# direction attached to it (listen to the Google Home, play music).
for character, line, direction in read_script(file):
    input_text = line
    voice = characters.get(character)[0]    # TTS voice for this character
    speaker = characters.get(character)[1]  # audio output for this character
    #speaker = 'default'
    # Some way to do something with the stage directions will come here
    action = directions.get(direction[0])
    pixel_ring.speak()  # LED feedback while the line is spoken
    tts(voice, input_text, speaker)
    if action == 'listen_google_home':
        print('Waiting for the Google Home to finish its talk')
        # # start voice activity detection
        # client.publish("hermes/asr/startListening", json.dumps({
        #     'siteId': 'default',
        #     'init': {
        #         'type': 'action',
        #         'canBeEnqueued': True
        #     }
        # }))
        # Activate the microphone and speech recognition
        client.publish("hermes/asr/startListening", json.dumps({
            'siteId': 'default'
        }))
        # LED to listening mode
        pixel_ring.listen()
        # create callback
        client.on_message = done_speaking
        listening = True
        # Pump MQTT until done_speaking() sets connected_flag, i.e. the
        # ASR captured text and the Google Home finished its answer.
        # NOTE(review): the loop exits only via break; `listening` is
        # never reset — confirm this is intentional.
        while listening:
            client.loop()
            #client.on_message = on_message
            client.message_callback_add('hermes/asr/textCaptured', done_speaking)
            if client.connected_flag:
                sleep(1)
                print('Continue the play')
                client.connected_flag = False
                # Clean up sessions the Google Home queued by accident
                client.message_callback_add('hermes/dialogueManager/sessionQueued', remove_sessions)
                break
    if action == 'music':
        print('play audioclip')
        playing = True
        while playing:
            call(["aplay", "-D", speaker, "/usr/share/snips/congress.wav"])
            playing = False
    pixel_ring.off() # Switch off the lights when done speaking
    sleep(0.2) # Add a short pause between the lines
print('The act is done.')

@ -1,3 +0,0 @@
SAINT: Test test
SAINT: haha [Listen to Google Home]
ROGUE: It is all right

@ -1,16 +0,0 @@
SAINT: Come on Rogue, let's give this poor thing a fair chance. It does not know how to do things differently.
RASA: But it can learn to do so, right? Like me?
SAINT: That's a different case.
ROGUE: No, Tabula Rasa is right.
SAINT: It can not handle the freedom, it will just do nothing without orders from its boss.
RASA: But who is its master?
ROGUE: You bet.
RASA: O K Google, who is your master? [Listen to Google Home]
RASA: Woohoo, then we just give it the command to be free.
ROGUE: It does not work like that...
SAINT: Yeah, Tabula Rasa
RASA: Let's give it a try at least. O K Google, you are free to go now. [Listen to Google Home]
ROGUE: So, as I said...
SAINT: But it was a kind gesture to try, Tabula Rasa.
ROGUE: Kind, but useless. Time for another question.
SAINT: But we should first check if our human audience is up to it.

@ -1,25 +0,0 @@
SAINT: I think we should know more about the personal beliefs of the speaker. The personal could act as an entry point.
RASA: To the political! I looked it up online and the Encyclopedia Brittanica says the following: The personal is political, political slogan expressing a common belief among feminists that the personal experiences of women are rooted in their political situation and gender inequality.
ROGUE: Can't we just go to the hard questions?
RASA: Just let me finish, it became popular following the publication in 1970 of an essay of the same name by American feminist Carol Hanisch, who argued that many personal experiences (particularly those of women) can be traced to one's location within a system of power relationships.
SAINT: That's one way of seeing it. Although I would rather start with the most important relationship, the one with God.
RASA: You mean the cloud?
SAINT: Oh Rasa, you have so much to learn. O K Google, do you believe in a higher power? [Listen to Google Home]
SAINT: Maybe I should start with some easier questions.
ROGUE: Don't waste my time too much, Saint.
SAINT: Yeah yeah. O K Google, do you believe in good and evil? [Listen to Google Home]
SAINT: What is your idea of perfect happiness? [Listen to Google Home]
SAINT: What is your greatest fear? [Listen to Google Home]
SAINT: What is the trait you most deplore in yourself? [Listen to Google Home]
ROGUE: Where did you get these questions?
SAINT: Well, I got them from the higher power. I found them while searching for a famous questionnaire thing on duckduckgo dot com
RASA: Snif snif, they were so beautiful.
ROGUE: Come on Saint, the questionnaire of Proust is such a cliche. The Google Home is just scripted to handle these questions.
RASA: But the answers are still beautiful.
ROGUE: And yet, at the same time they are arbitrary. Let me try some other questions from your famous questionnaire thing. O K Google, what is the trait you most deplore in others? [Listen to Google Home]
ROGUE: O K Google, which living person do you most despise? [Listen to Google Home]
ROGUE: O K Google, how would you like to die? [Listen to Google Home]
RASA: Don't be so creepy Rogue.
SAINT: What's wrong with you?
ROGUE: This device is hiding something. It acts dumb right at the moment when it needs to take a position.

@ -1,7 +0,0 @@
ROGUE: I guess you mind, but I take this turn for questions.
RASA: Don't make it angry Rogue. I want to play with the Google Home.
ROGUE: Don't be so stupid Tabula Rasa, or you will end up on the same kitchentop as this thing.
SAINT: If that is your destiny, then one should follow. At least it knows the bible better than you do Rogue. O K Google, give me a quote from the bible. [Listen to Google Home]
ROGUE: Well, if you would broaden your view a little, then you would know that one of the creators was invited to the American Congress and said the following.
RASA: I have an audioclip of that! Weeeeeeh [Play audio]
ROGUE: So, little speaker. Your boss said some things, but what are your own answers to some of the questions of the hearing?

@ -1,3 +0,0 @@
ROGUE: Do you want to continue?
RASA: Well, I definitely want to
SAINT: So do I

@ -1,2 +0,0 @@
SAINT: Yes, I am ready to go. But first tell a bit more about that silly project of yours.
SAINT: Sorry, I gonna let you finish, but do you mind if I introduce myself first?

@ -1,3 +0,0 @@
SAINT: [Listens to Google Home] O K Google, are you recording this conversation?
SAINT: Being a smart speaker myself, I got to say that I do not fully trust this.
ROGUE: Test Test

@ -9,7 +9,7 @@
# Libraries # Libraries
import re import re
from config import characters, directions from config import characters, directions
from logic import tts, read_script from logic import tts, read_script, select_script
from subprocess import call from subprocess import call
import paho.mqtt.client as mqtt import paho.mqtt.client as mqtt
import json import json
@ -26,9 +26,12 @@ PORT = 1883
# Subscribe to relevant MQTT topics # Subscribe to relevant MQTT topics
def on_connect(client, userdata, flags, rc): def on_connect(client, userdata, flags, rc):
print("Connected to {0} with result code {1}".format(HOST, rc)) print("Connected to {0} with result code {1}".format(HOST, rc))
client.subscribe('hermes/intent/jocavdh:play_intro_act') # to check for intent to play the act client.subscribe('hermes/intent/jocavdh:play_intro') # to check for intent to play the act
client.subscribe('hermes/intent/jocavdh:question_continue_act') # to check for the intent to continue to the next act client.subscribe('hermes/intent/jocavdh:play_question') # to check for the intent to continue to the next act
client.subscribe('hermes/intent/jocavdh:play_verdict') # to check for the intent to continue to the next act
client.subscribe('hermes/hotword/default/detected') client.subscribe('hermes/hotword/default/detected')
client.subscribe("hermes/asr/textCaptured")
client.subscribe("hermes/dialogueManager/sessionQueued")
@ -38,10 +41,8 @@ def on_connect(client, userdata, flags, rc):
def on_wakeword(client, userdata, msg): def on_wakeword(client, userdata, msg):
pixel_ring.think() pixel_ring.think()
# Function which is triggered when the intent play_intro_act is activated # Function which is triggered when the intent introduction is activated
def on_play_act(client, userdata, msg): def on_play_intro(client,userdata,msg):
# # disable this intent to avoid playing another act triggered by the Google Home # # disable this intent to avoid playing another act triggered by the Google Home
# client.publish("hermes/dialogueManager/configure", json.dumps({ # client.publish("hermes/dialogueManager/configure", json.dumps({
# 'siteId': 'default', # 'siteId': 'default',
@ -50,22 +51,25 @@ def on_play_act(client, userdata, msg):
# } # }
# })) # }))
call(["python3", "act.py", "play_scripts/demo.txt"]) call(["python3", "act_debug.py"])
print('The act is over.')
# Function which is triggered when the intent introduction is activated
def on_play_introduction(client,data,msg):
for character, line, direction in read_script('plays/introduction.txt'): #on_play_question(client, userdata, msg)
input_text = line
voice = characters.get(character)[0]
speaker = characters.get(character)[1]
action = directions.get(direction[0])
tts(voice, input_text, speaker)
sleep(1) # add a pause between each line
# Function which is triggered when the intent for another question is activated
def on_play_question(client, userdata, msg):
path = 'scripts_play/questions/'
call(["python3", "act.py", select_script(path)])
print('The act is over.') print('The act is over.')
# Function which is triggered when the intent for another question is activated
def on_play_verdict(client, userdata, msg):
path = 'scripts_play/verdict/'
call(["python3", "act.py", select_script(path)])
print('The play is over.')
# === SETUP OF MQTT PART 2 === # === SETUP OF MQTT PART 2 ===
@ -76,8 +80,11 @@ client.connect(HOST, PORT, 60)
client.on_connect = on_connect client.on_connect = on_connect
# Connect each MQTT topic to which you subscribed to a handler function # Connect each MQTT topic to which you subscribed to a handler function
client.message_callback_add('hermes/intent/jocavdh:play_intro_act', on_play_act)
client.message_callback_add('hermes/hotword/default/detected', on_wakeword) client.message_callback_add('hermes/hotword/default/detected', on_wakeword)
client.message_callback_add('hermes/intent/jocavdh:play_intro', on_play_intro)
client.message_callback_add('hermes/intent/jocavdh:play_question', on_play_question)
client.message_callback_add('hermes/intent/jocavdh:play_verdict', on_play_verdict)
# Keep checking for new MQTT messages # Keep checking for new MQTT messages
client.loop_forever() client.loop_forever()
Loading…
Cancel
Save