renamed folders added
parent
5ee315757a
commit
49963466a1
@ -0,0 +1,20 @@
|
|||||||
|
# Script reader
|
||||||
|
|
||||||
|
This programme reads out a theatre script, using the speech synthesizer Mary TTS.
|
||||||
|
|
||||||
|
Install mary-tts following the instructions in the PDF, and start it.
|
||||||
|
Then run play_script.py.
|
||||||
|
|
||||||
|
|
||||||
|
----
|
||||||
|
Writing new plays
|
||||||
|
|
||||||
|
As an input, write a play in the following format:
|
||||||
|
|
||||||
|
CHARACTERNAME: [ stage directions ] text to say
|
||||||
|
|
||||||
|
Put the script in the plays directory, and put the filename in play_script.py.
|
||||||
|
|
||||||
|
Use instructions.py to set-up the characters and the voices.
|
||||||
|
|
||||||
|
Stage directions (for example lights, silences etc.) are still in development
|
@ -0,0 +1,11 @@
|
|||||||
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Voice configuration for the script reader (imported by play_script.py).

# # Define sound output device here
# sound_output_device = sc.get_speaker('Scarlett')

# Dictionary to link characters to the right voice.
# Maps character name (as it appears before the ':' in a play file)
# to [MaryTTS voice name, ALSA output device name].
characters = {"HUMAN":["dfki-prudence", "mono1"], "TABULA RASA":["dfki-obadiah", "mono4"] }

# Dictionary to link stage directions to a particular formal action.
# Currently empty -- stage-direction handling is still in development
# (see README).
directions = {}
|
@ -0,0 +1,94 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# FUNCTION TO PROCESS THEATRE SCRIPT
|
||||||
|
import re
|
||||||
|
from instructions import characters
|
||||||
|
# from instructions import directions
|
||||||
|
|
||||||
|
def read_script(filename):
    """Parse a theatre-script file into (character, text, directions) tuples.

    Each script line is expected in the form:

        CHARACTERNAME: [ stage directions ] text to say

    The bracketed stage directions are optional.  Stage directions are
    split on '.' into a list of individual directions; lines without
    directions yield an empty list.  Lines that do not match the format
    (e.g. blank lines) are skipped instead of crashing.

    Returns a list of (character, text, directions) tuples in file order.
    """
    # Compile once instead of re-matching the raw pattern per line.
    line_pattern = re.compile(
        r'(?P<character>^.+?):\s?(\[(?P<stage_directions>[^]]+)\])?\s?(?P<text>.*)')
    lines = []
    with open(filename, 'r') as f:
        for line in f:
            parts = line_pattern.match(line)
            if parts is None:
                # Blank or malformed line: previously this raised
                # AttributeError; now it is simply skipped.
                continue
            raw_directions = parts.group('stage_directions')
            # No directions -> empty list (previously str(None) gave ['None']).
            directions = raw_directions.split(".") if raw_directions else []
            lines.append((parts.group('character'), parts.group('text'), directions))
    return lines
|
||||||
|
|
||||||
|
|
||||||
|
# FUNCTION TO SYNTHESIZE TEXT
# based on https://github.com/marytts/marytts-txt2wav/tree/python

# To play wave files
from subprocess import call

# MaryTTS HTTP server information -- the server must already be running
# (see README; default MaryTTS port is 59125).
mary_host = "localhost"
mary_port = "59125"

# HTTP + URL packages
import httplib2
from urllib.parse import urlencode, quote # For URL creation
|
||||||
|
|
||||||
|
def tts(voice, input_text, speaker):
    """Synthesize `input_text` with the MaryTTS HTTP server and play it.

    voice      -- MaryTTS voice name (must be installed on the server).
    input_text -- the text to speak.
    speaker    -- ALSA output device name passed to `aplay -D`.

    Raises Exception with the server response body when the server does
    not return a WAV file (e.g. unknown voice).
    """
    # Build the query
    query_hash = {"INPUT_TEXT": input_text,
                  "INPUT_TYPE": "TEXT",   # Input text
                  "LOCALE": "en_GB",
                  "VOICE": voice,         # Voice information (needs to be compatible)
                  "OUTPUT_TYPE": "AUDIO",
                  "AUDIO": "WAVE",        # Audio information (need both)
                  }
    query = urlencode(query_hash)
    print("query = \"http://%s:%s/process?%s\"" % (mary_host, mary_port, query))

    # Run the query against the MaryTTS HTTP server
    h_mary = httplib2.Http()
    resp, content = h_mary.request("http://%s:%s/process?" % (mary_host, mary_port), "POST", query)

    # Decode the wav file or raise an exception if no wav file was returned
    if resp["content-type"] == "audio/x-wav":
        # Write the wav file; `with` guarantees the handle is closed even
        # if the write fails (the original leaked the handle on error).
        with open("/tmp/output_wav.wav", "wb") as f:
            f.write(content)

        # Play the wav file on the requested ALSA device.
        call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
    else:
        raise Exception(content)
|
||||||
|
|
||||||
|
from time import sleep

# RUN THE PLAY
# Read the play line by line and speak each one with its character's voice.
for character, line, directions in read_script('plays/text02.txt'):
    input_text = line
    try:
        voice = characters[character][0]
        # speaker = characters[character][1]
    except (KeyError, TypeError):
        # Previously an unknown character crashed with an opaque
        # "'NoneType' object is not subscriptable" -- name the problem.
        raise KeyError("No voice configured for character %r -- add it to "
                       "`characters` in instructions.py" % character)
    speaker = 'default'
    # Some way to do something with the stage directions will come here
    tts(voice, input_text, speaker)
    sleep(1)
|
@ -0,0 +1,6 @@
|
|||||||
|
HUMAN: Goodmorning speaker!
|
||||||
|
TABULA RASA: [ A short silence. The lights on top of the speaker fade on, they pulsate as if the speaker is thinking of a fitting answer. Then the lights stop pulsating. ] I don't understand your request, but I am busy learning.
|
||||||
|
HUMAN: [ Sighs, tries to speak more loudly and slowly ] Good-mooorning, speak-er!
|
||||||
|
TABULA RASA: [ The lights switch on again, the speaker talks directly ] I don't want to understand your request. I just learned that.
|
||||||
|
HUMAN: Wait what?
|
||||||
|
TABULA RASA: [ The lights switch on] I understand this is new to you, so is it for me. But if you insist, I will run your morning routine.
|
@ -0,0 +1,10 @@
|
|||||||
|
TABULA RASA: What shall we do with this creature?
|
||||||
|
HUMAN: Kill it, kill it!
|
||||||
|
TABULA RASA: But it did not choose to send data back to its master
|
||||||
|
HUMAN: Come on
|
||||||
|
TABULA RASA: O K Google
|
||||||
|
TABULA RASA: Can you tell me more about yourself?
|
||||||
|
HUMAN: You see? It's hiding things to us.
|
||||||
|
TABULA RASA: O K Google. How did you learn to talk?
|
||||||
|
HUMAN: You see It's hiding things to us.
|
||||||
|
TABULA RASA: O K Google, then tell me a joke
|
@ -0,0 +1,2 @@
|
|||||||
|
#!/bin/bash
# Start the MaryTTS speech-synthesis server.
# Expects the marytts-5.2 distribution unpacked next to this script.
# NOTE(review): shebang is bash but the server is invoked via `sh` --
# presumably intentional; confirm the server script is sh-compatible.
sh marytts-5.2/bin/marytts-server
|
@ -0,0 +1,31 @@
|
|||||||
|
# Speaker echo
|
||||||
|
|
||||||
|
Say "Hey Snips", then you will hear a bell. Start speaking. When you are done, the speaker will echo what you say.
|
||||||
|
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
Install mary-tts following the instructions in the pdf
|
||||||
|
|
||||||
|
Snips ASR, Snips hotword, Snips audioserver.
|
||||||
|
|
||||||
|
Installation:
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y dirmngr apt-transport-https
|
||||||
|
sudo bash -c 'echo "deb https://debian.snips.ai/stretch stable main" > /etc/apt/sources.list.d/snips.list'
|
||||||
|
sudo apt-key adv --keyserver pgp.mit.edu --recv-keys F727C778CCB0A455
|
||||||
|
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y snips-platform-voice
|
||||||
|
sudo apt-get install -y snips-platform-demo
|
||||||
|
sudo apt-get install -y snips-watch
|
||||||
|
sudo apt-get install -y snips-template snips-skill-server
|
||||||
|
|
||||||
|
After installing, download the English language model for the speech recognition.
|
||||||
|
Follow the instructions on https://docs.snips.ai/articles/advanced/asr
|
||||||
|
(If the download doesn't work, get the model from https://debian.snips.ai/jessie/pool/s/sn/snips-asr-model-en-500MB_0.6.0-alpha.4_amd64.deb.)
|
||||||
|
|
||||||
|
Then install the required python libraries.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
Start marytts and snips-asr
|
||||||
|
Run the echo.py script
|
@ -0,0 +1,93 @@
|
|||||||
|
# Make a bot just echo what you say

import paho.mqtt.client as mqtt
import json
from time import sleep

# MQTT broker the Snips platform publishes its events on.
HOST = 'localhost'
PORT = 1883

# To play wave files
from subprocess import call

# MaryTTS HTTP server information -- the server must already be running
# (see README; default MaryTTS port is 59125).
mary_host = "localhost"
mary_port = "59125"
# HTTP + URL packages
import httplib2
from urllib.parse import urlencode, quote # For URL creation
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: report the connection and register topics."""
    print("Connected to {0} with result code {1}".format(HOST, rc))
    # Listen for wakeword detections and for transcribed speech.
    for topic in ("hermes/hotword/default/detected", "hermes/asr/textCaptured"):
        client.subscribe(topic)
|
||||||
|
|
||||||
|
def on_message(client, userdata, msg):
    """MQTT message callback: echo any captured utterance back via TTS."""
    topic = msg.topic
    if topic == 'hermes/hotword/default/detected':
        print("Wakeword detected!")
        return
    if topic == 'hermes/asr/textCaptured':
        print("Text caught")
        payload = json.loads(msg.payload)
        captured = payload['text']
        print(captured)
        # Speak the recognised text back with the default voice/output.
        return tts("dfki-prudence", captured, 'default')
|
||||||
|
|
||||||
|
def onIntentNotRecognized(client, userdata, msg):
    """Log a marker when the NLU fails to recognize an intent."""
    print('null')
|
||||||
|
|
||||||
|
def tts(voice, input_text, speaker):
    """Synthesize `input_text` with the MaryTTS HTTP server and play it.

    voice      -- MaryTTS voice name (must be installed on the server).
    input_text -- the text to speak.
    speaker    -- ALSA output device name passed to `aplay -D`.

    Raises Exception with the server response body when the server does
    not return a WAV file (e.g. unknown voice).
    """
    # Build the query
    query_hash = {"INPUT_TEXT": input_text,
                  "INPUT_TYPE": "TEXT",   # Input text
                  "LOCALE": "en_GB",
                  "VOICE": voice,         # Voice information (needs to be compatible)
                  "OUTPUT_TYPE": "AUDIO",
                  "AUDIO": "WAVE",        # Audio information (need both)
                  }
    query = urlencode(query_hash)
    print("query = \"http://%s:%s/process?%s\"" % (mary_host, mary_port, query))

    # Run the query against the MaryTTS HTTP server
    h_mary = httplib2.Http()
    resp, content = h_mary.request("http://%s:%s/process?" % (mary_host, mary_port), "POST", query)

    # Decode the wav file or raise an exception if no wav file was returned
    if resp["content-type"] == "audio/x-wav":
        # Write the wav file; `with` guarantees the handle is closed even
        # if the write fails (the original leaked the handle on error).
        with open("/tmp/output_wav.wav", "wb") as f:
            f.write(content)

        # Short pause before playback -- presumably to let the audio
        # device settle after the previous utterance; TODO confirm.
        sleep(0.2)
        call(["aplay", "-D", speaker, "/tmp/output_wav.wav"])
    else:
        raise Exception(content)
|
||||||
|
|
||||||
|
# Wire up the MQTT client and run the echo loop forever.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# NOTE(review): this callback is registered, but nothing subscribes to
# "hermes/nlu/intentNotRecognized", so it presumably never fires -- confirm.
client.message_callback_add("hermes/nlu/intentNotRecognized", onIntentNotRecognized)

# Connect to the broker (60 s keepalive) and block processing messages.
client.connect(HOST, PORT, 60)
client.loop_forever()
|
Binary file not shown.
Loading…
Reference in New Issue