Revision 0c15613f
server/CMakeLists.txt
  56   56     FILES
  57   57     phoneme.msg
  58   58     soundchunk.msg
       59  +  utterance.msg
  59   60     )
  60   61
  61   62     add_service_files(
 ...  ...
  68   69     animation.action
  69   70     speech.action
  70   71     utterance.action
       72  +  tts.action
  71   73     emotionstate.action
  72   74     gazetarget.action
  73   75     mouthtarget.action
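The newly registered tts.action file itself is not part of this diff. Judging from how it is used elsewhere in this revision (MiddlewareROS.cpp fills goal.text, MaryTTSBridge.py fills result.utterance), its definition presumably looks roughly like the sketch below; the feedback section is a guess:

#goal: text to synthesize
string text
---
#result: the synthesized utterance
utterance utterance
---
#feedback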
server/action/utterance.action
   1    1     #utterance
   2        -
   3        -  #list of symbol/duration pairs
   4        -  phoneme[] phonemes
   5        -
   6        -  #sound data (raw data, similar to a wave file)
   7        -  soundchunk audio
   8        -
   9        -  #textual description
  10        -  string text
        2  +  utterance utterance
  11    3
  12    4     ---
  13    5     #response
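The fields removed here move into the new utterance message registered in CMakeLists.txt above; the action goal now carries a single nested utterance. The utterance.msg file is not shown in this revision, but given the removed lines and the fields accessed in UtteranceCallbackWrapperROS.h and MaryTTSBridge.py (utterance.text, utterance.audio, utterance.phonemes), it presumably reads roughly:

#utterance
#textual description
string text
#sound data (raw data, similar to a wave file)
soundchunk audio
#list of symbol/duration pairs
phoneme[] phonemes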
server/include/MiddlewareROS.h
  30   30     #include "Middleware.h"
  31   31     #ifdef ROS_SUPPORT
  32   32     #include "ros/ros.h"
       33  +  #include <actionlib/client/simple_action_client.h>
       34  +
  33   35     //messages
  34   36     #include "hlrc_server/phoneme.h"
  35   37     #include "hlrc_server/soundchunk.h"
       38  +  #include "hlrc_server/utterance.h"
  36   39     //actions
  37   40     #include "hlrc_server/gazetargetAction.h"
       41  +  #include "hlrc_server/ttsAction.h"
  38   42
  39   43     #include "ROS/GazeCallbackWrapperROS.h"
  40   44     #include "ROS/MouthCallbackWrapperROS.h"
 ...  ...
  45   49     #endif
  46   50     #include <boost/shared_ptr.hpp>
  47   51
       52  +  #define ROS_ACTION_CALL_TIMEOUT 30.0
  48   53
  49   54     class MiddlewareROS : public Middleware{
  50   55     #ifndef ROS_SUPPORT
 ...  ...
  87   92     GazeCallbackWrapper *gaze_action_server;
  88   93     MouthCallbackWrapper *mouth_action_server;
  89   94     SpeechCallbackWrapper *speech_action_server;
       95  +  actionlib::SimpleActionClient<hlrc_server::ttsAction> *tts_ac;
  90   96     #endif
  91   97     };
  92   98
server/include/ROS/SpeechCallbackWrapperROS.h
  54   54     as_.publishFeedback(feedback);
  55   55
  56   56     //send to application:
  57        -  text = "<maryxml version=\"0.4\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \
  58        -         xmlns=\"http://mary.dfki.de/2002/MaryXML\" xml:lang=\"en_US\">" +
       57  +  text = "<maryxml version=\"0.4\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://mary.dfki.de/2002/MaryXML\" xml:lang=\"en_US\">" +
  59   58     text +
  60   59     "</maryxml>";
  61   60     mw->speak_callback(text);
server/include/ROS/UtteranceCallbackWrapperROS.h
  46   46
  47   47     void call(const GoalConstPtr &goal){
  48   48     hlrc_server::utteranceGoalConstPtr request = goal;
  49        -  printf("> incoming utterance '%s' (%d phone symbols)\n", request->text.c_str(), (int)request->phonemes.size());
       49  +  printf("> incoming utterance '%s' (%d phone symbols)\n", request->utterance.text.c_str(), (int)request->utterance.phonemes.size());
  50   50
  51   51     //everything is ok, will be cleared on failures
  52   52     feedback.result = 1;
 ...  ...
  54   54     boost::shared_ptr<Utterance> utterance(new Utterance());
  55   55
  56   56     //copy values:
  57        -  utterance->set_text(request->text);
       57  +  utterance->set_text(request->utterance.text);
  58   58
  59   59     boost::shared_ptr<AudioData> audio_data(new AudioData());
  60        -  if (!extract_audio(request->audio, audio_data)){
       60  +  if (!extract_audio(request->utterance.audio, audio_data)){
  61   61     feedback.result = 0;
  62   62     }
  63   63
  64   64     utterance->set_audio_data(audio_data);
  65        -  utterance->set_phoneme_vector(extract_phoneme_vector(request->phonemes));
       65  +  utterance->set_phoneme_vector(extract_phoneme_vector(request->utterance.phonemes));
  66   66
  67   67     //send to application;
  68   68     mw->utterance_callback(utterance);
server/include/RSB/SpeechCallbackWrapper.h
  39   39     std::string text = *_text.get();
  40   40     //<voice effect=\"Rate(durScale:1.5) + TractScaler(durScale:1.1)\">" +
  41   41
  42        -  text = "<maryxml version=\"0.4\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" \
  43        -         xmlns=\"http://mary.dfki.de/2002/MaryXML\" xml:lang=\"en_US\">" +
       42  +  text = "<maryxml version=\"0.4\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://mary.dfki.de/2002/MaryXML\" xml:lang=\"en_US\">" +
  44   43     text +
  45   44     "</maryxml>";
  46   45     mw->speak_callback(text);
server/src/MiddlewareROS.cpp
  35   35     //CallbackWrapper:
  36   36     #include <boost/range/algorithm/remove_if.hpp>
  37   37     #include <boost/algorithm/string/classification.hpp>
       38  +  #include <actionlib/client/simple_action_client.h>
       39  +  #include "ROS/UtteranceROS.h"
  38   40
  39   41     using namespace std;
  40   42     using namespace hlrc_server;
  41   43
       44  +  template <typename actionspec>
       45  +  actionlib::SimpleActionClient<actionspec> * create_action_client(string scope){
       46  +      printf("> starting SimpleActionClient on %s\n",scope.c_str());
       47  +
       48  +      actionlib::SimpleActionClient<actionspec> *ac = new actionlib::SimpleActionClient<actionspec>(scope, true);
       49  +
       50  +      if (!ac->waitForServer(ros::Duration(1))){
       51  +          char buf[256];
       52  +          snprintf(buf, 256, "ERROR: action service %s not ready", scope.c_str());
       53  +          throw runtime_error(buf);
       54  +      }
       55  +      return ac;
       56  +  }
       57  +
       58  +
  42   59     MiddlewareROS::MiddlewareROS(Arbiter *arbiter, std::string scope) : Middleware(arbiter, scope){
  43   60     init();
  44   61     }
 ...  ...
  83  100     mouth_action_server = new MouthCallbackWrapper(this, scope, "mouth");
  84  101     speech_action_server = new SpeechCallbackWrapper(this, scope, "speech");
  85  102
      103  +  //create tts client
      104  +  tts_ac = create_action_client<hlrc_server::ttsAction>(base_scope + "/tts_provider");
      105  +
      106  +
      107  +
  86  108     printf("> init done\n");
  87  109     }
  88  110
 ...  ...
  98  120
  99  121     boost::shared_ptr<Utterance> utterance(new Utterance());
 100  122
 101        -  printf("> WARNING: ros tts call not implemented yet\n");
      123  +  hlrc_server::ttsGoal goal;
 102  124
 103        -  /*
 104        -  //build request
 105        -  boost::shared_ptr<std::string> request(new string(text));
      125  +  goal.text = text;
 106  126
 107        -  //try to fetch it asynchronously:
 108        -  try{
 109        -  RemoteServer::DataFuture<rst::audition::Utterance> future_ptr = tts_server->callAsync<rst::audition::Utterance>("create_utterance", request);
      127  +  //send
      128  +  tts_ac->sendGoal(goal);
 110  129
 111        -  //try to fetch the result
 112        -  boost::shared_ptr<rst::audition::Utterance> utterance_ptr = future_ptr.get(tts_timeout);
 113        -  utterance = UtteranceRSB(*(utterance_ptr.get()));
      130  +  //call ros:
      131  +  bool finished_before_timeout = tts_ac->waitForResult(ros::Duration(ROS_ACTION_CALL_TIMEOUT));
 114  132
 115        -  }catch(rsc::threading::FutureTimeoutException e){
 116        -  printf("> error: tts_call timed out after %3.1f seconds.\n", tts_timeout);
 117        -  }catch(rsc::threading::FutureTaskExecutionException e){
 118        -  printf("> error: tts_call failed: %s\n", e.what());
      133  +  if (!finished_before_timeout){
      134  +      printf("> ERROR: NO REPLY to utterance action call received within %4.2f s\n",ROS_ACTION_CALL_TIMEOUT);
      135  +  }else{
      136  +      //done, return utterance ptr
      137  +      ttsResultConstPtr tts_res = tts_ac->getResult();
      138  +      boost::shared_ptr<Utterance> utterance(new UtteranceROS(tts_res));
      139  +      printf("> done. got utterance (text=%s)\n",utterance->get_text().c_str());
      140  +      return utterance;
 119  141     }
 120  142
 121        -  printf("> done. got utterance (text=%s)\n",utterance.get_text().c_str());
 122        -  */
 123        -
      143  +  printf("> failed... got no utterance\n");
 124  144     return utterance;
 125  145     }
 126  146
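The reworked tts_call() above replaces the disabled RSB remote call with a ROS action round trip: fill a ttsGoal, sendGoal(), wait up to ROS_ACTION_CALL_TIMEOUT, and wrap the result in an UtteranceROS. A minimal stand-alone Python client for smoke-testing the same action is sketched below; the node name and the /hlrc/tts_provider topic are assumptions (MiddlewareROS.cpp appends "/tts_provider" to its base scope, which is not shown here), and the text is pre-wrapped in MaryXML the same way the speech callbacks do:

#!/usr/bin/python
# sketch: exercise the new tts action directly (topic name is an assumption)
import rospy
import actionlib
from hlrc_server.msg import ttsAction, ttsGoal

rospy.init_node("tts_smoke_test")
client = actionlib.SimpleActionClient("/hlrc/tts_provider", ttsAction)
client.wait_for_server()

# the bridge forwards goal.text as RAWMARYXML, so wrap it like SpeechCallbackWrapperROS.h does
text = ('<maryxml version="0.4" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
        'xmlns="http://mary.dfki.de/2002/MaryXML" xml:lang="en_US">hello world</maryxml>')
client.send_goal(ttsGoal(text=text))

if client.wait_for_result(rospy.Duration(30.0)):
    result = client.get_result()
    print("got utterance '%s' with %d phonemes" % (result.utterance.text, len(result.utterance.phonemes)))
else:
    print("no reply within timeout")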
server/src/MiddlewareRSB.cpp
 144  144
 145  145     //try to fetch it asynchronously:
 146  146     try{
 147        -  RemoteServer::DataFuture<rst::audition::Utterance> future_ptr = tts_server->callAsync<rst::audition::Utterance>("create_utterance", request);
      147  +  RemoteServer::DataFuture<rst::audition::^> future_ptr = tts_server->callAsync<rst::audition::Utterance>("create_utterance", request);
 148  148
 149  149     //try to fetch the result
 150  150     boost::shared_ptr<rst::audition::Utterance> utterance_ptr = future_ptr.get(tts_timeout);
tts_bridge/mary/DESCRIPTION.rst (new file)
[h]igh [l]evel [r]obot [c]ontrol client project
=======================

This is the python implementation of the hlrc_client
tts_bridge/mary/MANIFEST.in (new file)
include DESCRIPTION.rst

# Include the test suite (FIXME: does not work yet)
# recursive-include tests *

# If using Python 2.6 or less, then have to include package data, even though
# it's already declared in setup.py
include hlrc/*.dat
tts_bridge/mary/README.rst (new file)
[h]igh [l]evel [r]obot [c]ontrol client project
=======================

This is the python implementation of the hlrc_client
tts_bridge/mary/mary_tts_bridge/MaryTTSBridge.py (new file)
#!/usr/bin/python
"""
This file is part of hlrc

Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
http://opensource.cit-ec.de/projects/hlrc

This file may be licensed under the terms of the
GNU General Public License Version 3 (the ``GPL''),
or (at your option) any later version.

Software distributed under the License is distributed
on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
express or implied. See the GPL for the specific language
governing rights and limitations.

You should have received a copy of the GPL along with this
program. If not, go to http://www.gnu.org/licenses/gpl.html
or write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

The development of this software was supported by the
Excellence Cluster EXC 277 Cognitive Interaction Technology.
The Excellence Cluster EXC 277 is a grant of the Deutsche
Forschungsgemeinschaft (DFG) in the context of the German
Excellence Initiative.
"""

import logging
import rospy
from hlrc_server.msg import *
import time
import sys
import actionlib
from io import BytesIO
import wave
from MaryTTSClient import *
from cStringIO import StringIO

class MaryTTSBridge(object):
    #_feedback = ttsActionFeedback()
    #_result = ttsActionResult()


    def __init__(self, topic, voice="cmu-slt-hsmm", locale="en_GB", tts_host="127.0.0.1", tts_port=59125, loglevel=logging.WARNING):
        """initialise
        :param loglevel: optional log level
        """
        self.loglevel = loglevel
        self.logger = logging.getLogger(__name__)
        # create nice and actually usable formatter and add it to the handler
        self.config_logger(loglevel)
        self.logger.info("starting MaryTTSBridge")

        self.tts_client = MaryTTSClient(voice, locale, tts_host, tts_port, loglevel)

        rospy.init_node('MaryTTSBridge')

        self._action_name = topic
        self._as = actionlib.SimpleActionServer(self._action_name, ttsAction, execute_cb = self.execute_cb, auto_start = False)
        self._as.start()



    def __del__(self):
        """destructor
        """
        self.logger.debug("destructor of MaryTTSBridge called")

    def config_logger(self, level):
        """initialise a nice logger formatting
        :param level: log level
        """
        formatter = logging.Formatter('%(asctime)s %(name)-30s %(levelname)-8s > %(message)s')
        ch = logging.StreamHandler()
        #ch.setLevel(level)
        ch.setFormatter(formatter)
        self.logger.setLevel(level)
        self.logger.addHandler(ch)

    def create_soundchunk(self, audio_data):
        #extract wave from data
        fio = BytesIO(audio_data)
        wav = wave.open(fio)

        s = soundchunk()

        s.channels = wav.getnchannels()
        s.data = audio_data
        s.endianess = s.ENDIAN_LITTLE #guessed?!
        s.rate = wav.getframerate()
        s.samplecount = wav.getnframes()


        #sample format:
        sample_width = wav.getsampwidth()
        if (sample_width == 1):
            s.sample_type = s.SAMPLE_U8
        elif (sample_width == 2):
            s.sample_type = s.SAMPLE_U16
        elif (sample_width == 3):
            s.sample_type = s.SAMPLE_U24
        else:
            self.logger.error("ERROR: invalid sample width "+str(sample_width) + " detected")
            s = soundchunk()

        self.logger.info("created soundchunk with "+str(s.samplecount)+" samples")

        return s

    def create_phonemes(self, phoneme_str):
        last = 0.0
        plist = []

        sio = StringIO(phoneme_str)
        for line in sio:
            if (line[0] != '#'):
                phoneme_list = line.split(" ")
                symbol = phoneme_list[2]
                symbol = symbol.rstrip()

                now = float(phoneme_list[0])
                duration = (now - last)*1000
                last = now
                plist.append(phoneme(symbol, int(duration)))

        self.logger.info("created phonemelist with " + str(len(plist)) + " elements")

        return plist

    def create_utterance(self, text, audio_data, phoneme_list):
        u = utterance()
        u.text = text
        u.audio = self.create_soundchunk(audio_data)
        u.phonemes = self.create_phonemes(phoneme_list)

        self.logger.info("created utterance for 'phonemelist with '" + u.text + "'")
        return u

    def execute_cb(self, goal):
        self.logger.info("incoming utterance '" + goal.text + "'")

        success = True
        result = ttsResult()

        #incoming msg, ask mary tts for data:
        try:
            audio = self.tts_client.generate_audio(goal.text)
            phonelist = self.tts_client.generate_phonemes(goal.text)

        except:
            self.logger.error("failed to create utterance error = '" + str(sys.exc_info()[1]) + "'")
            success = False

        if success:
            #build soundchunk
            result.utterance = self.create_utterance(goal.text, audio, phonelist)
            self._as.set_succeeded(result)
        else:
            self._as.set_aborted(result)

    def run(self):
        #run the main loop
        rospy.spin()

#test code
if __name__ == "__main__":
    if (len(sys.argv) != 2):
        print("> usage: "+sys.argv[0]+" <topic>\n\n")
        sys.exit(1)

    bridge = MaryTTSBridge(topic=sys.argv[1], loglevel=logging.INFO)
    bridge.run()
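The bridge is started stand-alone with the action topic as its only argument; it needs a running ROS master and a MaryTTS server on localhost:59125 (the defaults above). The topic has to match what the hlrc_server side connects to; since MiddlewareROS.cpp appends "/tts_provider" to its base scope, a server running with base scope /hlrc (an assumption, not part of this diff) would be served by:

python MaryTTSBridge.py /hlrc/tts_provider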
tts_bridge/mary/mary_tts_bridge/MaryTTSClient.py (new file)
"""
This file is part of hlrc

Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
http://opensource.cit-ec.de/projects/hlrc

This file may be licensed under the terms of the
GNU General Public License Version 3 (the ``GPL''),
or (at your option) any later version.

Software distributed under the License is distributed
on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
express or implied. See the GPL for the specific language
governing rights and limitations.

You should have received a copy of the GPL along with this
program. If not, go to http://www.gnu.org/licenses/gpl.html
or write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

The development of this software was supported by the
Excellence Cluster EXC 277 Cognitive Interaction Technology.
The Excellence Cluster EXC 277 is a grant of the Deutsche
Forschungsgemeinschaft (DFG) in the context of the German
Excellence Initiative.
"""

import logging
#try:
#    import rsb
#except ImportError:
#    RSB_SUPPORT = False
#else:
#    from MiddlewareRSB import *
#    RSB_SUPPORT = True

#from MiddlewareROS import *
import sys
import httplib, urllib
import StringIO
import wave
import ctypes
import wave
import sys

class MaryTTSClient:
    def __init__(self, voice="cmu-slt-hsmm", locale="en_US", tts_host="127.0.0.1", tts_port=59125, loglevel=logging.WARNING):
        """initialise
        :param loglevel: optional log level
        """
        self.loglevel = loglevel
        self.logger = logging.getLogger(__name__)
        # create nice and actually usable formatter and add it to the handler
        self.config_logger(loglevel)

        self.logger.info("starting MaryTTSClient (voice="+voice+", locale="+locale+", host="+tts_host+", port="+str(tts_port))

        self.tts_host = tts_host
        self.tts_port = tts_port
        self.locale = locale
        self.voice = voice

    def __del__(self):
        """destructor
        """
        self.logger.debug("destructor of MaryTTSClient called")

    def config_logger(self, level):
        """initialise a nice logger formatting
        :param level: log level
        """
        formatter = logging.Formatter('%(asctime)s %(name)-30s %(levelname)-8s > %(message)s')
        ch = logging.StreamHandler()
        #ch.setLevel(level)
        ch.setFormatter(formatter)
        self.logger.setLevel(level)
        self.logger.addHandler(ch)

    def generate_audio(self, message):
        """generate audio from text
        :param message: text to synthesize
        """
        return self.generate(message, "AUDIO")

    def generate_phonemes(self, message):
        """generate phoneme list from text
        :param message: text to synthesize
        """
        return self.generate(message, "REALISED_DURATIONS")

    def generate(self, message, output_type):
        """generate requested data object from text
        :param message: text to synthesize
        """

        raw_params = {
            "INPUT_TEXT": message,
            "INPUT_TYPE": "RAWMARYXML",
            "OUTPUT_TYPE": output_type,
            "LOCALE": self.locale,
            "AUDIO": "WAVE_FILE",
            "VOICE": self.voice,
        }

        params = urllib.urlencode(raw_params)
        headers = {}

        #open connection to mary server
        conn = httplib.HTTPConnection(self.tts_host, self.tts_port)

        #conn.set_debuglevel(5)

        conn.request("POST", "/process", params, headers)
        response = conn.getresponse()
        if response.status != 200:
            print response.getheaders()
            raise RuntimeError("{0}: {1}".format(response.status,response.reason))
        return response.read()

#test code
if __name__ == "__main__":
    client = MaryTTSClient()
    audio = client.generate_phonemes("test 1 2 3 4 5 6 7 8 9 10")
    print(audio)
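Note that generate() posts with INPUT_TYPE=RAWMARYXML, so callers normally hand it text that is already wrapped in MaryXML, exactly as the hlrc_server speech callbacks do. A minimal sketch, assuming a MaryTTS server on the default localhost:59125:

from MaryTTSClient import MaryTTSClient

# wrap plain text the same way SpeechCallbackWrapperROS.h does
text = ('<maryxml version="0.4" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
        'xmlns="http://mary.dfki.de/2002/MaryXML" xml:lang="en_US">hello world</maryxml>')

client = MaryTTSClient(voice="cmu-slt-hsmm", locale="en_US")
wav_bytes = client.generate_audio(text)      # WAVE file content as a byte string
durations = client.generate_phonemes(text)   # realised phone durations as text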
tts_bridge/mary/mary_tts_bridge/__init__.py (new file)
from MaryTTSBridge import MaryTTSBridge
tts_bridge/mary/setup.cfg (new file)
[bdist_wheel]
# This flag says that the code is written to work on both Python 2 and Python
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
universal=1
tts_bridge/mary/setup.py (new file)
"""[h]igh [l]evel [r]obot [c]ontrol client project


See:
http://opensource.cit-ec.uni-bielefeld.de/hlrc
"""

# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='mary_tts_bridge',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1',

    description='mary tts provider bridge',
    long_description=long_description,

    # The project's main homepage.
    url='http://opensource.cit-ec.uni-bielefeld.de/hlrc',

    # Author details
    author='Simon Schulz',
    author_email='sschulz@techfak.uni-bielefeld.de',

    # Choose your license
    license='GPLv3',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],

    # What does your project relate to?
    keywords='sample setuptools development',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=[],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },

    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        #'sample': ['package_data.dat'],
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[], #('my_data', ['data/data_file'])],

    #scripts=['bin/hlrc_test_gui.py'],


    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'mary_tts_provider=mary_tts_bridg.MaryTTSBridge:main',
        ],
    },
)