hlrc / server / include / ROS / UtteranceCallbackWrapperROS.h @ 84534fbf
/*
 * This file is part of hlrc_server
 *
 * Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
 * http://opensource.cit-ec.de/projects/hlrc_server
 *
 * This file may be licensed under the terms of the
 * GNU General Public License Version 3 (the ``GPL''),
 * or (at your option) any later version.
 *
 * Software distributed under the License is distributed
 * on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the GPL for the specific language
 * governing rights and limitations.
 *
 * You should have received a copy of the GPL along with this
 * program. If not, go to http://www.gnu.org/licenses/gpl.html
 * or write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The development of this software was supported by the
 * Excellence Cluster EXC 277 Cognitive Interaction Technology.
 * The Excellence Cluster EXC 277 is a grant of the Deutsche
 * Forschungsgemeinschaft (DFG) in the context of the German
 * Excellence Initiative.
 *
 */

#pragma once
#include "hlrc_server/utteranceAction.h"
#include "CallbackWrapperROS.h"
#include "hlrc_server/phoneme.h"
#include "hlrc_server/soundchunk.h"
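
// NOTE: Middleware, Utterance, AudioData and the GoalConstPtr typedef are not
// declared in this header; they are assumed to be pulled in transitively via
// CallbackWrapperROS.h (or the headers it includes).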

// callback handler for incoming utterance requests:
class UtteranceCallbackWrapper : CallbackWrapper<hlrc_server::utteranceAction> {
protected:
	hlrc_server::utteranceFeedback feedback;
	hlrc_server::utteranceResult result;

public:
	UtteranceCallbackWrapper(Middleware* mw, std::string scope, std::string name)
	    : CallbackWrapper<hlrc_server::utteranceAction>(mw, scope, name,
	                                                    boost::bind(&UtteranceCallbackWrapper::call, this, _1)) {
		//
	}
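
	// Usage sketch (not part of this header, names are illustrative): the
	// middleware layer is expected to create one wrapper per actionlib scope,
	// e.g. something along the lines of
	//
	//   UtteranceCallbackWrapper utterance_handler(&middleware, "/hlrc_server", "utterance");
	//
	// after which incoming utterance goals are dispatched to call() below.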

	void call(const GoalConstPtr& goal) {
		hlrc_server::utteranceGoalConstPtr request = goal;
		printf("> incoming utterance '%s' (%d phone symbols)\n", request->utterance.text.c_str(),
		       (int)request->utterance.phonemes.size());

		// everything is ok, will be cleared on failures
		feedback.result = 1;

		std::shared_ptr<Utterance> utterance(new Utterance());

		// copy values:
		utterance->set_text(request->utterance.text);

		std::shared_ptr<AudioData> audio_data(new AudioData());
		if (!extract_audio(request->utterance.audio, audio_data)) {
			feedback.result = 0;
		}

		utterance->set_audio_data(audio_data);
		utterance->set_phoneme_vector(extract_phoneme_vector(request->utterance.phonemes));

		// send to application:
		mw->utterance_callback(utterance);

		if (feedback.result) {
			result.result = 1;
			as_.setSucceeded(result);
		}
		else {
			as_.setAborted(result);
		}
	}

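	// NOTE: feedback.result is used purely as an internal success flag in
	// call() above; no actionlib feedback message is actually published by
	// this wrapper. Only setSucceeded()/setAborted() report the outcome.
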
	// convert ros message audio data to our own implementation
	bool extract_audio(hlrc_server::soundchunk sound_chunk, std::shared_ptr<AudioData> audio_data) {
		// extract data:
		unsigned int audio_len = sound_chunk.data.size();
		char* audio_data_char = (char*)sound_chunk.data.data();

		// audio.samples = vector<char>(audio_data_char, audio_data_char+audio_len);
		audio_data->samples.resize(audio_len);
		audio_data->samples.assign(audio_data_char, audio_data_char + audio_len);

		printf("audio samplesize is %d bytes\n", (int)audio_data->samples.size());

		// extract format:
		audio_data->sample_signed = true;
		switch (sound_chunk.sample_type) {
			case (hlrc_server::soundchunk::SAMPLE_U8):
				audio_data->sample_signed = false; // and fall through:
			case (hlrc_server::soundchunk::SAMPLE_S8):
				audio_data->sample_bit = 8;
				break;

			case (hlrc_server::soundchunk::SAMPLE_U16):
				audio_data->sample_signed = false; // and fall through:
			case (hlrc_server::soundchunk::SAMPLE_S16):
				audio_data->sample_bit = 16;
				break;

			case (hlrc_server::soundchunk::SAMPLE_U24):
				audio_data->sample_signed = false; // and fall through:
			case (hlrc_server::soundchunk::SAMPLE_S24):
				audio_data->sample_bit = 24;
				break;

			default:
				printf("> invalid sample type %d in SoundChunk! ignoring request!\n", sound_chunk.sample_type);
				return false;
				// throw runtime_error("UtteranceROS::convert_audio_data() unsupported sample type in soundchunk");
		}

		// sample rate
		audio_data->sample_rate = sound_chunk.rate;

		// endianness
		if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_LITTLE) {
			audio_data->sample_big_endian = false;
		}
		else if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_BIG) {
			audio_data->sample_big_endian = true;
		}
		else {
			printf("> invalid SoundChunk byte_format\n");
			throw std::runtime_error("UtteranceROS::convert_audio_data() unsupported byte_format in soundchunk");
		}

		// number of channels
		audio_data->sample_channels = sound_chunk.channels;

		printf("> new AudioData: %s\n", audio_data->to_string().c_str());

		return true;
	}
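
	// Worked example (illustrative values only): a soundchunk with
	// sample_type == SAMPLE_S16, endianess == ENDIAN_LITTLE, rate == 16000
	// and channels == 1 is mapped by extract_audio() above to
	//   sample_signed = true, sample_bit = 16, sample_big_endian = false,
	//   sample_rate = 16000, sample_channels = 1.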

	// convert ros phoneme vector to our own implementation
	Utterance::phonemes_vector_t extract_phoneme_vector(std::vector<hlrc_server::phoneme> pv) {
		Utterance::phonemes_vector_t result;

		// extract phoneme vector
		for (unsigned int i = 0; i < pv.size(); i++) {
			hlrc_server::phoneme ros_phoneme = pv[i];
			Utterance::symbol_duration_pair_t phoneme = std::make_pair(ros_phoneme.symbol, ros_phoneme.duration);
			result.push_back(phoneme);
		}

		return result;
	}
};