Revision f150aab5 server/include/ROS/UtteranceROS.h

/*
 * This file is part of hlrc_server
 *
 * Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
 * http://opensource.cit-ec.de/projects/hlrc_server
 *
 * This file may be licensed under the terms of the
 * GNU General Public License Version 3 (the ``GPL''),
 * or (at your option) any later version.
 *
 * Software distributed under the License is distributed
 * on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
 * express or implied. See the GPL for the specific language
 * governing rights and limitations.
 *
 * You should have received a copy of the GPL along with this
 * program. If not, go to http://www.gnu.org/licenses/gpl.html
 * or write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The development of this software was supported by the
 * Excellence Cluster EXC 277 Cognitive Interaction Technology.
 * The Excellence Cluster EXC 277 is a grant of the Deutsche
 * Forschungsgemeinschaft (DFG) in the context of the German
 * Excellence Initiative.
 *
 */
28 | 28 |
|
29 | 29 |
#pragma once |
30 | 30 |
#include "AudioData.h" |
... | ... | |
35 | 35 |
#include "hlrc_server/utterance.h" |
36 | 36 |
#include "hlrc_server/ttsAction.h" |
37 | 37 |
|
38 |
//converter from ros utterance |
|
38 |
// converter from ros utterance
|
|
39 | 39 |
class UtteranceROS : public Utterance { |
40 |
public: |
|
41 |
UtteranceROS(ttsResultConstPtr ros_utterance){ |
|
42 |
//set text: |
|
43 |
set_text(ros_utterance->utterance.text); |
|
44 |
|
|
45 |
//convert soundchunk to audio data: |
|
46 |
extract_audio_data(ros_utterance->utterance.audio); |
|
47 |
|
|
48 |
//convert phonemes: |
|
49 |
extract_phonemes(ros_utterance->utterance.phonemes); |
|
50 |
} |
|
51 |
|
|
52 |
~UtteranceROS(){}; |
|
53 |
|
|
54 |
void extract_audio_data(soundchunk sound_chunk){ |
|
55 |
//extract data: |
|
56 |
unsigned int audio_len = sound_chunk.data.size(); |
|
57 |
char *audio_data_char = (char *)sound_chunk.data.data(); |
|
58 |
|
|
59 |
audio_data->samples.resize(audio_len); |
|
60 |
audio_data->samples.assign(audio_data_char, audio_data_char+audio_len); |
|
61 |
printf("audio samplesize is %d bytes\n",(unsigned int)audio_data->samples.size()); |
|
62 |
|
|
63 |
//extract format: |
|
64 |
audio_data->sample_signed = true; |
|
65 |
switch (sound_chunk.sample_type){ |
|
66 |
case(soundchunk::SAMPLE_U8): audio_data->sample_signed = false; //and fall through: |
|
67 |
case(soundchunk::SAMPLE_S8): audio_data->sample_bit = 8; break; |
|
68 |
|
|
69 |
case(soundchunk::SAMPLE_U16): audio_data->sample_signed = false; //and fall through: |
|
70 |
case(soundchunk::SAMPLE_S16): audio_data->sample_bit = 16; break; |
|
71 |
|
|
72 |
case(soundchunk::SAMPLE_U24): audio_data->sample_signed = false; //and fall through: |
|
73 |
case(soundchunk::SAMPLE_S24): audio_data->sample_bit = 24; break; |
|
74 |
|
|
75 |
default: |
|
76 |
printf("> invalid sample type %d in ROS SoundChunk! ignoring request!\n", sound_chunk.sample_type); |
|
77 |
throw runtime_error("UtteranceROS::convert_audio_data() unsupported sample type in ros SoundChunk"); |
|
78 |
} |
|
79 |
|
|
80 |
//bitrate |
|
81 |
audio_data->sample_rate = sound_chunk.rate; |
|
82 |
|
|
83 |
//endianness |
|
84 |
if (sound_chunk.endianess == soundchunk::ENDIAN_LITTLE){ |
|
85 |
audio_data->sample_big_endian = false; |
|
86 |
}else if (sound_chunk.endianess == soundchunk::ENDIAN_BIG){ |
|
87 |
audio_data->sample_big_endian = true; |
|
88 |
}else{ |
|
89 |
printf("> invalid SoundChunk byte_format"); |
|
90 |
throw runtime_error("UtteranceROS::convert_audio_data() unsupported byte_format in ros SoundChunk"); |
|
91 |
} |
|
92 |
|
|
93 |
//number of channels |
|
94 |
audio_data->sample_channels = sound_chunk.channels; |
|
95 |
|
|
96 |
printf("> new AudioData: %s\n",audio_data->to_string().c_str()); |
|
97 |
} |
|
98 |
|
|
99 |
void extract_phonemes(utterance::_phonemes_type ros_phones){ |
|
100 |
//extract phoneme vector |
|
101 |
phonemes_vector.clear(); |
|
102 |
for(auto it = ros_phones.begin(); it<ros_phones.end(); it++){ |
|
103 |
Utterance::symbol_duration_pair_t phoneme = make_pair(it->symbol, it->duration); |
|
104 |
phonemes_vector.push_back(phoneme); |
|
105 |
} |
|
106 |
} |
|
107 |
|
|
40 |
public: |
|
41 |
UtteranceROS(ttsResultConstPtr ros_utterance) { |
|
42 |
// set text: |
|
43 |
set_text(ros_utterance->utterance.text); |
|
44 |
|
|
45 |
// convert soundchunk to audio data: |
|
46 |
extract_audio_data(ros_utterance->utterance.audio); |
|
47 |
|
|
48 |
// convert phonemes: |
|
49 |
extract_phonemes(ros_utterance->utterance.phonemes); |
|
50 |
} |
|
51 |
|
|
52 |
~UtteranceROS(){}; |
|
53 |
|
|
54 |
void extract_audio_data(soundchunk sound_chunk) { |
|
55 |
// extract data: |
|
56 |
unsigned int audio_len = sound_chunk.data.size(); |
|
57 |
char* audio_data_char = (char*)sound_chunk.data.data(); |
|
58 |
|
|
59 |
audio_data->samples.resize(audio_len); |
|
60 |
audio_data->samples.assign(audio_data_char, audio_data_char + audio_len); |
|
61 |
printf("audio samplesize is %d bytes\n", (unsigned int)audio_data->samples.size()); |
|
62 |
|
|
63 |
// extract format: |
|
64 |
audio_data->sample_signed = true; |
|
65 |
switch (sound_chunk.sample_type) { |
|
66 |
case (soundchunk::SAMPLE_U8): |
|
67 |
audio_data->sample_signed = false; // and fall through: |
|
68 |
case (soundchunk::SAMPLE_S8): |
|
69 |
audio_data->sample_bit = 8; |
|
70 |
break; |
|
71 |
|
|
72 |
case (soundchunk::SAMPLE_U16): |
|
73 |
audio_data->sample_signed = false; // and fall through: |
|
74 |
case (soundchunk::SAMPLE_S16): |
|
75 |
audio_data->sample_bit = 16; |
|
76 |
break; |
|
77 |
|
|
78 |
case (soundchunk::SAMPLE_U24): |
|
79 |
audio_data->sample_signed = false; // and fall through: |
|
80 |
case (soundchunk::SAMPLE_S24): |
|
81 |
audio_data->sample_bit = 24; |
|
82 |
break; |
|
83 |
|
|
84 |
default: |
|
85 |
printf("> invalid sample type %d in ROS SoundChunk! ignoring request!\n", sound_chunk.sample_type); |
|
86 |
throw runtime_error("UtteranceROS::convert_audio_data() unsupported sample type in ros SoundChunk"); |
|
87 |
} |
|
88 |
|
|
89 |
// bitrate |
|
90 |
audio_data->sample_rate = sound_chunk.rate; |
|
91 |
|
|
92 |
// endianness |
|
93 |
if (sound_chunk.endianess == soundchunk::ENDIAN_LITTLE) { |
|
94 |
audio_data->sample_big_endian = false; |
|
95 |
} |
|
96 |
else if (sound_chunk.endianess == soundchunk::ENDIAN_BIG) { |
|
97 |
audio_data->sample_big_endian = true; |
|
98 |
} |
|
99 |
else { |
|
100 |
printf("> invalid SoundChunk byte_format"); |
|
101 |
throw runtime_error("UtteranceROS::convert_audio_data() unsupported byte_format in ros SoundChunk"); |
|
102 |
} |
|
103 |
|
|
104 |
// number of channels |
|
105 |
audio_data->sample_channels = sound_chunk.channels; |
|
106 |
|
|
107 |
printf("> new AudioData: %s\n", audio_data->to_string().c_str()); |
|
108 |
} |
|
109 |
|
|
110 |
void extract_phonemes(utterance::_phonemes_type ros_phones) { |
|
111 |
// extract phoneme vector |
|
112 |
phonemes_vector.clear(); |
|
113 |
for (auto it = ros_phones.begin(); it < ros_phones.end(); it++) { |
|
114 |
Utterance::symbol_duration_pair_t phoneme = make_pair(it->symbol, it->duration); |
|
115 |
phonemes_vector.push_back(phoneme); |
|
116 |
} |
|
117 |
} |
|
108 | 118 |
}; |
109 |
|
Also available in: Unified diff