Revision f150aab5 server/include/ROS/UtteranceCallbackWrapperROS.h

server/include/ROS/UtteranceCallbackWrapperROS.h
/*
-* This file is part of hlrc_server
-*
-* Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
-* http://opensource.cit-ec.de/projects/hlrc_server
-*
-* This file may be licensed under the terms of the
-* GNU General Public License Version 3 (the ``GPL''),
-* or (at your option) any later version.
-*
-* Software distributed under the License is distributed
-* on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
-* express or implied. See the GPL for the specific language
-* governing rights and limitations.
-*
-* You should have received a copy of the GPL along with this
-* program. If not, go to http://www.gnu.org/licenses/gpl.html
-* or write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-*
-* The development of this software was supported by the
-* Excellence Cluster EXC 277 Cognitive Interaction Technology.
-* The Excellence Cluster EXC 277 is a grant of the Deutsche
-* Forschungsgemeinschaft (DFG) in the context of the German
-* Excellence Initiative.
-*
-*/
+ * This file is part of hlrc_server
+ *
+ * Copyright(c) sschulz <AT> techfak.uni-bielefeld.de
+ * http://opensource.cit-ec.de/projects/hlrc_server
+ *
+ * This file may be licensed under the terms of the
+ * GNU General Public License Version 3 (the ``GPL''),
+ * or (at your option) any later version.
+ *
+ * Software distributed under the License is distributed
+ * on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
+ * express or implied. See the GPL for the specific language
+ * governing rights and limitations.
+ *
+ * You should have received a copy of the GPL along with this
+ * program. If not, go to http://www.gnu.org/licenses/gpl.html
+ * or write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The development of this software was supported by the
+ * Excellence Cluster EXC 277 Cognitive Interaction Technology.
+ * The Excellence Cluster EXC 277 is a grant of the Deutsche
+ * Forschungsgemeinschaft (DFG) in the context of the German
+ * Excellence Initiative.
+ *
+ */

#pragma once
#include "hlrc_server/utteranceAction.h"
...
#include "hlrc_server/phoneme.h"
#include "hlrc_server/soundchunk.h"

-//callback handler incoming gaze requests:
-class UtteranceCallbackWrapper : CallbackWrapper<hlrc_server::utteranceAction>{
+// callback handler incoming gaze requests:
+class UtteranceCallbackWrapper : CallbackWrapper<hlrc_server::utteranceAction> {
protected:
-    hlrc_server::utteranceFeedback feedback;
-    hlrc_server::utteranceResult result;
+	hlrc_server::utteranceFeedback feedback;
+	hlrc_server::utteranceResult result;

public:
-
-    UtteranceCallbackWrapper(Middleware *mw, std::string scope, std::string name) : CallbackWrapper<hlrc_server::utteranceAction>(mw, scope, name, boost::bind(&UtteranceCallbackWrapper::call, this, _1)){
-        //
-    };
-
-    void call(const GoalConstPtr &goal){
-        hlrc_server::utteranceGoalConstPtr request = goal;
-        printf("> incoming utterance '%s' (%d phone symbols)\n", request->utterance.text.c_str(), (int)request->utterance.phonemes.size());
-
-        //everything is ok, will be cleared on failures
-        feedback.result = 1;
-
-        boost::shared_ptr<Utterance> utterance(new Utterance());
-
-        //copy values:
-        utterance->set_text(request->utterance.text);
-
-        boost::shared_ptr<AudioData> audio_data(new AudioData());
-        if (!extract_audio(request->utterance.audio, audio_data)){
-            feedback.result = 0;
-        }
-
-        utterance->set_audio_data(audio_data);
-        utterance->set_phoneme_vector(extract_phoneme_vector(request->utterance.phonemes));
-
-        //send to application;
-        mw->utterance_callback(utterance);
-
-
-        if (feedback.result){
-            result.result = 1;
-            as_.setSucceeded(result);
-        }else{
-            as_.setAborted(result);
-        }
-    }
-
-
-    //convert ros message audio data to our own implementation
-    bool extract_audio(hlrc_server::soundchunk sound_chunk, boost::shared_ptr<AudioData> audio_data){
-        //extract data:
-        unsigned int audio_len = sound_chunk.data.size();
-        char *audio_data_char = (char *)sound_chunk.data.data();
-
-        //audio.samples = vector<char>(audio_data_char, audio_data_char+audio_len);
-        audio_data->samples.resize(audio_len);
-        audio_data->samples.assign(audio_data_char, audio_data_char+audio_len);
-
-        printf("audio samplesize is %d bytes\n",(int)audio_data->samples.size());
-
-        //extract format:
-        audio_data->sample_signed = true;
-        switch (sound_chunk.sample_type){
-            case(hlrc_server::soundchunk::SAMPLE_U8):  audio_data->sample_signed = false; //and fall through:
-            case(hlrc_server::soundchunk::SAMPLE_S8):  audio_data->sample_bit =  8; break;
-
-            case(hlrc_server::soundchunk::SAMPLE_U16): audio_data->sample_signed = false; //and fall through:
-            case(hlrc_server::soundchunk::SAMPLE_S16): audio_data->sample_bit = 16; break;
-
-            case(hlrc_server::soundchunk::SAMPLE_U24): audio_data->sample_signed = false; //and fall through:
-            case(hlrc_server::soundchunk::SAMPLE_S24): audio_data->sample_bit = 24; break;
-
-            default:
-                printf("> invalid sample type %d in SoundChunk! ignoring request!\n", sound_chunk.sample_type);
-                return false;
-                //throw runtime_error("UtteranceRSB::convert_audio_data() unsupported sample type in soundchunk");
-        }
-
-        //bitrate
-        audio_data->sample_rate = sound_chunk.rate;
-
-        //endianness
-        if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_LITTLE){
-            audio_data->sample_big_endian = false;
-        }else if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_BIG){
-            audio_data->sample_big_endian = true;
-        }else{
-            printf("> invalid SoundChunk byte_format");
-            throw std::runtime_error("UtteranceRSB::convert_audio_data() unsupported byte_format in soundchunk");
-        }
-
-        //number of channels
-        audio_data->sample_channels = sound_chunk.channels;
-
-        printf("> new AudioData: %s\n",audio_data->to_string().c_str());
-
-        return true;
-    }
-
-    //convert ros phoneme vector to out own implementation
-    Utterance::phonemes_vector_t extract_phoneme_vector(std::vector<hlrc_server::phoneme> pv){
-        Utterance::phonemes_vector_t result;
-
-        //extract phoneme vector
-        for(unsigned int i=0; i<pv.size(); i++){
-            hlrc_server::phoneme ros_phoneme = pv[i];
-            Utterance::symbol_duration_pair_t phoneme = make_pair(ros_phoneme.symbol, ros_phoneme.duration);
-            result.push_back(phoneme);
-        }
-
-        return result;
-    }
-
+	UtteranceCallbackWrapper(Middleware* mw, std::string scope, std::string name)
+	   : CallbackWrapper<hlrc_server::utteranceAction>(mw, scope, name,
+	                                                   boost::bind(&UtteranceCallbackWrapper::call, this, _1)){
+		   //
+	   };
+
+	void call(const GoalConstPtr& goal) {
+		hlrc_server::utteranceGoalConstPtr request = goal;
+		printf("> incoming utterance '%s' (%d phone symbols)\n", request->utterance.text.c_str(),
+		       (int)request->utterance.phonemes.size());
+
+		// everything is ok, will be cleared on failures
+		feedback.result = 1;
+
+		boost::shared_ptr<Utterance> utterance(new Utterance());
+
+		// copy values:
+		utterance->set_text(request->utterance.text);
+
+		boost::shared_ptr<AudioData> audio_data(new AudioData());
+		if (!extract_audio(request->utterance.audio, audio_data)) {
+			feedback.result = 0;
+		}
+
+		utterance->set_audio_data(audio_data);
+		utterance->set_phoneme_vector(extract_phoneme_vector(request->utterance.phonemes));
+
+		// send to application;
+		mw->utterance_callback(utterance);
+
+		if (feedback.result) {
+			result.result = 1;
+			as_.setSucceeded(result);
+		}
+		else {
+			as_.setAborted(result);
+		}
+	}
+
+	// convert ros message audio data to our own implementation
+	bool extract_audio(hlrc_server::soundchunk sound_chunk, boost::shared_ptr<AudioData> audio_data) {
+		// extract data:
+		unsigned int audio_len = sound_chunk.data.size();
+		char* audio_data_char = (char*)sound_chunk.data.data();
+
+		// audio.samples = vector<char>(audio_data_char, audio_data_char+audio_len);
+		audio_data->samples.resize(audio_len);
+		audio_data->samples.assign(audio_data_char, audio_data_char + audio_len);
+
+		printf("audio samplesize is %d bytes\n", (int)audio_data->samples.size());
+
+		// extract format:
+		audio_data->sample_signed = true;
+		switch (sound_chunk.sample_type) {
+			case (hlrc_server::soundchunk::SAMPLE_U8):
+				audio_data->sample_signed = false; // and fall through:
+			case (hlrc_server::soundchunk::SAMPLE_S8):
+				audio_data->sample_bit = 8;
+				break;
+
+			case (hlrc_server::soundchunk::SAMPLE_U16):
+				audio_data->sample_signed = false; // and fall through:
+			case (hlrc_server::soundchunk::SAMPLE_S16):
+				audio_data->sample_bit = 16;
+				break;
+
+			case (hlrc_server::soundchunk::SAMPLE_U24):
+				audio_data->sample_signed = false; // and fall through:
+			case (hlrc_server::soundchunk::SAMPLE_S24):
+				audio_data->sample_bit = 24;
+				break;
+
+			default:
+				printf("> invalid sample type %d in SoundChunk! ignoring request!\n", sound_chunk.sample_type);
+				return false;
+				// throw runtime_error("UtteranceRSB::convert_audio_data() unsupported sample type in soundchunk");
+		}
+
+		// bitrate
+		audio_data->sample_rate = sound_chunk.rate;
+
+		// endianness
+		if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_LITTLE) {
+			audio_data->sample_big_endian = false;
+		}
+		else if (sound_chunk.endianess == hlrc_server::soundchunk::ENDIAN_BIG) {
+			audio_data->sample_big_endian = true;
+		}
+		else {
+			printf("> invalid SoundChunk byte_format");
+			throw std::runtime_error("UtteranceRSB::convert_audio_data() unsupported byte_format in soundchunk");
+		}
+
+		// number of channels
+		audio_data->sample_channels = sound_chunk.channels;
+
+		printf("> new AudioData: %s\n", audio_data->to_string().c_str());
+
+		return true;
+	}
+
+	// convert ros phoneme vector to out own implementation
+	Utterance::phonemes_vector_t extract_phoneme_vector(std::vector<hlrc_server::phoneme> pv) {
+		Utterance::phonemes_vector_t result;
+
+		// extract phoneme vector
+		for (unsigned int i = 0; i < pv.size(); i++) {
+			hlrc_server::phoneme ros_phoneme = pv[i];
+			Utterance::symbol_duration_pair_t phoneme = make_pair(ros_phoneme.symbol, ros_phoneme.duration);
+			result.push_back(phoneme);
+		}
+
+		return result;
+	}
};
-
-

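For reference, a minimal client-side sketch of how this action server might be exercised (not part of the revision above). The goal fields (utterance.text, utterance.phonemes, utterance.audio) and the soundchunk constants are taken from the handler in this header; the action name "/hlrc_server/utterance", the phoneme duration unit, and the audio parameters are assumptions and must match the scope/name the UtteranceCallbackWrapper was actually created with.

// hypothetical client sketch -- action name and audio parameters are assumptions
#include <ros/ros.h>
#include <actionlib/client/simple_action_client.h>
#include <hlrc_server/utteranceAction.h>
#include <hlrc_server/phoneme.h>
#include <hlrc_server/soundchunk.h>

int main(int argc, char** argv) {
	ros::init(argc, argv, "utterance_client_sketch");

	// assumed action name; adjust to the scope/name the server was started with
	actionlib::SimpleActionClient<hlrc_server::utteranceAction> client("/hlrc_server/utterance", true);
	client.waitForServer();

	hlrc_server::utteranceGoal goal;
	goal.utterance.text = "hello world";

	// one entry per symbol/duration pair, as consumed by extract_phoneme_vector()
	hlrc_server::phoneme p;
	p.symbol = "h";
	p.duration = 80; // duration unit as defined by the phoneme message (assumed: milliseconds)
	goal.utterance.phonemes.push_back(p);

	// one second of silent 16 bit signed little-endian mono audio,
	// i.e. one of the sample formats accepted by extract_audio()
	hlrc_server::soundchunk chunk;
	chunk.sample_type = hlrc_server::soundchunk::SAMPLE_S16;
	chunk.endianess = hlrc_server::soundchunk::ENDIAN_LITTLE;
	chunk.rate = 16000;
	chunk.channels = 1;
	chunk.data.resize(16000 * 2, 0); // 2 bytes per sample
	goal.utterance.audio = chunk;

	client.sendGoal(goal);
	client.waitForResult(ros::Duration(30.0));
	return (client.getState() == actionlib::SimpleClientGoalState::SUCCEEDED) ? 0 : 1;
}

Note that a goal carrying a sample type other than the S8/U8/S16/U16/S24/U24 constants handled above makes extract_audio() return false, which clears feedback.result and causes the goal to be reported via setAborted().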