Revision 888a909b
| client/cpp/include/MiddlewareROS.h | | |
|---|---|---|
| 43 | 43 | #include <hlrc_server/speechAction.h> |
| 44 | 44 | #include <actionlib/client/simple_action_client.h> |
| 45 | 45 | #include <actionlib/client/terminal_state.h> |
| 46 | | #include <boost/shared_ptr.hpp> |
| | 46 | #include <memory> |
| 47 | 47 | #endif |
| 48 | 48 | |
| 49 | 49 | #define ROS_ACTION_CALL_TIMEOUT 30.0 |
| client/cpp/include/MiddlewareRSB.h | | |
|---|---|---|
| 86 | 86 | rsb::ListenerPtr hack_listener; |
| 87 | 87 | |
| 88 | 88 | std::mutex pending_tasks_mutex; |
| 89 | | std::vector<boost::shared_ptr<rst::communicationpatterns::TaskState>> pending_tasks; |
| | 89 | std::vector<std::shared_ptr<rst::communicationpatterns::TaskState>> pending_tasks; |
| 90 | 90 | unsigned int say_task_active; |
| 91 | 91 | unsigned int say_task_done; |
| 92 | 92 | |
| ... | ... | |
| 96 | 96 | void set_current_saytask(std::string text); |
| 97 | 97 | |
| 98 | 98 | void check_for_inprotk(); |
| 99 | | void incoming_hack(boost::shared_ptr<std::string> finished_task); |
| | 99 | void incoming_hack(std::shared_ptr<std::string> finished_task); |
| 100 | 100 | #endif |
| 101 | 101 | #endif |
| 102 | 102 | }; |
| client/cpp/src/MiddlewareROS.cpp | | |
|---|---|---|
| 70 | 70 | |
| 71 | 71 | //create node handle |
| 72 | 72 | //ros_node_handle = new ros::NodeHandle(); |
| 73 | | //node_handle = boost::shared_ptr<ros::NodeHandle>(new ros::NodeHandle()); |
| | 73 | //node_handle = std::shared_ptr<ros::NodeHandle>(new ros::NodeHandle()); |
| 74 | 74 | |
| 75 | 75 | printf("> setting up ROS action clients...\n"); |
| 76 | 76 | animation_ac = create_action_client<hlrc_server::animationAction>(scope + "/animation"); |
| client/cpp/src/MiddlewareRSB.cpp | | |
|---|---|---|
| 90 | 90 | } |
| 91 | 91 | |
| 92 | 92 | void MiddlewareRSB::publish_emotion(string scope_target, RobotEmotion e, bool blocking){ |
| 93 | | boost::shared_ptr<rst::animation::EmotionExpression> request(new rst::animation::EmotionExpression()); |
| | 93 | std::shared_ptr<rst::animation::EmotionExpression> request(new rst::animation::EmotionExpression()); |
| 94 | 94 | |
| 95 | 95 | switch(e.value){ |
| 96 | 96 | default: |
| ... | ... | |
| 135 | 135 | } |
| 136 | 136 | |
| 137 | 137 | void MiddlewareRSB::publish_gaze_target(RobotGaze incoming_target, bool blocking){ |
| 138 | | boost::shared_ptr<rst::animation::BinocularHeadGaze> request(new rst::animation::BinocularHeadGaze ()); |
| | 138 | std::shared_ptr<rst::animation::BinocularHeadGaze> request(new rst::animation::BinocularHeadGaze ()); |
| 139 | 139 | |
| 140 | | boost::shared_ptr<rst::geometry::SphericalDirectionFloat> target(new rst::geometry::SphericalDirectionFloat ()); |
| | 140 | std::shared_ptr<rst::geometry::SphericalDirectionFloat> target(new rst::geometry::SphericalDirectionFloat ()); |
| 141 | 141 | target->set_azimuth(incoming_target.pan); |
| 142 | 142 | target->set_elevation(incoming_target.tilt); |
| 143 | 143 | request->set_allocated_target(target.get()); |
| 144 | 144 | |
| 145 | 145 | request->set_eye_vergence(incoming_target.vergence); |
| 146 | 146 | |
| 147 | | boost::shared_ptr<rst::geometry::SphericalDirectionFloat> offset(new rst::geometry::SphericalDirectionFloat()); |
| | 147 | std::shared_ptr<rst::geometry::SphericalDirectionFloat> offset(new rst::geometry::SphericalDirectionFloat()); |
| 148 | 148 | offset->set_azimuth(incoming_target.pan_offset); |
| 149 | 149 | offset->set_elevation(incoming_target.tilt_offset); |
| 150 | 150 | request->set_allocated_offset(offset.get()); |
| ... | ... | |
| 163 | 163 | |
| 164 | 164 | void MiddlewareRSB::publish_mouth_target(RobotMouth target, bool blocking){ |
| 165 | 165 | /* |
| 166 | | boost::shared_ptr<rst::robot::MouthTarget> request(new rst::robot::MouthTarget()); |
| | 166 | std::shared_ptr<rst::robot::MouthTarget> request(new rst::robot::MouthTarget()); |
| 167 | 167 | |
| 168 | 168 | request->set_position_left( target.position_left); |
| 169 | 169 | request->set_position_center(target.position_center); |
| ... | ... | |
| 183 | 183 | } |
| 184 | 184 | |
| 185 | 185 | void MiddlewareRSB::publish_head_animation(RobotHeadAnimation a, bool blocking){ |
| 186 | | boost::shared_ptr<rst::animation::HeadAnimation> request(new rst::animation::HeadAnimation()); |
| | 186 | std::shared_ptr<rst::animation::HeadAnimation> request(new rst::animation::HeadAnimation()); |
| 187 | 187 | |
| 188 | 188 | switch(a.value){ |
| 189 | 189 | default: |
| ... | ... | |
| 234 | 234 | |
| 235 | 235 | void MiddlewareRSB::publish_speech(string text, bool blocking){ |
| 236 | 236 | //say it |
| 237 | | boost::shared_ptr<std::string> request(new string(text)); |
| | 237 | std::shared_ptr<std::string> request(new string(text)); |
| 238 | 238 | |
| 239 | 239 | if (blocking){ |
| 240 | 240 | hlrc_server->call<std::string>("speech", request); |
| server/include/Arbiter.h | | |
|---|---|---|
| 29 | 29 | #pragma once |
| 30 | 30 | #include <string> |
| 31 | 31 | #include <vector> |
| 32 | | #include <boost/shared_ptr.hpp> |
| | 32 | #include <memory> |
| 33 | 33 | #include <mutex> |
| 34 | 34 | #include <humotion/client/client.h> |
| 35 | 35 | #include "EmotionState.h" |
| ... | ... | |
| 49 | 49 | void set_default_emotion(EmotionState e); |
| 50 | 50 | void set_current_emotion(EmotionState e); |
| 51 | 51 | void set_mouth_config(MouthConfig m); |
| 52 | | void speak(boost::shared_ptr<Utterance> u); |
| | 52 | void speak(std::shared_ptr<Utterance> u); |
| 53 | 53 | bool speak_active(); |
| 54 | 54 | |
| 55 | 55 | void set_gaze_target(humotion::GazeState g); |
| 56 | 56 | void set_mouth_target(humotion::MouthState target); |
| 57 | | void play_animation(boost::shared_ptr<Animation> ani); |
| | 57 | void play_animation(std::shared_ptr<Animation> ani); |
| 58 | 58 | |
| 59 | 59 | humotion::GazeState get_gaze_state(); |
| 60 | 60 | humotion::MouthState get_mouth_state(); |
| ... | ... | |
| 85 | 85 | |
| 86 | 86 | MouthConfig mouth_config; |
| 87 | 87 | |
| 88 | | boost::shared_ptr<Utterance> utterance; |
| | 88 | std::shared_ptr<Utterance> utterance; |
| 89 | 89 | |
| 90 | 90 | humotion::GazeState requested_gaze_state; |
| 91 | 91 | humotion::MouthState requested_mouth_state; |
| ... | ... | |
| 102 | 102 | std::mutex animation_mutex; |
| 103 | 103 | Animation active_animation; |
| 104 | 104 | |
| 105 | | typedef std::vector<boost::shared_ptr<Animation>> active_animation_vector_t; |
| | 105 | typedef std::vector<std::shared_ptr<Animation>> active_animation_vector_t; |
| 106 | 106 | std::mutex active_animation_vector_mutex; |
| 107 | 107 | active_animation_vector_t active_animation_vector; |
| 108 | 108 | |
| server/include/AudioPlayer.h | | |
|---|---|---|
| 28 | 28 | |
| 29 | 29 | #pragma once |
| 30 | 30 | #include <string> |
| 31 | | #include <boost/shared_ptr.hpp> |
| | 31 | #include <memory> |
| 32 | 32 | #include "AudioData.h" |
| 33 | 33 | |
| 34 | 34 | class AudioPlayer{ |
| ... | ... | |
| 47 | 47 | CLOSED |
| 48 | 48 | } PLAYBACKSTATE_T; |
| 49 | 49 | |
| 50 | | virtual void play(boost::shared_ptr<AudioData> audio) = 0; |
| | 50 | virtual void play(std::shared_ptr<AudioData> audio) = 0; |
| 51 | 51 | bool is_playing(); |
| 52 | 52 | |
| 53 | 53 | protected: |
| 54 | | boost::shared_ptr<AudioData> audio_data; |
| | 54 | std::shared_ptr<AudioData> audio_data; |
| 55 | 55 | PLAYBACKSTATE_T playback_state; |
| 56 | 56 | std::string audio_driver; |
| 57 | 57 | bool playback_requested; |
| server/include/AudioPlayerLibAO.h | | |
|---|---|---|
| 38 | 38 | ~AudioPlayerLibAO(); |
| 39 | 39 | |
| 40 | 40 | void playback_thread(); |
| 41 | | void play(boost::shared_ptr<AudioData> audio); |
| | 41 | void play(std::shared_ptr<AudioData> audio); |
| 42 | 42 | bool is_playing(); |
| 43 | 43 | |
| 44 | 44 | |
| 45 | 45 | |
| 46 | 46 | private: |
| 47 | | ao_sample_format extract_ao_format(boost::shared_ptr<AudioData> audio); |
| | 47 | ao_sample_format extract_ao_format(std::shared_ptr<AudioData> audio); |
| 48 | 48 | boost::thread *playback_thread_ptr; |
| 49 | 49 | |
| 50 | 50 | //std::vector<char> audio_data; |
| server/include/AudioPlayerRSB.h | | |
|---|---|---|
| 42 | 42 | ~AudioPlayerRSB(); |
| 43 | 43 | |
| 44 | 44 | void playback_thread(); |
| 45 | | void play(boost::shared_ptr<AudioData> audio); |
| | 45 | void play(std::shared_ptr<AudioData> audio); |
| 46 | 46 | bool is_playing(); |
| 47 | 47 | |
| 48 | 48 | private: |
| server/include/Middleware.h | | |
|---|---|---|
| 41 | 41 | virtual void tick() = 0; |
| 42 | 42 | |
| 43 | 43 | void speak_callback(std::string text); |
| 44 | | void utterance_callback(boost::shared_ptr<Utterance> u); |
| | 44 | void utterance_callback(std::shared_ptr<Utterance> u); |
| 45 | 45 | void gaze_callback(humotion::GazeState gaze); |
| 46 | 46 | void mouth_callback(humotion::MouthState mouth); |
| 47 | 47 | void default_emotion_callback(EmotionState emotion_state); |
| 48 | 48 | void current_emotion_callback(EmotionState emotion_state); |
| 49 | | void animation_callback(boost::shared_ptr<Animation> ani); |
| | 49 | void animation_callback(std::shared_ptr<Animation> ani); |
| 50 | 50 | |
| 51 | | virtual boost::shared_ptr<Utterance> tts_call(std::string text) = 0; |
| | 51 | virtual std::shared_ptr<Utterance> tts_call(std::string text) = 0; |
| 52 | 52 | |
| 53 | 53 | protected: |
| 54 | 54 | Arbiter *arbiter; |
| server/include/MiddlewareROS.h | | |
|---|---|---|
| 50 | 50 | #include "ROS/AnimationCallbackWrapperROS.h" |
| 51 | 51 | #include "ROS/SpeechCallbackWrapperROS.h" |
| 52 | 52 | #endif |
| 53 | | #include <boost/shared_ptr.hpp> |
| | 53 | #include <memory> |
| 54 | 54 | |
| 55 | 55 | #define ROS_ACTION_CALL_TIMEOUT 30.0 |
| 56 | 56 | |
| ... | ... | |
| 66 | 66 | void init(){}; |
| 67 | 67 | void tick(){}; |
| 68 | 68 | |
| 69 | | boost::shared_ptr<Utterance> tts_call(std::string text){ |
| 70 | | return boost::shared_ptr<Utterance>(new Utterance()); |
| | 69 | std::shared_ptr<Utterance> tts_call(std::string text){ |
| | 70 | return std::shared_ptr<Utterance>(new Utterance()); |
| 71 | 71 | } |
| 72 | 72 | |
| 73 | 73 | #else |
| ... | ... | |
| 76 | 76 | ~MiddlewareROS(); |
| 77 | 77 | void init(); |
| 78 | 78 | void tick(); |
| 79 | | boost::shared_ptr<Utterance> tts_call(std::string text); |
| | 79 | std::shared_ptr<Utterance> tts_call(std::string text); |
| 80 | 80 | |
| 81 | 81 | private: |
| 82 | | //boost::shared_ptr<ros::NodeHandle> node_handle; |
| | 82 | //std::shared_ptr<ros::NodeHandle> node_handle; |
| 83 | 83 | ros::NodeHandle *ros_node_handle; |
| 84 | 84 | bool tick_necessary; |
| 85 | 85 | //listen to tf tree |
| 86 | 86 | tf2_ros::Buffer tfBuffer; |
| 87 | | boost::shared_ptr<tf2_ros::TransformListener> tfListener; |
| | 87 | std::shared_ptr<tf2_ros::TransformListener> tfListener; |
| 88 | 88 | |
| 89 | 89 | //FIXME: These pointers are never destroyed. Shouldn't they be shared_ptrs? |
| 90 | 90 | AnimationCallbackWrapper *animation_action_server; |
| server/include/MiddlewareRSB.h | | |
|---|---|---|
| 46 | 46 | ~MiddlewareRSB(){} |
| 47 | 47 | void init(){}; |
| 48 | 48 | void tick(){}; |
| 49 | | boost::shared_ptr<Utterance> tts_call(std::string text){ |
| 50 | | return boost::shared_ptr<Utterance>(new Utterance()); |
| | 49 | std::shared_ptr<Utterance> tts_call(std::string text){ |
| | 50 | return std::shared_ptr<Utterance>(new Utterance()); |
| 51 | 51 | } |
| 52 | 52 | |
| 53 | 53 | #else |
| ... | ... | |
| 56 | 56 | ~MiddlewareRSB(); |
| 57 | 57 | void init(); |
| 58 | 58 | void tick(); |
| 59 | | boost::shared_ptr<Utterance> tts_call(std::string text); |
| | 59 | std::shared_ptr<Utterance> tts_call(std::string text); |
| 60 | 60 | |
| 61 | 61 | private: |
| 62 | 62 | void init_callbacks(); |
| server/include/ROS/AnimationCallbackWrapperROS.h | | |
|---|---|---|
| 45 | 45 | hlrc_server::animationGoalConstPtr request = goal; |
| 46 | 46 | printf("> incoming animation (%d)\n", (int)request->target); |
| 47 | 47 | |
| 48 | | boost::shared_ptr<Animation> ani(new Animation()); |
| | 48 | std::shared_ptr<Animation> ani(new Animation()); |
| 49 | 49 | |
| 50 | 50 | //everything is ok, will be cleared on failures |
| 51 | 51 | feedback.result = 1; |
| server/include/ROS/UtteranceCallbackWrapperROS.h | | |
|---|---|---|
| 51 | 51 | //everything is ok, will be cleared on failures |
| 52 | 52 | feedback.result = 1; |
| 53 | 53 | |
| 54 | | boost::shared_ptr<Utterance> utterance(new Utterance()); |
| | 54 | std::shared_ptr<Utterance> utterance(new Utterance()); |
| 55 | 55 | |
| 56 | 56 | //copy values: |
| 57 | 57 | utterance->set_text(request->utterance.text); |
| 58 | 58 | |
| 59 | | boost::shared_ptr<AudioData> audio_data(new AudioData()); |
| | 59 | std::shared_ptr<AudioData> audio_data(new AudioData()); |
| 60 | 60 | if (!extract_audio(request->utterance.audio, audio_data)){ |
| 61 | 61 | feedback.result = 0; |
| 62 | 62 | } |
| ... | ... | |
| 78 | 78 | |
| 79 | 79 | |
| 80 | 80 | //convert ros message audio data to our own implementation |
| 81 | | bool extract_audio(hlrc_server::soundchunk sound_chunk, boost::shared_ptr<AudioData> audio_data){ |
| | 81 | bool extract_audio(hlrc_server::soundchunk sound_chunk, std::shared_ptr<AudioData> audio_data){ |
| 82 | 82 | //extract data: |
| 83 | 83 | unsigned int audio_len = sound_chunk.data.size(); |
| 84 | 84 | char *audio_data_char = (char *)sound_chunk.data.data(); |
| server/include/RSB/AnimationCallbackWrapper.h | | |
|---|---|---|
| 35 | 35 | public: |
| 36 | 36 | AnimationCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 37 | 37 | |
| 38 | | void call(const std::string& method_name, boost::shared_ptr<rst::animation::HeadAnimation> input){ |
| | 38 | void call(const std::string& method_name, std::shared_ptr<rst::animation::HeadAnimation> input){ |
| 39 | 39 | printf("> incomint animation (method = %s)\n",method_name.c_str()); |
| 40 | 40 | |
| 41 | 41 | //fetch animation passed by rsb: |
| 42 | 42 | rst::animation::HeadAnimation *rst_ani = input.get(); |
| 43 | | boost::shared_ptr<Animation> ani(new Animation()); |
| | 43 | std::shared_ptr<Animation> ani(new Animation()); |
| 44 | 44 | |
| 45 | 45 | switch ((int) rst_ani->animation()){ |
| 46 | 46 | case(rst_ani->IDLE): ani->target = Animation::IDLE; break; |
| server/include/RSB/CallbackWrapper.h | | |
|---|---|---|
| 46 | 46 | |
| 47 | 47 | ~CallbackWrapper(){} |
| 48 | 48 | |
| 49 | | virtual void call(const std::string&, boost::shared_ptr<T> param) = 0; |
| | 49 | virtual void call(const std::string&, std::shared_ptr<T> param) = 0; |
| 50 | 50 | |
| 51 | 51 | protected: |
| 52 | 52 | Middleware *mw; |
| server/include/RSB/EmotionCallbackWrapper.h | | |
|---|---|---|
| 34 | 34 | public: |
| 35 | 35 | EmotionCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 36 | 36 | |
| 37 | | void call(const std::string& method_name, boost::shared_ptr<rst::animation::EmotionExpression> input){ |
| | 37 | void call(const std::string& method_name, std::shared_ptr<rst::animation::EmotionExpression> input){ |
| 38 | 38 | printf("> incoming emotion (%s = %d)\n", method_name.c_str(),(int)input->emotion()); |
| 39 | 39 | |
| 40 | 40 | EmotionState emotion_state; |
| server/include/RSB/GazeCallbackWrapper.h | | |
|---|---|---|
| 33 | 33 | class GazeCallbackWrapper : public CallbackWrapper<rst::animation::BinocularHeadGaze>{ |
| 34 | 34 | public: |
| 35 | 35 | GazeCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 36 | | void call(const std::string& method_name, boost::shared_ptr<rst::animation::BinocularHeadGaze> input){ |
| | 36 | void call(const std::string& method_name, std::shared_ptr<rst::animation::BinocularHeadGaze> input){ |
| 37 | 37 | rst::animation::BinocularHeadGaze *gaze = input.get(); |
| 38 | 38 | //printf("> incoming gaze (p=%3.1f t=%3.1f r=%3.1f / v=%3.1f)\n", gaze->pan(), gaze->tilt(), gaze->roll(), gaze->vergence()); |
| 39 | 39 | |
| server/include/RSB/MouthCallbackWrapper.h | | |
|---|---|---|
| 34 | 34 | class MouthCallbackWrapper : public CallbackWrapper<rst::robot::MouthTarget>{ |
| 35 | 35 | public: |
| 36 | 36 | MouthCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 37 | | void call(const std::string& method_name, boost::shared_ptr<rst::robot::MouthTarget> input){ |
| | 37 | void call(const std::string& method_name, std::shared_ptr<rst::robot::MouthTarget> input){ |
| 38 | 38 | rst::robot::MouthTarget *mouth = input.get(); |
| 39 | 39 | |
| 40 | 40 | humotion::MouthState mouth_state; |
| server/include/RSB/SpeechCallbackWrapper.h | | |
|---|---|---|
| 34 | 34 | public: |
| 35 | 35 | SpeechCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 36 | 36 | |
| 37 | | void call(const std::string&, boost::shared_ptr<std::string> _text){ |
| | 37 | void call(const std::string&, std::shared_ptr<std::string> _text){ |
| 38 | 38 | //send to application |
| 39 | 39 | std::string text = *_text.get(); |
| 40 | 40 | //<voice effect=\"Rate(durScale:1.5) + TractScaler(durScale:1.1)\">" + |
| 41 | 41 | |
| 42 | 42 | text = "<maryxml version=\"0.4\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://mary.dfki.de/2002/MaryXML\" xml:lang=\"en_US\">" + |
| 43 | | text + |
| | 43 | text + |
| 44 | 44 | "</maryxml>"; |
| 45 | 45 | mw->speak_callback(text); |
| 46 | 46 | } |
| server/include/RSB/UtteranceCallbackWrapper.h | | |
|---|---|---|
| 34 | 34 | class UtteranceCallbackWrapper : public CallbackWrapper<rst::audition::Utterance>{ |
| 35 | 35 | public: |
| 36 | 36 | UtteranceCallbackWrapper(Middleware *mw) : CallbackWrapper(mw){} |
| 37 | | void call(const std::string&, boost::shared_ptr<rst::audition::Utterance> param){ |
| | 37 | void call(const std::string&, std::shared_ptr<rst::audition::Utterance> param){ |
| 38 | 38 | //convert rsb utterance to out own: |
| 39 | 39 | rst::audition::Utterance *rst_utterance = param.get(); |
| 40 | 40 | printf("> incoming utterance '%s' (%d phone symbols)\n", param->textual_representation().c_str(), (int)rst_utterance->phonemes().element().size()); |
| 41 | 41 | |
| 42 | | boost::shared_ptr<Utterance> utterance(new UtteranceRSB(*rst_utterance)); |
| | 42 | std::shared_ptr<Utterance> utterance(new UtteranceRSB(*rst_utterance)); |
| 43 | 43 | |
| 44 | 44 | //send to application; |
| 45 | 45 | mw->utterance_callback(utterance); |
| server/include/Utterance.h | | |
|---|---|---|
| 30 | 30 | #include <string> |
| 31 | 31 | #include <vector> |
| 32 | 32 | #include <chrono> |
| 33 | | #include <boost/shared_ptr.hpp> |
| | 33 | #include <memory> |
| 34 | 34 | #include "AudioData.h" |
| 35 | 35 | |
| 36 | 36 | class Utterance{ |
| ... | ... | |
| 43 | 43 | |
| 44 | 44 | void set_phoneme_vector(phonemes_vector_t p); |
| 45 | 45 | void set_text(std::string t); |
| 46 | | void set_audio_data(boost::shared_ptr<AudioData> a); |
| | 46 | void set_audio_data(std::shared_ptr<AudioData> a); |
| 47 | 47 | |
| 48 | 48 | void start_playback(); |
| 49 | 49 | bool is_playing(); |
| 50 | 50 | std::string currently_active_phoneme(); |
| 51 | | boost::shared_ptr<AudioData> get_audio_data(); |
| | 51 | std::shared_ptr<AudioData> get_audio_data(); |
| 52 | 52 | std::string get_text(); |
| 53 | 53 | |
| 54 | 54 | protected: |
| 55 | | boost::shared_ptr<AudioData> audio_data; |
| | 55 | std::shared_ptr<AudioData> audio_data; |
| 56 | 56 | std::string text; |
| 57 | 57 | phonemes_vector_t phonemes_vector; |
| 58 | 58 | |
| server/src/Arbiter.cpp | | |
|---|---|---|
| 61 | 61 | gaze_state_animation_restart = true; |
| 62 | 62 | emotion_target = &emotion_config_default; |
| 63 | 63 | |
| 64 | | utterance = boost::shared_ptr<Utterance>(new Utterance()); |
| | 64 | utterance = std::shared_ptr<Utterance>(new Utterance()); |
| 65 | 65 | } |
| 66 | 66 | |
| 67 | 67 | Arbiter::~Arbiter(){ |
| ... | ... | |
| 117 | 117 | } |
| 118 | 118 | |
| 119 | 119 | //! note: this is blocking! |
| 120 | | void Arbiter::play_animation(boost::shared_ptr<Animation> incoming_animation){ |
| | 120 | void Arbiter::play_animation(std::shared_ptr<Animation> incoming_animation){ |
| 121 | 121 | //incoming animation, check if this would conflict with any pending animations: |
| 122 | 122 | //lock access & iterate over vector: |
| 123 | 123 | std::unique_lock<std::mutex> lock_av(active_animation_vector_mutex); |
| 124 | 124 | active_animation_vector_t::iterator it; |
| 125 | 125 | for(it = active_animation_vector.begin(); it<active_animation_vector.end(); it++){ |
| 126 | | boost::shared_ptr<Animation> current_ani = *it; |
| | 126 | std::shared_ptr<Animation> current_ani = *it; |
| 127 | 127 | //check if the running animation collides with the incoming animation: |
| 128 | 128 | if (current_ani->collides_with(incoming_animation.get())){ |
| 129 | 129 | //this would fail, we can not play this animation right now! |
| ... | ... | |
| 150 | 150 | //ok, it finished. we can safely remove it now: |
| 151 | 151 | lock_av.lock(); |
| 152 | 152 | for(it = active_animation_vector.begin(); it<active_animation_vector.end();){ |
| 153 | | boost::shared_ptr<Animation> current_ani = *it; |
| | 153 | std::shared_ptr<Animation> current_ani = *it; |
| 154 | 154 | if (*it == incoming_animation){ |
| 155 | 155 | //printf(">match -> remove incoming ani again\n"); |
| 156 | 156 | it = active_animation_vector.erase(it); |
| ... | ... | |
| 160 | 160 | } |
| 161 | 161 | } |
| 162 | 162 | |
| 163 | | void Arbiter::speak(boost::shared_ptr<Utterance> u){ //, ao_sample_format audio_format, char *audio_data, unsigned int audio_len){ |
| | 163 | void Arbiter::speak(std::shared_ptr<Utterance> u){ //, ao_sample_format audio_format, char *audio_data, unsigned int audio_len){ |
| 164 | 164 | //lock audio playback as such: |
| 165 | 165 | const std::lock_guard<std::mutex> lock_audio(audio_player_mutex); |
| 166 | 166 | |
| ... | ... | |
| 348 | 348 | const std::lock_guard<std::mutex> lock_av(active_animation_vector_mutex); |
| 349 | 349 | active_animation_vector_t::iterator it; |
| 350 | 350 | for(it = active_animation_vector.begin(); it<active_animation_vector.end(); it++){ |
| 351 | | boost::shared_ptr<Animation> current_ani = *it; |
| | 351 | std::shared_ptr<Animation> current_ani = *it; |
| 352 | 352 | |
| 353 | 353 | //gaze_state.dump(); |
| 354 | 354 | current_ani->apply_on_gazestate(&gaze_state); |
| server/src/AudioPlayerLibAO.cpp | | |
|---|---|---|
| 53 | 53 | |
| 54 | 54 | |
| 55 | 55 | //this will return once we start playing |
| 56 | | void AudioPlayerLibAO::play(boost::shared_ptr<AudioData> audio){ |
| | 56 | void AudioPlayerLibAO::play(std::shared_ptr<AudioData> audio){ |
| 57 | 57 | audio_data = audio; |
| 58 | 58 | |
| 59 | 59 | printf("> AudioPlayerLibAO: play() %d samples requested\n",(int)audio_data->samples.size()); |
| ... | ... | |
| 83 | 83 | } |
| 84 | 84 | |
| 85 | 85 | |
| 86 | | ao_sample_format AudioPlayerLibAO::extract_ao_format(boost::shared_ptr<AudioData> audio){ |
| | 86 | ao_sample_format AudioPlayerLibAO::extract_ao_format(std::shared_ptr<AudioData> audio){ |
| 87 | 87 | ao_sample_format ao_format; |
| 88 | 88 | |
| 89 | 89 | //get bits per sample |
| server/src/AudioPlayerRSB.cpp | | |
|---|---|---|
| 63 | 63 | |
| 64 | 64 | |
| 65 | 65 | //this will return once we start playing |
| 66 | | void AudioPlayerRSB::play(boost::shared_ptr<AudioData> _audio_data){ |
| | 66 | void AudioPlayerRSB::play(std::shared_ptr<AudioData> _audio_data){ |
| 67 | 67 | audio_data = _audio_data; |
| 68 | 68 | |
| 69 | 69 | printf("> AudioPlayerRSB: play() %d samples requested\n",(int)audio_data->samples.size()); |
| ... | ... | |
| 75 | 75 | } |
| 76 | 76 | |
| 77 | 77 | //ok, we can play the audio. copy/setup data: |
| 78 | | //audio_data_ptr = boost::shared_ptr<rst::audition::SoundChunk>(boost::make_shared<rst::audition::SoundChunk>(*audio_chunk)); |
| | 78 | //audio_data_ptr = std::shared_ptr<rst::audition::SoundChunk>(boost::make_shared<rst::audition::SoundChunk>(*audio_chunk)); |
| 79 | 79 | playback_requested = true; |
| 80 | 80 | } |
| 81 | 81 | |
| ... | ... | |
| 154 | 154 | } |
| 155 | 155 | |
| 156 | 156 | void AudioPlayerRSB::publish_audio_data(){ |
| 157 | | boost::shared_ptr<rst::audition::SoundChunk> request(new rst::audition::SoundChunk()); |
| | 157 | std::shared_ptr<rst::audition::SoundChunk> request(new rst::audition::SoundChunk()); |
| 158 | 158 | |
| 159 | 159 | request->set_channels(audio_data->sample_channels); |
| 160 | 160 | request->set_data(audio_data->samples.data()); |
| server/src/Middleware.cpp | | |
|---|---|---|
| 44 | 44 | printf("> %s(%s) called\n", __FUNCTION__,text.c_str()); |
| 45 | 45 | |
| 46 | 46 | //call a tts system to convert the text to an utterance: |
| 47 | | boost::shared_ptr<Utterance> utterance = tts_call(text); |
| | 47 | std::shared_ptr<Utterance> utterance = tts_call(text); |
| 48 | 48 | |
| 49 | 49 | //and then process it |
| 50 | 50 | utterance_callback(utterance); |
| 51 | 51 | } |
| 52 | 52 | |
| 53 | | void Middleware::utterance_callback(boost::shared_ptr<Utterance> utterance){ |
| | 53 | void Middleware::utterance_callback(std::shared_ptr<Utterance> utterance){ |
| 54 | 54 | printf("> %s(text=%s) called\n", __FUNCTION__,utterance->get_text().c_str()); |
| 55 | 55 | |
| 56 | 56 | //can we speak this now? |
| ... | ... | |
| 82 | 82 | arbiter->set_current_emotion(emotion_state); |
| 83 | 83 | } |
| 84 | 84 | |
| 85 | | void Middleware::animation_callback(boost::shared_ptr<Animation> ani){ |
| | 85 | void Middleware::animation_callback(std::shared_ptr<Animation> ani){ |
| 86 | 86 | arbiter->play_animation(ani); |
| 87 | 87 | } |
| server/src/MiddlewareROS.cpp | | |
|---|---|---|
| 119 | 119 | } |
| 120 | 120 | |
| 121 | 121 | //call a tts system to convert a string to an utterance |
| 122 | | boost::shared_ptr<Utterance> MiddlewareROS::tts_call(string text){ |
| 123 | | boost::shared_ptr<Utterance> utterance(new Utterance()); |
| | 122 | std::shared_ptr<Utterance> MiddlewareROS::tts_call(string text){ |
| | 123 | std::shared_ptr<Utterance> utterance(new Utterance()); |
| 124 | 124 | |
| 125 | 125 | //double tts_timeout = 1.0; //seconds. DO NOT CHANGE THIS! |
| 126 | 126 | if (tts_ac == NULL){ |
| ... | ... | |
| 143 | 143 | }else{ |
| 144 | 144 | //done, return utterance ptr |
| 145 | 145 | ttsResultConstPtr tts_res = tts_ac->getResult(); |
| 146 | | boost::shared_ptr<Utterance> utterance(new UtteranceROS(tts_res)); |
| | 146 | std::shared_ptr<Utterance> utterance(new UtteranceROS(tts_res)); |
| 147 | 147 | printf("> done. got utterance (text=%s)\n",utterance->get_text().c_str()); |
| 148 | 148 | return utterance; |
| 149 | 149 | } |
| server/src/MiddlewareRSB.cpp | | |
|---|---|---|
| 147 | 147 | } |
| 148 | 148 | |
| 149 | 149 | //call a tts system to convert a string to an utterance |
| 150 | | boost::shared_ptr<Utterance> MiddlewareRSB::tts_call(string text){ |
| | 150 | std::shared_ptr<Utterance> MiddlewareRSB::tts_call(string text){ |
| 151 | 151 | double tts_timeout = 1.0; //seconds. DO NOT CHANGE THIS! |
| 152 | 152 | |
| 153 | 153 | //build request |
| 154 | | boost::shared_ptr<std::string> request(new string(text)); |
| | 154 | std::shared_ptr<std::string> request(new string(text)); |
| 155 | 155 | |
| 156 | 156 | //try to fetch it asynchronously: |
| 157 | 157 | try{ |
| 158 | 158 | RemoteServer::DataFuture<rst::audition::Utterance> future_ptr = tts_server->callAsync<rst::audition::Utterance>("create_utterance", request); |
| 159 | 159 | |
| 160 | 160 | //try to fetch the result |
| 161 | | boost::shared_ptr<rst::audition::Utterance> utterance_ptr = future_ptr.get(tts_timeout); |
| | 161 | std::shared_ptr<rst::audition::Utterance> utterance_ptr = future_ptr.get(tts_timeout); |
| 162 | 162 | |
| 163 | 163 | //done, return utterance ptr |
| 164 | | boost::shared_ptr<Utterance> utterance(new UtteranceRSB(*(utterance_ptr.get()))); |
| | 164 | std::shared_ptr<Utterance> utterance(new UtteranceRSB(*(utterance_ptr.get()))); |
| 165 | 165 | printf("> done. got utterance (text=%s)\n",utterance->get_text().c_str()); |
| 166 | 166 | return utterance; |
| 167 | 167 | |
| ... | ... | |
| 172 | 172 | } |
| 173 | 173 | |
| 174 | 174 | printf("> failed... got no utterance\n"); |
| 175 | | boost::shared_ptr<Utterance> utterance(new Utterance()); |
| | 175 | std::shared_ptr<Utterance> utterance(new Utterance()); |
| 176 | 176 | return utterance; |
| 177 | 177 | } |
| 178 | 178 | |
| server/src/Utterance.cpp | | |
|---|---|---|
| 28 | 28 | |
| 29 | 29 | #include "Utterance.h" |
| 30 | 30 | using namespace std; |
| 31 | | using namespace boost; |
| 32 | 31 | |
| 33 | 32 | #define DEBUG_PRINT_PHONEMES 1 |
| 34 | 33 | |
| 35 | 34 | Utterance::Utterance(){ |
| 36 | 35 | playing = false; |
| 37 | 36 | text = "UNINITIALISED UTTERANCE"; |
| 38 | | audio_data = boost::shared_ptr<AudioData>(new AudioData()); |
| | 37 | audio_data = std::shared_ptr<AudioData>(new AudioData()); |
| 39 | 38 | } |
| 40 | 39 | |
| 41 | 40 | void Utterance::set_phoneme_vector(phonemes_vector_t p){ |
| ... | ... | |
| 46 | 45 | text = t; |
| 47 | 46 | } |
| 48 | 47 | |
| 49 | | void Utterance::set_audio_data(boost::shared_ptr<AudioData> a){ |
| | 48 | void Utterance::set_audio_data(std::shared_ptr<AudioData> a){ |
| 50 | 49 | audio_data = a; |
| 51 | 50 | } |
| 52 | 51 | |
| ... | ... | |
| 57 | 56 | return text; |
| 58 | 57 | } |
| 59 | 58 | |
| 60 | | boost::shared_ptr<AudioData> Utterance::get_audio_data(){ |
| | 59 | std::shared_ptr<AudioData> Utterance::get_audio_data(){ |
| 61 | 60 | return audio_data; |
| 62 | 61 | } |
| 63 | 62 | |