Behaviours

EmysBehaviour.u contains various robot competencies, i.e. more complex behaviours built on one or more of his low-level modules. The following listings show some examples of EMYS behaviours.

//first we add the competency component to the robot structure
do(Global) {
if (!robot.hasLocalSlot("competency")) robot.addComponent("competency");
//function making EMYS act according to his current emotions
function a_RActEmotions(){
 
//here should be error checking
var _e_tmp = ["",0];//variable holding the last read emotion (name and intensity)
var _e_tag = Tag.new();
 
loop {
if (_e_tmp != robot.emotion.Get()) {//if current emotion has changed
_e_tmp = robot.emotion.Get();//get new emotion
_e_tag.stop;
_e_tag:detach({ //execute in the background
 
switch (_e_tmp[0])
{
//every emotion changes the robot's facial expression, the frequency and intensity of his blinking and breathing, and the speech parameters (tempo, pitch)
case "happy": {
robot.body.neck.head.ExpSmile(_e_tmp[1]*3,0.5);
robot.dialogue.SetEmotion("happy", _e_tmp[1]*3.3),
robot.body.neck.head.ActBlinking(50-10*_e_tmp[1],2),
robot.body.neck.head.ActBreath(-8,3-_e_tmp[1]*0.33),
},
case "content": {
robot.body.neck.head.ExpSmile(_e_tmp[1]*3,0.5);
robot.dialogue.SetEmotion("happy", _e_tmp[1]*3),
robot.body.neck.head.ActBlinking(50-10*_e_tmp[1],2),
robot.body.neck.head.ActBreath(-8,3-_e_tmp[1]*0.33),
},
case "sad": {      
robot.body.neck.head.ExpSad(_e_tmp[1]*3,0.8);
robot.dialogue.SetEmotion("sad", _e_tmp[1]*3.3),
robot.body.neck.head.ActBlinking(20,2+_e_tmp[1]),
robot.body.neck.head.ActBreath(-5,2+_e_tmp[1]*0.33),
},
//other emotions
},//end of switch
 
}), // detach
 
} else {
sleep(30ms);
};
}, // loop
}; // a_RActEmotions
 
//another complex function, ActAlive, enables the robot to look around (with a speed reflecting its emotions), follow a person when one is visible, and react to detected movement
function a_RAlive(_config){
 
//here should be error checking
//creating various code tags
if (!hasSlot("t_follow7")) var Global.t_follow7=Tag.new() else { t_follow7.unfreeze| t_follow7.stop; };
if (!hasSlot("t_follow7A")) var Global.t_follow7A=Tag.new() else { t_follow7A.unfreeze| t_follow7A.stop; };
if (!hasSlot("t_follow7B")) var Global.t_follow7B=Tag.new() else { t_follow7B.unfreeze| t_follow7B.stop; };
if (!hasSlot("t_follow7C")) var Global.t_follow7C=Tag.new() else { t_follow7C.unfreeze| t_follow7C.stop; };
 
t_follow7:{
//select the needed detectors: disable unused ones, enable those used
if (_config){
if (robot.video.hasLocalSlot("objectDetector1")) 
robot.video.objectDetector1.enable=false;
if (robot.video.hasLocalSlot("color1Detector"))  {
robot.video.color1Detector.enable=false;
robot.video.color2Detector.enable=false;
robot.video.color3Detector.enable=false;
robot.video.color4Detector.enable=false;
};
if (robot.video.hasLocalSlot("facetDetector"))   
robot.video.facetDetector.enable=false;   
 
// enable used detectors
robot.video.moveDetector.enable=true;
robot.video.humanDetector.enable=true;
robot.video.humanDetector.faceTrackingPause=false;  
},
if (robot.hasLocalSlot("emotion"))  {
robot.competency.ActEmotions(),//the robot will act according to its emotions
} else {//when there is no emotion component, act neutrally
robot.body.neck.head.ExpNormal(1);
robot.body.neck.head.ActBlinking(100,2),
robot.body.neck.head.ActBreath(8,3),
},
 
t_follow7A: {  //following the detected person
robot.competency.ActFollowPerson(false),
at (robot.video.humanDetector.visible~1) //human visible for more than 1s
{
//robot.log.Set("HUMAN DETECTED",[]);
t_follow7B.freeze;
t_follow7C.freeze;
if (robot.video.hasLocalSlot("moveDetector")) robot.video.moveDetector.enable=false;
},
at (!robot.video.humanDetector.visible~1) //human not visible for more than 1s
{
//robot.log.Set("HUMAN LOST",[]);
if (robot.video.hasLocalSlot("moveDetector")) robot.video.moveDetector.enable=true;
t_follow7B.unfreeze;
};
},
 
t_follow7B: {//make background reactions - look in the direction of the sound source
robot.competency.ActBackgroundReaction(false),
 
at (((robot.video.moveDetector.visible)||(robot.audio.detector.sourceConfidence>0.5))) 
{
// robot.log.Set("MOVE OR NOISY DETECTED",[]);
t_follow7C.freeze;  
},
 
at (((!robot.video.moveDetector.visible)&&(!(robot.audio.detector.sourceConfidence>0.5)))~1) 
{
//robot.log.Set("MOVE OR NOISY LOST",[]);
t_follow7C.unfreeze;
};
 
},
 
t_follow7C: {//Look around with speed and intensity reflecting robot emotions
if (robot.hasLocalSlot("emotion"))  {
robot.competency.ActAroundWithEmotions(),
} else {
robot.body.neck.head.ActAround(6,5,3),
},
},
}, // t_follow7:
}; // a_RAlive
}; // do(Global)
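
Such a competency is normally started and stopped through a tag. The sketch below is only illustrative: it assumes that a_RAlive is exposed as robot.competency.ActAlive (analogous to ActFollowPerson and ActEmotions called above), and the tag name t_alive and the 60-second duration are arbitrary.

//minimal usage sketch (assumed binding: a_RAlive -> robot.competency.ActAlive)
var Global.t_alive = Tag.new();
t_alive: robot.competency.ActAlive(true),//start the behaviour in the background; true reconfigures the detectors
sleep(60);//let the robot "live" for a minute
t_alive.stop;//stop the behaviour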
 
 

An example of cooperation between robot.video and robot.ml is learning colors or faces.

//learning colors
function  b_RLearnColorFromRightHand(colorName){
//here should be checking if all needed robot components are available    
 
if (robot.video.humanDetector.hand[right].visible)//if right hand was detected
{
robot.log.Set("LEARNING: LEARN COLOR FROM RIGHT HAND",[robot.video.humanDetector.hand[right].color.value,colorName]);//logging robot activity
return robot.ml.colorLearning.Learn(robot.video.humanDetector.hand[right].color.value,colorName);//learn a new sample of the color named by the function argument, taken from the object held in the user's right hand
} else {
return false;
};
};
//loading the knowledge base
function b_RLoadLearningKnowledge(){
//here should be error checking
robot.log.Set("LEARNING: LOAD COLOR KNOWLEDGE",["saved/Color_Knowledge.xml"]);
return robot.ml.colorLearning.Load("saved/Color_Knowledge.xml");//load the previously gathered knowledge from file
};
//learning faces
function  b_RLearnFace(name){
//error checking    
 
g:loop{
if (robot.video.humanDetector.head.visible)  robot.ml.faceLearning.Learn(robot.video.humanDetector.head.image, name);//if the user's face is visible, use it as a new learning sample labelled with the function argument
sleep(0.02);
},
 
sleep(3);
g.stop;//stop learning user face after 3 seconds
return Eigenfaces1.updateDatabase(80);
};
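
The learning functions above could be combined as in the short sketch below; the color name "red", the user name "John" and the order of calls are only illustrative.

//restore the previously gathered color knowledge
b_RLoadLearningKnowledge();
//add a new sample of the color "red", taken from the object held in the user's right hand
b_RLearnColorFromRightHand("red");
//collect face samples of user "John" for about 3 seconds and update the recognition database
b_RLearnFace("John");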
 

Following a person is an example of combining video-based information with EMYS head movement.

function a_RFollowPerson(_config){
//here should be error checking  
 
if (!hasSlot("t_follow5")) var Global.t_follow5=Tag.new() else { t_follow5.unfreeze| t_follow5.stop; };
 
t_follow5:{
if (_config){
// disable not used detectors
if (robot.video.hasLocalSlot("objectDetector1")) 
robot.video.objectDetector1.enable=false;
if (robot.video.hasLocalSlot("color1Detector"))  {
robot.video.color1Detector.enable=false;
robot.video.color2Detector.enable=false;
robot.video.color3Detector.enable=false;
robot.video.color4Detector.enable=false;
};
if (robot.video.hasLocalSlot("facetDetector"))   
robot.video.facetDetector.enable=false;
if (robot.video.hasLocalSlot("moveDetector"))   
robot.video.moveDetector.enable=false;
 
// enable used detectors
robot.video.humanDetector.enable=true;
robot.video.humanDetector.faceTrackingPause=false;
},
loop {//the loop is infinite, so the function should be called under a tag
if (robot.video.humanDetector.head.visible) {//user head was detected
if (robot.hasLocalSlot("emotion"))  //robot has emotion component which will affect movement speed
{      robot.body.neck.head.MoveAtSpeed(robot.video.humanDetector.head.orientation[0],robot.video.humanDetector.head.orientation[1]-8,40+robot.emotion.movingSpeed*2),//move robot head based on read user head position with speed based on current movingSpeed
} else { robot.body.neck.head.MoveAtSpeed(robot.video.humanDetector.head.orientation[0],robot.video.humanDetector.head.orientation[1]-8,55),
},
},
sleep(30ms);
},
}, // t_follow5:
};
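
As with ActAlive, this competency runs an infinite loop and is meant to be called under a tag. The sketch below assumes a_RFollowPerson is the function behind robot.competency.ActFollowPerson (the name called in the ActAlive listing above); the tag name t_track and the 30-second duration are only illustrative.

var Global.t_track = Tag.new();
t_track: robot.competency.ActFollowPerson(true),//reconfigure the detectors and start following the user's head
sleep(30);
t_track.stop;//stop following after 30 seconds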
 

The following listing shows competencies which take a photo and then post it on Facebook.

function b_RTakePhoto(file_name){
//here should be error checking    
robot.audio.player.Play(_uFilesDir+"sounds/samples/shutter.wav"),//play the camera shutter sound
robot.video.photo.Take("kinect");//take photo from Kinect
robot.video.photo.Save(file_name);//and save it
};
//post the saved photo on Facebook
function b_RPostPhotoOnFacebook(){
//here should be error checking
return robot.network.facebook["user"].photo.PostOnTimeline(robot.dialogue.speech_sequences[370][robot.dialogue.language],"saved/photo.jpg");//post the saved photo on the user's timeline, with a comment taken from the speech sequence vector
}; 
//the comment text stored in the speech sequence vector
robot.dialogue.speech_sequences[370][_lang_tmp] = "Photo published by my robot.";
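
Put together, taking and publishing a photo could look like the sketch below; the file path is the one hard-coded in b_RPostPhotoOnFacebook.

//take a photo with the Kinect and publish it on the user's Facebook timeline
b_RTakePhoto("saved/photo.jpg");
b_RPostPhotoOnFacebook();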
 

 

 
