cmake_minimum_required(VERSION 2.8.3)
project(social_nets)
find_package(catkin REQUIRED COMPONENTS
message_generation
message_runtime
image_transport
cv_bridge
std_msgs
geometry_msgs
sensor_msgs
roscpp
rospy
tf2
tf2_ros
)
catkin_python_setup()
find_package(Boost 1.59 REQUIRED COMPONENTS filesystem system)
find_package(OpenCV 3.2 EXACT REQUIRED) # exact version required for the local machine
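# OpenFace is assumed to be installed system-wide; the four find_library calls
# below locate its component libraries, and find_path locates its headers.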
find_library(OPENFACE_LD_LIB LandmarkDetector)
find_library(OPENFACE_FA_LIB FaceAnalyser)
find_library(OPENFACE_GA_LIB GazeAnalyser)
find_library(OPENFACE_UT_LIB Utilities)
set(OPENFACE_LIBS ${OPENFACE_LD_LIB} ${OPENFACE_FA_LIB} ${OPENFACE_GA_LIB} ${OPENFACE_UT_LIB})
find_path(OPENFACE_INCLUDE_DIR LandmarkDetectorModel.h PATH_SUFFIXES OpenFace)
add_message_files(FILES ActionUnit.msg Face.msg Faces.msg GazeDir.msg BodyLang.msg)
generate_messages(DEPENDENCIES std_msgs geometry_msgs)
catkin_package(CATKIN_DEPENDS message_runtime std_msgs geometry_msgs sensor_msgs roscpp rospy) # required for catkin to export the package
add_definitions(-std=c++11)
add_definitions(-g -pg)
include_directories(
include
${catkin_INCLUDE_DIRS}
${OPENFACE_INCLUDE_DIR}
${OpenBLAS_INCLUDE_DIR}
${Boost_INCLUDE_DIRS}
${Boost_INCLUDE_DIRS}/boost
${OpenCV_INCLUDE_DIRS}
)
add_executable(openface2_ros_listener src/openface2_ros_listener.cpp)
target_link_libraries(openface2_ros_listener
${catkin_LIBRARIES}
)
add_executable(openface2_ros src/openface2_ros.cpp)
add_dependencies(openface2_ros ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(openface2_ros social_nets_generate_messages_cpp)
add_executable(openface2_ros_single src/openface2_ros_single.cpp)
add_dependencies(openface2_ros_single ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(openface2_ros_single social_nets_generate_messages_cpp)
target_link_libraries(openface2_ros
${catkin_LIBRARIES}
${OPENFACE_LIBS}
openblas
dlib
${Boost_LIBRARIES}
${OpenCV_LIBRARIES}
)
install(TARGETS openface2_ros
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
target_link_libraries(openface2_ros_single
${catkin_LIBRARIES}
${OPENFACE_LIBS}
openblas
dlib
${Boost_LIBRARIES}
${OpenCV_LIBRARIES}
)
install(TARGETS openface2_ros_single
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
hri_matcher
===========
A ROS node that generates unique IDs for facial and skeletal data, then matches them in real time to their detected locations using OpenVINO's person re-identification model. A unique person ID is then generated and assigned to that person's facial and skeletal IDs. If either the face or the skeleton stops being detected, the matcher keeps publishing the last detected message.
The detection relies on the open-source [Intel OpenVINO toolkit](https://docs.openvinotoolkit.org/).
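Each matched feature is published under a per-ID topic. The naming scheme below is taken from the node itself; the 5-character IDs are random UUID prefixes, so the ones you see when running it will differ:
```
/humans/persons/<person_id>
/humans/faces/<face_id>/landmarks
/humans/faces/<face_id>/expression
/humans/faces/<face_id>/facs
/humans/bodies/<body_id>/skeleton2D
/humans/bodies/<body_id>/attitude
```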
Installation
------------
- Follow first the general [ROS4HRI installation instructions](https://caidin.brl.ac.uk/ROS4HRI/main/blob/master/README.md).
- Install the [`hri_msgs` package](https://git.brl.ac.uk/ROS4HRI/hri_msgs/blob/master/README.md)
- Install the [`hri_faces` package](https://caidin.brl.ac.uk/ROS4HRI/hri_faces/blob/master/README.md)
- Install the [`hri_skeletons` package](https://caidin.brl.ac.uk/ROS4HRI/hri_skeletons/blob/master/README.md)
- then:
```
$ cd ~/src
$ git clone https://git.brl.ac.uk/ROS4HRI/hri_matcher.git
$ cd hri_matcher
$ mkdir build && cd build
$ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=~/dev .. && make && make install
```
You can test this node by running:
```
$ roslaunch video_stream_opencv camera.launch video_stream_provider:=0 camera_name:=camera
$ roslaunch hri_matcher hri_matcher.launch
$ rostopic list
```
which should display something like the following if there are three people in the frame:
![Topics](docs/Screenshot_topics.png)
<launch>
<include file="$(env HOME)/catkin_ws/src/ros_openvino_toolkit/vino_launch/launch/pipeline_people_oss.launch" />
<arg name="image_topic" default="/camera/image_raw" />
<arg name="publish_viz" default="true" />
<node name="humans_matcher" pkg="hri_matcher" type="humans_matcher.py" output="screen" />
</launch>
<?xml version="1.0"?>
<package>
<name>social_nets</name>
<version>1.0.0</version>
<description>hri_matcher node</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="os19105@bristol.ac.uk">Youssef</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>Apache 2.0</license>
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <run_depend>roscpp</run_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use run_depend for packages you need at runtime: -->
<!-- <run_depend>message_runtime</run_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>image_transport</build_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>message_generation</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>tf2</build_depend>
<build_depend>tf2_ros</build_depend>
<build_depend>sqlite</build_depend>
<run_depend>message_runtime</run_depend>
<run_depend>roscpp</run_depend>
<run_depend>rospy</run_depend>
<run_depend>std_msgs</run_depend>
<run_depend>geometry_msgs</run_depend>
<run_depend>sensor_msgs</run_depend>
<run_depend>image_transport</run_depend>
<run_depend>cv_bridge</run_depend>
<run_depend>tf2</run_depend>
<run_depend>tf2_ros</run_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>
#! /usr/bin/python
import rospy
import sqlite3
import datetime
import time
import tf2_ros
import StringIO
import uuid
import numpy as np
import roslib; roslib.load_manifest('visualization_marker_tutorials')
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import math
import geometry_msgs.msg
from functools import partial
#published messages
from hri_msgs.msg import AgeAndGender
from hri_msgs.msg import Expression
from hri_msgs.msg import FacialActionUnits
from hri_msgs.msg import FacialLandmarks
from hri_msgs.msg import GazeSenderReceiver
from hri_msgs.msg import GazesStamped
from hri_msgs.msg import Person
from hri_msgs.msg import BodyPose
from std_msgs.msg import String
#subscribed messages
from hri_msgs.msg import PointOfInterest2D
from hri_msgs.msg import Skeleton2D
from social_nets.msg import BodyLang
from social_nets.msg import Face
from social_nets.msg import Faces
from vino_people_msgs.msg import ReidentificationStamped
from vino_people_msgs.msg import Reidentification
from vino_people_msgs.msg import AgeGenderStamped
from vino_people_msgs.msg import AgeGender
from vino_people_msgs.msg import EmotionsStamped
from tf2_msgs.msg import TFMessage
max_heads = 7
window_width = 640
window_height = 480
markerArray = MarkerArray()
POI = PointOfInterest2D()
face_IDs = []
bod_IDs = []
person_IDs = []
faces_count = 0
feature_dict = {}
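# Pre-generate fixed pools of 5-character random IDs (UUID prefixes), one slot per
# possible head, so each face, body and person keeps a stable ID across frames.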
for head_id in range(max_heads):
face_IDs.append(str(uuid.uuid4())[:5])
bod_IDs.append(str(uuid.uuid4())[:5])
person_IDs.append(str(uuid.uuid4())[:5])
# The class has one callback per model in use; a callback must be created for each model,
# and a condition added below (in process) to relate the data extracted by the callback to the re-identification table.
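# A minimal sketch of that pattern for a hypothetical extra model (the message
# fields and 'headpose_roi_x' below are assumptions, not part of this package):
#
#   def headpose_callback(self, data):
#       self.headpose_roi_x = data.roi.x_offset  # cache whatever matching needs
#       self.process()
#
# and, inside process(), a condition against the re-identification ROI:
#
#   if x_offset < self.headpose_roi_x < x_offset + width:
#       ...  # this head pose belongs to the re-identified person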
class ListenCompare:
def __init__(self):
self.diff_face_ids = []
self.diff_body_ids = []
self.old_faces_ids = []
self.old_bodies_ids = []
self.faces_landmarks = []
self.BODYDATA = []
self.BODYLANG = None
self.agegender_data = []
self.emotion_data = None
self.PERSONDATA = None
self.trans = []
self.landmarks2d_x = None
self.face_pose = None
self.NOSE_X = 0
self.NOSE_Y = 0
self.NECK_X = 0
self.NECK_Y = 0
self.RIGHT_SHOULDER_X = 0
self.RIGHT_SHOULDER_Y = 0
self.RIGHT_ELBOW_X = 0
self.RIGHT_ELBOW_Y = 0
self.RIGHT_WRIST_X = 0
self.RIGHT_WRIST_Y = 0
self.LEFT_SHOULDER_X = 0
self.LEFT_SHOULDER_Y = 0
self.LEFT_ELBOW_X = 0
self.LEFT_ELBOW_Y = 0
self.LEFT_WRIST_X = 0
self.LEFT_WRIST_Y = 0
self.RIGHT_HIP_X = 0
self.RIGHT_HIP_Y = 0
self.RIGHT_KNEE_X = 0
self.RIGHT_KNEE_Y = 0
self.RIGHT_ANKLE_X = 0
self.RIGHT_ANKLE_Y = 0
self.LEFT_HIP_X = 0
self.LEFT_HIP_Y = 0
self.LEFT_KNEE_X = 0
self.LEFT_KNEE_Y = 0
self.LEFT_ANKLE_X = 0
self.LEFT_ANKLE_Y = 0
self.LEFT_EYE_X = 0
self.LEFT_EYE_Y = 0
self.RIGHT_EYE_X = 0
self.RIGHT_EYE_Y = 0
self.LEFT_EAR_X = 0
self.LEFT_EAR_Y = 0
self.RIGHT_EAR_X = 0
self.RIGHT_EAR_Y = 0
self.count_faces = 0
# Extracts landmarks, action units, face count, left and right eye gaze, and head pose.
def faces_callback(self,data):
#iteration through the faces
self.count_faces = data.count
for faces_2D in data.faces:
self.landmarksArray_X = []
self.landmarksArray_Y = []
self.FAUs_Numbs = []
self.FAUs_presence = []
self.FAUS_intensity = []
self.landmarks2d_x = faces_2D.landmarks_2d[30].x # landmark 30 is the nose tip in the 68-point scheme
self.face_pose = [faces_2D.head_pose.position.x/1000 , faces_2D.head_pose.position.y/1000 , faces_2D.head_pose.position.z/1000] # OpenFace reports head pose in mm; convert to metres
for all_landmarks in faces_2D.landmarks_2d:
# print(all_landmarks.x)
self.landmarksArray_X.append(all_landmarks.x)
self.landmarksArray_Y.append(all_landmarks.y)
for FAUs in faces_2D.action_units:
self.FAUs_Numbs.append(FAUs.name[2] + FAUs.name[3]) # AU number from names like "AU01" -> "01"
self.FAUs_presence.append(FAUs.presence)
self.FAUS_intensity.append(FAUs.intensity)
# print(self.FAUs_Numbs)
# print(self.landmarksArray_X + self.landmarksArray_Y)
# print('-------------------------')
self.process()
# age and gender data for each person.
def agegender_callback(self,data):
self.agegender_data = data.objects #array of people
self.process()
def tf_callback(self,data):
self.tf_head_ID = data.transforms[0].child_frame_id
print(self.tf_head_ID)
self.process()
#emotion data for each person
def emotion_callback(self,data):
for emotion_data in data.emotions:
self.emotion = emotion_data.emotion
self.emotion_roi_x = emotion_data.roi.x_offset
self.emotion_roi_y = emotion_data.roi.y_offset
self.emotion_roi_width = emotion_data.roi.width
self.process()
# Function for all the skeletal points
def body_callback(self,data):
self.BODYDATA = data.skeleton
self.LEFT_EYE_X = data.skeleton[data.LEFT_EYE].x
self.LEFT_EYE_Y = data.skeleton[data.LEFT_EYE].y
self.NOSE_X = data.skeleton[data.NOSE].x
self.NOSE_Y = data.skeleton[data.NOSE].y
self.NECK_X = data.skeleton[data.NECK].x
self.NECK_Y = data.skeleton[data.NECK].y
self.RIGHT_SHOULDER_X = data.skeleton[data.RIGHT_SHOULDER].x
self.RIGHT_SHOULDER_Y = data.skeleton[data.RIGHT_SHOULDER].y
self.RIGHT_ELBOW_X = data.skeleton[data.RIGHT_ELBOW].x
self.RIGHT_ELBOW_Y = data.skeleton[data.RIGHT_ELBOW].y
self.RIGHT_WRIST_X = data.skeleton[data.RIGHT_WRIST].x
self.RIGHT_WRIST_Y = data.skeleton[data.RIGHT_WRIST].y
self.LEFT_SHOULDER_X = data.skeleton[data.LEFT_SHOULDER].x
self.LEFT_SHOULDER_Y = data.skeleton[data.LEFT_SHOULDER].y
self.LEFT_ELBOW_X = data.skeleton[data.LEFT_ELBOW].x
self.LEFT_ELBOW_Y = data.skeleton[data.LEFT_ELBOW].y
self.LEFT_WRIST_X = data.skeleton[data.LEFT_WRIST].x
self.LEFT_WRIST_Y = data.skeleton[data.LEFT_WRIST].y
self.RIGHT_HIP_X = data.skeleton[data.RIGHT_HIP].x
self.RIGHT_HIP_Y = data.skeleton[data.RIGHT_HIP].y
self.RIGHT_KNEE_X = data.skeleton[data.RIGHT_KNEE].x
self.RIGHT_KNEE_Y = data.skeleton[data.RIGHT_KNEE].y
self.RIGHT_ANKLE_X = data.skeleton[data.RIGHT_ANKLE].x
self.RIGHT_ANKLE_Y = data.skeleton[data.RIGHT_ANKLE].y
self.LEFT_HIP_X = data.skeleton[data.LEFT_HIP].x
self.LEFT_HIP_Y = data.skeleton[data.LEFT_HIP].y
self.LEFT_KNEE_X = data.skeleton[data.LEFT_KNEE].x
self.LEFT_KNEE_Y = data.skeleton[data.LEFT_KNEE].y
self.LEFT_ANKLE_X = data.skeleton[data.LEFT_ANKLE].x
self.LEFT_ANKLE_Y = data.skeleton[data.LEFT_ANKLE].y
self.RIGHT_EYE_X = data.skeleton[data.RIGHT_EYE].x
self.RIGHT_EYE_Y = data.skeleton[data.RIGHT_EYE].y
self.LEFT_EAR_X = data.skeleton[data.LEFT_EAR].x
self.LEFT_EAR_Y = data.skeleton[data.LEFT_EAR].y
self.RIGHT_EAR_X = data.skeleton[data.RIGHT_EAR].x
self.RIGHT_EAR_Y = data.skeleton[data.RIGHT_EAR].y
self.process()
# function for the upper body pose
def BodyPrediction_callback(self,data):
self.BODYLANG = data.lang
self.body_ID = data.person
self.process()
# reidentification model
def person_callback(self,data):
self.PERSONDATA = data.reidentified_vector[0]
self.process()
# TODO: store the ids for all the bodies and faces for them to be added in the IDs topic
# TODO: store the features themselves so the latest position can be published.
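# feature_dict caches the last published messages per 5-char ID:
#   feature_dict[face_id] = [FacialActionUnits, FacialLandmarks, Expression]
#   feature_dict[body_id] = [Skeleton2D, BodyPose]
# so the features of IDs that drop out can be republished (see below).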
def process(self):
# process() is called from every callback, so return early until each subscribed
# source has delivered at least one message; the attributes read below do not
# exist before their callbacks have fired.
if self.PERSONDATA is None or self.BODYLANG is None or self.landmarks2d_x is None:
return
if not hasattr(self, 'tf_head_ID') or not hasattr(self, 'body_ID') or not hasattr(self, 'emotion'):
return
faces_ids_update = []
bodies_ids_update = []
for faces_count in range(self.count_faces):
faces_ids_update.append(face_IDs[faces_count])
bodies_ids_update.append(bod_IDs[faces_count])
self.person_ID = person_IDs[int(self.PERSONDATA.identity[3])] # identity string carries the person index at char 3 (single-digit assumption)
self.person_ID_topic = "humans/persons/%s" % (self.person_ID)
Face_ID_unique = face_IDs[int(self.tf_head_ID[4])] # tf child frame id carries the face index at char 4 (single-digit assumption)
Face_ID = "humans/faces/" + Face_ID_unique
Body_ID_unique = bod_IDs[self.body_ID]
Body_ID = "humans/bodies/" + Body_ID_unique # IDs are 5 characters long
x_offset = self.PERSONDATA.roi.x_offset
y_offset = self.PERSONDATA.roi.y_offset
width = self.PERSONDATA.roi.width
height = self.PERSONDATA.roi.height
x_b = self.NOSE_X
y_b = self.NOSE_Y
# publish face features only when the detected face (nose-tip x) falls inside the emotion ROI
if self.emotion_roi_x < self.landmarks2d_x < self.emotion_roi_x + self.emotion_roi_width:
FL = FacialLandmarks()
FL.landmarks = []
for LA in range(len(self.landmarksArray_X)):
# a fresh point per landmark: reusing one shared instance would alias every entry
poi = PointOfInterest2D()
poi.x = self.landmarksArray_X[LA]
poi.y = self.landmarksArray_Y[LA]
FL.landmarks.append(poi)
# the publisher is (re)created on every call because the topic name embeds the face ID
self.landmarks_publisher = rospy.Publisher(Face_ID + "/landmarks" , FacialLandmarks ,queue_size=1)
self.landmarks_publisher.publish(FL)
Exp = Expression()
OpenVino_emo = str (self.emotion.upper())
print(OpenVino_emo)
Exp.expression = getattr(Exp, OpenVino_emo) # map the OpenVINO emotion label onto the matching Expression constant
self.Expression_publisher = rospy.Publisher(Face_ID + "/expression" ,Expression ,queue_size=1)
self.Expression_publisher.publish(Exp)
self.FAU_publisher = rospy.Publisher(Face_ID + "/facs" ,FacialActionUnits,queue_size=1)
FAUs_data = FacialActionUnits()
FAUs_data.FAU = [int(i) for i in self.FAUs_Numbs]
# FAUs_data.presence = self.FAUs_presence
FAUs_data.intensity = self.FAUS_intensity
# print(FAUs_data)
self.FAU_publisher.publish(FAUs_data)
# holding the old face data
feature_dict[Face_ID_unique] = [FAUs_data,FL,Exp]
self.Skele2D_publisher = rospy.Publisher(Body_ID + "/skeleton2D" ,Skeleton2D,queue_size=10)
skele2d = Skeleton2D()
skele2d.skeleton = [PointOfInterest2D() for _ in range(18)] # one distinct point per joint; [POI]*18 would alias a single instance
skele2d.skeleton[skele2d.NOSE].x= self.NOSE_X
skele2d.skeleton[skele2d.NOSE].y= self.NOSE_Y
skele2d.skeleton[skele2d.NECK].x= self.NECK_X
skele2d.skeleton[skele2d.NECK].y= self.NECK_Y
skele2d.skeleton[skele2d.RIGHT_SHOULDER].x= self.RIGHT_SHOULDER_X
skele2d.skeleton[skele2d.RIGHT_SHOULDER].y= self.RIGHT_SHOULDER_Y
skele2d.skeleton[skele2d.RIGHT_ELBOW].x= self.RIGHT_ELBOW_X
skele2d.skeleton[skele2d.RIGHT_ELBOW].y= self.RIGHT_ELBOW_Y
skele2d.skeleton[skele2d.RIGHT_WRIST].x= self.RIGHT_WRIST_X
skele2d.skeleton[skele2d.RIGHT_WRIST].y= self.RIGHT_WRIST_Y
skele2d.skeleton[skele2d.LEFT_SHOULDER].x= self.LEFT_SHOULDER_X
skele2d.skeleton[skele2d.LEFT_SHOULDER].y= self.LEFT_SHOULDER_Y
skele2d.skeleton[skele2d.LEFT_ELBOW].x= self.LEFT_ELBOW_X
skele2d.skeleton[skele2d.LEFT_ELBOW].y= self.LEFT_ELBOW_Y
skele2d.skeleton[skele2d.LEFT_WRIST].x= self.LEFT_WRIST_X
skele2d.skeleton[skele2d.LEFT_WRIST].y= self.LEFT_WRIST_Y
skele2d.skeleton[skele2d.RIGHT_HIP].x= self.RIGHT_HIP_X
skele2d.skeleton[skele2d.RIGHT_HIP].y= self.RIGHT_HIP_Y
skele2d.skeleton[skele2d.RIGHT_KNEE].x= self.RIGHT_KNEE_X
skele2d.skeleton[skele2d.RIGHT_KNEE].y= self.RIGHT_KNEE_Y
skele2d.skeleton[skele2d.RIGHT_ANKLE].x= self.RIGHT_ANKLE_X
skele2d.skeleton[skele2d.RIGHT_ANKLE].y= self.RIGHT_ANKLE_Y
skele2d.skeleton[skele2d.LEFT_HIP].x= self.LEFT_HIP_X
skele2d.skeleton[skele2d.LEFT_HIP].y= self.LEFT_HIP_Y
skele2d.skeleton[skele2d.LEFT_KNEE].x= self.LEFT_KNEE_X
skele2d.skeleton[skele2d.LEFT_KNEE].y= self.LEFT_KNEE_Y
skele2d.skeleton[skele2d.LEFT_ANKLE].x= self.LEFT_ANKLE_X
skele2d.skeleton[skele2d.LEFT_ANKLE].y= self.LEFT_ANKLE_Y
skele2d.skeleton[skele2d.LEFT_EYE].x = self.LEFT_EYE_X
skele2d.skeleton[skele2d.LEFT_EYE].y = self.LEFT_EYE_Y
skele2d.skeleton[skele2d.RIGHT_EYE].x = self.RIGHT_EYE_X
skele2d.skeleton[skele2d.RIGHT_EYE].y= self.RIGHT_EYE_Y
skele2d.skeleton[skele2d.LEFT_EAR].x= self.LEFT_EAR_X
skele2d.skeleton[skele2d.LEFT_EAR].y= self.LEFT_EAR_Y
skele2d.skeleton[skele2d.RIGHT_EAR].x= self.RIGHT_EAR_X
skele2d.skeleton[skele2d.RIGHT_EAR].y = self.RIGHT_EAR_Y
self.Skele2D_publisher.publish(skele2d)
self.BodPred_publisher = rospy.Publisher(Body_ID + "/attitude" ,BodyPose,queue_size=1)
bodlang = BodyPose()
OpenVino_lang = str(self.BODYLANG)
bodlang.lang = getattr(bodlang, OpenVino_lang) # map the predicted label onto the matching BodyPose constant
self.BodPred_publisher.publish(bodlang)
# updating a dict to hold the old values of a feature.
feature_dict[Body_ID_unique] = [skele2d,bodlang]
# IDs that were present last frame but have now disappeared; their cached features are republished below
if self.old_faces_ids:
self.diff_face_ids = set(self.old_faces_ids) - set(faces_ids_update)
if self.old_bodies_ids:
self.diff_body_ids = set(self.old_bodies_ids) - set(bodies_ids_update)
self.old_faces_ids = faces_ids_update
self.old_bodies_ids = bodies_ids_update
for id in self.diff_face_ids:
self.landmarks_publisher.publish(feature_dict.get(id)[1])
self.FAU_publisher.publish(feature_dict.get(id)[0])
self.Expression_publisher.publish(feature_dict.get(id)[2])
for id in self.diff_body_ids:
self.BodPred_publisher.publish(feature_dict.get(id)[1])
self.Skele2D_publisher.publish(feature_dict.get(id)[0])
# A series of conditions relate each feature to the re-identification ROI; any new model added must get its own condition here.
for agegender in self.agegender_data:
# condition for age and gender
if x_offset < agegender.roi.x_offset < x_offset + width or x_offset < agegender.roi.x_offset + agegender.roi.width < x_offset + width:
#condition for emotions
if x_offset < self.emotion_roi_x < x_offset + width:
#condition for 2D facial landmarks
if x_offset < self.landmarks2d_x < x_offset + width:
#condition for skeletal points
if x_offset < window_width - x_b < x_offset + width: