Hi @thanhlh
Here’s code for drawing joint orientations:
void drawOrientations(DepthSensor::Ptr depthSensor,
                      const std::vector<Joint>& joints, cv::Mat& show,
                      float length, int width)
{
    if (!depthSensor || !depthSensor->getDepthFrame())
        return;

    // Scale factors from the depth-frame resolution to the output image resolution.
    float xScale = (float)show.cols / depthSensor->getDepthFrame()->getCols();
    float yScale = (float)show.rows / depthSensor->getDepthFrame()->getRows();

    for (size_t j = 0; j < joints.size(); ++j)
    {
        // Skip untracked and low-confidence joints.
        if (joints[j].type == JOINT_NONE)
            continue;
        if (joints[j].confidence < 0.15f)
            continue;

        Orientation orient = joints[j].orient;
        cv::Point3f position(joints[j].real.x, joints[j].real.y, joints[j].real.z);

        // Columns of the 3x3 rotation matrix are the joint's local X, Y, Z axes.
        cv::Point3f x(orient.matrix[0], orient.matrix[3], orient.matrix[6]);
        cv::Point3f y(orient.matrix[1], orient.matrix[4], orient.matrix[7]);
        cv::Point3f z(orient.matrix[2], orient.matrix[5], orient.matrix[8]);

        // End points of the three axes, `length` units away from the joint in real coordinates.
        cv::Point3f positionX = position + length * x;
        cv::Point3f positionY = position + length * y;
        cv::Point3f positionZ = position + length * z;

        // Project the joint and the axis end points to depth-frame pixel coordinates.
        Vector3 proj  = depthSensor->convertRealToProjCoords(position.x, position.y, position.z);
        Vector3 projX = depthSensor->convertRealToProjCoords(positionX.x, positionX.y, positionX.z);
        Vector3 projY = depthSensor->convertRealToProjCoords(positionY.x, positionY.y, positionY.z);
        Vector3 projZ = depthSensor->convertRealToProjCoords(positionZ.x, positionZ.y, positionZ.z);

        // Draw the axes: X in red, Y in green, Z in blue.
        cv::line(show, cv::Point(proj.x * xScale, proj.y * yScale),
                 cv::Point(projX.x * xScale, projX.y * yScale), CV_RGB(255, 0, 0), width);
        cv::line(show, cv::Point(proj.x * xScale, proj.y * yScale),
                 cv::Point(projY.x * xScale, projY.y * yScale), CV_RGB(0, 255, 0), width);
        cv::line(show, cv::Point(proj.x * xScale, proj.y * yScale),
                 cv::Point(projZ.x * xScale, projZ.y * yScale), CV_RGB(0, 0, 255), width);
    }
}
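
For context, here's roughly how I call it. This is only a minimal sketch: `skeletons`, `show`, and the update loop are placeholders for whatever skeleton data and output image you already have in your pipeline, and the 100.0f assumes the joints' real coordinates are in millimeters (so the axes are 10 cm long):

// Minimal usage sketch -- assumes `depthSensor`, an image `show` sized like your
// view, and a collection of tracked skeletons are already available each frame.
// 100.0f is the axis length in real-coordinate units (mm assumed); 2 is line thickness.
for (const auto& skeleton : skeletons)
    drawOrientations(depthSensor, skeleton.joints, show, 100.0f, 2);
cv::imshow("joint orientations", show);
cv::waitKey(1);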