#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OCCIPITAL_STRUCTURE) && defined(VISP_HAVE_PUGIXML) && defined(VISP_HAVE_OPENCV) && \
  (((VISP_HAVE_OPENCV_VERSION < 0x050000) && (defined(HAVE_OPENCV_FEATURES2D) || defined(HAVE_OPENCV_XFEATURES2D))) || \
   ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES)))

#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpOccipitalStructure.h>
#include <visp3/vision/vpKeyPoint.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

#ifndef DOXYGEN_SHOULD_SKIP_THIS
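
// The DepthType enum is not shown in this excerpt; the definition below is an
// assumption reconstructed from how its values are used later in the file
// (DEPTH_UNUSED, DEPTH_DENSE, DEPTH_NORMAL plus a DEPTH_COUNT sentinel).
typedef enum { DEPTH_UNUSED, DEPTH_DENSE, DEPTH_NORMAL, DEPTH_COUNT } DepthType;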

std::string depthTypeToString(const DepthType &type)
{
  // Returns a human-readable name for the given depth type (switch body elided in this excerpt)
}

DepthType depthTypeFromString(const std::string &name)
{
  DepthType type(DEPTH_COUNT);
  bool notFound = true;
  unsigned int i = 0;
  while ((i < static_cast<unsigned int>(DEPTH_COUNT)) && notFound) {
    DepthType candidate = static_cast<DepthType>(i);
    if (name == depthTypeToString(candidate)) {
      type = candidate;
      notFound = false;
    }
    i++;
  }
  return type;
}

std::string getDepthTypeList(const std::string &prefix = "<", const std::string &sep = " , ",
                             const std::string &suffix = ">")
{
  std::string list(prefix);
  unsigned int i = 0;
  while (i < static_cast<unsigned int>(DEPTH_COUNT - 1)) {
    DepthType type = static_cast<DepthType>(i);
    std::string name = depthTypeToString(type);
    list += name + sep;
    i++;
  }
  DepthType type = static_cast<DepthType>(DEPTH_COUNT - 1);
  std::string name = depthTypeToString(type);
  list += name + suffix;
  return list;
}
#endif

int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  DepthType use_depth = DEPTH_DENSE;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;
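
  // Parse the command-line options; flags that take a value read it from argv[i + 1].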
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = depthTypeFromString(std::string(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
        << argv[0]
        << " [--model_color <object.cao>] [--model_depth <object.cao>]"
           " [--config_color <object.xml>] [--config_depth <object.xml>]"
           " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
           " [--proj_error_threshold <threshold between 0 and 90> (default: "
        << proj_error_threshold
        << ")]"
           " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth " + getDepthTypeList() +
           " (default: " + depthTypeToString(use_depth) + ")]"
           " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
           " [--display_proj_error]"
        << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
        << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
        << std::endl;

      return EXIT_SUCCESS;
    }
  }

  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }

  std::cout << "Tracked features: " << std::endl;
  std::cout << " Use edges : " << use_edges << std::endl;
  std::cout << " Use klt : " << use_klt << std::endl;
  std::cout << " Use depth : " << depthTypeToString(use_depth) << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << " Use ogre : " << use_ogre << std::endl;
  std::cout << " Use scanline: " << use_scanline << std::endl;
  std::cout << " Proj. error : " << proj_error_threshold << std::endl;
  std::cout << " Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << " Config color: " << "\"" << config_color << "\"" << std::endl;
  std::cout << " Config depth: " << "\"" << config_depth << "\"" << std::endl;
  std::cout << " Model color : " << "\"" << model_color << "\"" << std::endl;
  std::cout << " Model depth : " << "\"" << model_depth << "\"" << std::endl;
  std::cout << " Init file : " << "\"" << init_file << "\"" << std::endl;
  std::cout << "Learning options : " << std::endl;
  std::cout << " Learn : " << learn << std::endl;
  std::cout << " Auto init : " << auto_init << std::endl;
  std::cout << " Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && (use_depth == DEPTH_UNUSED)) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || "
                 "init_file.empty()" << std::endl;
    return EXIT_FAILURE;
  }
  ST::CaptureSessionSettings settings;
  settings.source = ST::CaptureSessionSourceId::StructureCore;
  settings.structureCore.visibleEnabled = true;
  settings.applyExpensiveCorrection = true;

  // Grabber declaration and opening (the corresponding lines are elided in this excerpt;
  // they are sketched here so that the later sc.acquire() calls make sense).
  vpOccipitalStructure sc;
  try {
    sc.open(settings);
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the Structure Core camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

  // The camera parameter retrieval (sc.getCameraParameters()), the image containers
  // (I_color, I_gray, I_depth_raw, I_depth) and their sizes obtained from
  // sc.getWidth() / sc.getHeight() are elided in this excerpt.
  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  unsigned int _posx = 100, _posy = 50;

#ifdef VISP_HAVE_DISPLAY
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
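  // The display creation lines are elided in the original excerpt. A minimal sketch,
  // assuming the vpDisplayFactory helpers used by this tutorial:
  std::shared_ptr<vpDisplay> display1 = vpDisplayFactory::createDisplay();
  std::shared_ptr<vpDisplay> display2 = vpDisplayFactory::createDisplay();
#else
  vpDisplay *display1 = vpDisplayFactory::allocateDisplay();
  vpDisplay *display2 = vpDisplayFactory::allocateDisplay();
#endif
#endif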

  if (use_edges || use_klt) {
    display1->init(I_gray, _posx, _posy, "Color stream");
  }
  if (use_depth != DEPTH_UNUSED) {
    display2->init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");
  }

  sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr, nullptr);

  if (use_edges || use_klt) {
    // grayscale conversion and color-stream display (elided in this excerpt)
  }
  if (use_depth != DEPTH_UNUSED) {
    // depth-histogram creation and depth-stream display (elided in this excerpt)
  }

  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  // (single-feature edge/KLT branches elided in this excerpt)
  if (use_depth == DEPTH_DENSE) {
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
  }
  else if (use_depth == DEPTH_NORMAL) {
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_NORMAL_TRACKER);
  }
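
  // The tracker construction is elided in the original excerpt. It is built from
  // the trackerTypes vector filled above; a minimal sketch:
  vpMbGenericTracker tracker(trackerTypes);

  // The maps below follow the vpMbGenericTracker multi-camera convention:
  // "Camera1" refers to the color camera and "Camera2" to the depth camera.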
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  // depth_M_color (extrinsic transform between the depth and color cameras, obtained
  // from sc.getTransform()) is declared in a line elided from this excerpt.
  if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth != DEPTH_UNUSED) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }
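
  // Generic tracker settings shared by every configuration.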
  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);
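
  // Keypoint detector/extractor/matcher used for the learning and auto-init steps:
  // SIFT when available in the OpenCV build, FAST + ORB otherwise.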
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#elif ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif

  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    orb_detector->setNLevels(1);
#endif
  }

  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    }
    else if (use_edges || use_klt) {
      tracker.initClick(I_gray, init_file, true);
    }
    else if (use_depth != DEPTH_UNUSED) {
      tracker.initClick(I_depth, init_file, true);
    }
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;
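
  // Main acquisition / tracking loop. The surrounding try block, the loop header and
  // the per-iteration timer (loop_t), pose (cMo) and learning counter declarations are
  // elided in this excerpt.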
    bool learn_position = false;
    bool tracking_failed = false;

    sc.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud);

    if (use_edges || use_klt || run_auto_init) {
      // grayscale conversion and color-stream display (elided in this excerpt)
    }
    if (use_depth != DEPTH_UNUSED) {
      // depth-histogram creation and depth-stream display (elided in this excerpt)
    }

    if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
      mapOfImages["Camera1"] = &I_gray;
      mapOfPointclouds["Camera2"] = &pointcloud;
      mapOfWidths["Camera2"] = width;
      mapOfHeights["Camera2"] = height;
    }
    else if (use_edges || use_klt) {
      mapOfImages["Camera"] = &I_gray;
    }
    else if (use_depth != DEPTH_UNUSED) {
      mapOfPointclouds["Camera"] = &pointcloud;
      mapOfWidths["Camera"] = width;
      mapOfHeights["Camera"] = height;
    }

    if (run_auto_init) {
      // Keypoint-based detection of the object to (re-)initialize the tracker.
      if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
        std::cout << "Auto init succeeded" << std::endl;
        if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
          mapOfCameraPoses["Camera1"] = cMo;
          mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
          tracker.initFromPose(mapOfImages, mapOfCameraPoses);
        }
        else if (use_edges || use_klt) {
          tracker.initFromPose(I_gray, cMo);
        }
        else if (use_depth != DEPTH_UNUSED) {
          tracker.initFromPose(I_depth, depth_M_color * cMo);
        }
      }
      else {
        // Auto init failed: the color/depth displays are flushed and the frame is
        // skipped (branch bodies elided in this excerpt).
        if (use_edges || use_klt) {
        }
        if (use_depth != DEPTH_UNUSED) {
        }
      }
    }

    tracker.setDisplayFeatures(true);
    run_auto_init = false;

    try {
      if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
        tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
      }
      else if (use_edges || use_klt) {
        tracker.track(I_gray);
      }
      else if (use_depth != DEPTH_UNUSED) {
        tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
      }
    }
    catch (const vpException &e) {
      std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
      tracking_failed = true;
      if (auto_init) {
        std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
        run_auto_init = true;
      }
    }

    // Check the tracking quality via the projection error. The branch condition is
    // elided in the original excerpt; the one used below (reuse the tracker error when
    // edge features are enabled, recompute it otherwise) is an assumption.
    double proj_error = 0;
    if (use_edges) {
      proj_error = tracker.getProjectionError();
    }
    else {
      proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
    }

    if (auto_init && proj_error > proj_error_threshold) {
      std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
      run_auto_init = true;
      tracking_failed = true;
    }

    if (!tracking_failed) {
      tracker.setDisplayFeatures(true);

      if ((use_edges || use_klt) && (use_depth != DEPTH_UNUSED)) {
        tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
        // camera frames drawn with vpDisplay::displayFrame() (elided in this excerpt)
      }
      else if (use_edges || use_klt) {
        // color-only display of the model and camera frame (elided in this excerpt)
      }
      else if (use_depth != DEPTH_UNUSED) {
        // depth-only display of the model and camera frame (elided in this excerpt)
      }

      {
        std::stringstream ss;
        ss << "Nb features: " << tracker.getError().size();
        // text overlay elided in this excerpt
      }
      {
        std::stringstream ss;
        ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
           << ", depth " << tracker.getNbFeaturesDepthDense();
        // text overlay elided in this excerpt
      }
    }

    std::stringstream ss;
    ss << "Loop time: " << loop_t << " ms";

    if (use_edges || use_klt) {
      // Help-text overlay and mouse-click handling on the color display (branch details
      // elided in this excerpt): depending on the clicked button the current view is
      // learned (learn_position = true;), the tracker is re-initialized from the learning
      // data (run_auto_init = true;), or the loop exits.
    }
    if (use_depth != DEPTH_UNUSED) {
      // depth-display overlay, flush and click handling (elided in this excerpt)
    }

    if (learn_position) {
      // Detect keypoints on the current grayscale image.
      std::vector<cv::KeyPoint> trainKeyPoints;
      keypoint.detect(I_gray, trainKeyPoints);

      // Keep only the keypoints lying on visible faces of the CAD model.
      std::vector<vpPolygon> polygons;
      std::vector<std::vector<vpPoint> > roisPt;
      std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
      polygons = pair.first;
      roisPt = pair.second;

      // Compute the 3D coordinates of the kept keypoints from the current pose; this
      // call is elided in the original excerpt and sketched here as an assumption.
      std::vector<cv::Point3f> points3f;
      vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

      // Append the keypoints and their 3D coordinates to the reference database
      // (learn_id is a counter declared in an elided line).
      keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

      for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
        // drawing of each learned keypoint elided in this excerpt
      }
      learn_position = false;
      std::cout << "Data learned" << std::endl;
    }

    times_vec.push_back(loop_t);
  } // end of the acquisition / tracking loop

  if (learn) {
    std::cout << "Save learning file: " << learning_data << std::endl;
    keypoint.saveLearningData(learning_data, true, true);
  }
  } // closes the try block around the loop (its opening is elided in this excerpt)
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
  }

#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) && defined(VISP_HAVE_DISPLAY)
  if (display1 != nullptr) {
    delete display1;
  }
  if (display2 != nullptr) {
    delete display2;
  }
#endif

  if (!times_vec.empty()) {
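    // The statistics printed here are elided in the original excerpt. A minimal sketch,
    // assuming the vpMath helpers (the message wording is an assumption):
    std::cout << "Processing time, Mean: " << vpMath::getMean(times_vec)
      << " ms ; Median: " << vpMath::getMedian(times_vec)
      << " ms ; Std: " << vpMath::getStdev(times_vec) << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}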

#elif defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
// fallback main() when the required OpenCV modules are missing (wrapper elided in this excerpt)
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#else
// fallback main() when libStructure is missing (wrapper elided in this excerpt)
  std::cout << "Install libStructure 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif