Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
testKeyPoint-2.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * Test keypoint matching and pose estimation.
32 */
33
39
40#include <iostream>
41
42#include <visp3/core/vpConfig.h>
43
44#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) && \
45 (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
46 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))
47
48#include <visp3/core/vpImage.h>
49#include <visp3/core/vpIoTools.h>
50#include <visp3/gui/vpDisplayFactory.h>
51#include <visp3/io/vpImageIo.h>
52#include <visp3/io/vpParseArgv.h>
53#include <visp3/io/vpVideoReader.h>
54#include <visp3/mbt/vpMbEdgeTracker.h>
55#include <visp3/vision/vpKeyPoint.h>
56
57// List of allowed command line options
58#define GETOPTARGS "cdph"
59
60#ifdef ENABLE_VISP_NAMESPACE
61using namespace VISP_NAMESPACE_NAME;
62#endif
63
64void usage(const char *name, const char *badparam);
65bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac);
66
/*!
 * Print the command line usage of this test on stdout.
 *
 * \param name : Program name (typically argv[0]), inserted in the synopsis.
 * \param badparam : Faulty parameter to report after the help text, or
 * nullptr when there is nothing to report.
 */
void usage(const char *name, const char *badparam)
{
  // Synopsis first; adjacent string literals are concatenated by the
  // compiler, so the printed text is exactly the same as before.
  fprintf(stdout,
          "\n"
          "Test keypoints matching.\n"
          "\n"
          "SYNOPSIS\n"
          "  %s [-c] [-d] [-p] [-h]\n",
          name);

  // Then the description of each supported option.
  fprintf(stdout,
          "\n"
          "OPTIONS: \n"
          "\n"
          "  -c\n"
          "     Disable the mouse click. Useful to automate the \n"
          "     execution of this program without human intervention.\n"
          "\n"
          "  -d \n"
          "     Turn off the display.\n"
          "\n"
          "  -p \n"
          "     Use parallel RANSAC.\n"
          "\n"
          "  -h\n"
          "     Print the help.\n");

  // Report the offending parameter last, as the original did.
  if (badparam != nullptr) {
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
  }
}
103
115bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac)
116{
117 const char *optarg_;
118 int c;
119 while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
120
121 switch (c) {
122 case 'c':
123 click_allowed = false;
124 break;
125 case 'd':
126 display = false;
127 break;
128 case 'p':
129 use_parallel_ransac = true;
130 break;
131 case 'h':
132 usage(argv[0], nullptr);
133 return false;
134
135 default:
136 usage(argv[0], optarg_);
137 return false;
138 }
139 }
140
141 if ((c == 1) || (c == -1)) {
142 // standalone param or error
143 usage(argv[0], nullptr);
144 std::cerr << "ERROR: " << std::endl;
145 std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
146 return false;
147 }
148
149 return true;
150}
151
/*!
 * Run the keypoint matching and pose estimation test on the mbt/cube image
 * sequence: build ORB reference keypoints from three known poses (images
 * 0, 150 and 200), then match keypoints on the whole sequence and estimate
 * the pose with RANSAC.
 *
 * NOTE(review): this extract appears to be missing some interior lines
 * (declarations such as the model-based tracker, camera parameters, pose,
 * video reader and mouse button variables) — confirm against the full
 * source file.
 *
 * \param env_ipath : Root path of the ViSP image dataset.
 * \param opt_click_allowed : When true, mouse clicks drive the test.
 * \param opt_display : When true, images and matches are displayed.
 * \param use_parallel_ransac : When true, use the parallel RANSAC version.
 * \param I : Image used for reading the sequence (gray level or color).
 * \param IMatching : Side-by-side image used to display the matching.
 */
template <typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display, bool use_parallel_ransac,
              vpImage<Type> &I, vpImage<Type> &IMatching)
{
#if defined(VISP_HAVE_DATASET)
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
#else
  // We suppose that the user will download a recent dataset
  std::string ext("png");
#endif
  // Set the path location of the image sequence
  std::string dirname = vpIoTools::createFilePath(env_ipath, "mbt/cube");

  // Build the name of the image files
  std::string filenameRef = vpIoTools::createFilePath(dirname, "image0000." + ext);
  vpImageIo::read(I, filenameRef);
  std::string filenameCur = vpIoTools::createFilePath(dirname, "image%04d." + ext);

  vpDisplay *display = nullptr;

  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display = vpDisplayFactory::allocateDisplay(I, 0, 0, "ORB keypoints matching and pose estimation");
    display->setDownScalingFactor(vpDisplay::SCALE_AUTO);
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif
  }

  // NOTE(review): the declarations of `tracker` (vpMbEdgeTracker) and `cam`
  // (vpCameraParameters) used below appear elided from this extract.
  // Load config for tracker
  std::string tracker_config_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.xml");

#if defined(VISP_HAVE_PUGIXML)
  tracker.loadConfigFile(tracker_config_file);
  tracker.getCameraParameters(cam);
#else
  // Corresponding parameters manually set to have an example code
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setThreshold(20);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  me.setNbTotalSample(250);
  tracker.setMovingEdge(me);
  cam.initPersProjWithoutDistortion(547.7367575, 542.0744058, 338.7036994, 234.5083345);
  tracker.setCameraParameters(cam);
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(100.0);
  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
#endif

  tracker.setAngleAppear(vpMath::rad(89));
  tracker.setAngleDisappear(vpMath::rad(89));

  // Load CAO model
  std::string cao_model_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.cao");
  tracker.loadModel(cao_model_file);

  // Initialize the pose
  std::string init_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.init");
  if (opt_display && opt_click_allowed) {
    tracker.initClick(I, init_file);
  }
  else {
    // Hard-coded pose used when no user interaction is possible
    vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
    tracker.initFromPose(I, cMoi);
  }

  // Get the init pose
  // NOTE(review): the declaration of `cMo` (vpHomogeneousMatrix) appears
  // elided from this extract.
  tracker.getPose(cMo);

  // Init keypoints
  vpKeyPoint keypoints("ORB", "ORB", "BruteForce-Hamming");
  keypoints.setRansacParallel(use_parallel_ransac);
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  // Bug when using LSH index with FLANN and OpenCV 2.3.1.
  // see http://code.opencv.org/issues/1741 (Bug #1741)
  keypoints.setMatcher("FlannBased");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  keypoints.setDetectorParameter("ORB", "nLevels", 1);
#else
  cv::Ptr<cv::ORB> orb_detector = keypoints.getDetector("ORB").dynamicCast<cv::ORB>();
  if (orb_detector) {
    // Restrict ORB to a single pyramid level, as done for OpenCV < 3.0 above
    orb_detector->setNLevels(1);
  }
#endif
#endif

  // Detect keypoints on the current image
  std::vector<cv::KeyPoint> trainKeyPoints;
  double elapsedTime;
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair =
    tracker.getPolygonFaces(true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  std::vector<cv::Point3f> points3f;
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints (first view, class id 1)
  keypoints.buildReference(I, trainKeyPoints, points3f, false, 1);

  // Read image 150
  filenameRef = vpIoTools::createFilePath(dirname, "image0150." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 150
  cMo.buildFrom(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 150
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(true, true,
                                 true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Append to the reference keypoints (second view, class id 2)
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 2);

  // Read image 200
  filenameRef = vpIoTools::createFilePath(dirname, "image0200." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 200
  cMo.buildFrom(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 200
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(false); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Append to the reference keypoints (third view, class id 3)
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 3);

  // Init reader for getting the input image sequence
  // NOTE(review): the declaration of `g` (vpVideoReader) appears elided
  // from this extract.
  g.setFileName(filenameCur);
  g.open(I);
  g.acquire(I);

  vpDisplay *display2 = nullptr;

  keypoints.createImageMatching(I, IMatching);

  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    // Second window placed below the first one, accounting for down-scaling
    display2 = vpDisplayFactory::allocateDisplay(IMatching, 0, static_cast<int>(I.getHeight()) / vpDisplay::getDownScalingFactor(I) + 30, "IMatching");
#endif
  }

  bool opt_click = false;
  double error;
  std::vector<double> times_vec;
  // Without display, only process the first 30 frames to keep the test short
  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
    g.acquire(I);

    if (opt_display) {

      // Display image matching
      keypoints.insertImageMatching(I, IMatching);

      vpDisplay::display(IMatching);
    }

    // Match keypoints and estimate the pose
    if (keypoints.matchPoint(I, cam, cMo, error, elapsedTime)) {
      times_vec.push_back(elapsedTime);

      tracker.setCameraParameters(cam);
      tracker.setPose(I, cMo);

      if (opt_display) {
        tracker.display(I, cMo, cam, vpColor::red, 2);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        std::vector<vpImagePoint> ransacInliers = keypoints.getRansacInliers();
        std::vector<vpImagePoint> ransacOutliers = keypoints.getRansacOutliers();

        // Shift inliers into the current-image half of IMatching (green)
        for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::green);
        }

        // Shift outliers the same way (red)
        for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::red);
        }

        keypoints.displayMatching(I, IMatching);

        // Display model in the correct sub-image in IMatching
        // NOTE(review): the declaration of `cam2` (vpCameraParameters)
        // appears elided from this extract.
        cam2.initPersProjWithoutDistortion(cam.get_px(), cam.get_py(), cam.get_u0() + I.getWidth(),
                                           cam.get_v0() + I.getHeight());
        tracker.setCameraParameters(cam2);
        tracker.setPose(IMatching, cMo);
        tracker.display(IMatching, cMo, cam2, vpColor::red, 2);
        vpDisplay::displayFrame(IMatching, cMo, cam2, 0.025, vpColor::none, 3);
      }
    }

    if (opt_display) {
      vpDisplay::flush(IMatching);
    }

    if (opt_click_allowed && opt_display) {
      // Click requested to process next image
      // NOTE(review): the declaration of `button`
      // (vpMouseButton::vpMouseButtonType) appears elided from this extract.
      if (opt_click) {
        vpDisplay::getClick(I, button, true);
        if (button == vpMouseButton::button3) {
          opt_click = false;
        }
      }
      else {
        // Use right click to enable/disable step by step tracking
        if (vpDisplay::getClick(I, button, false)) {
          if (button == vpMouseButton::button3) {
            opt_click = true;
          }
          else if (button == vpMouseButton::button1) {
            // Left click stops the sequence processing
            break;
          }
        }
      }
    }
  }

  // Release the displays allocated with new through the factory
  if (display) {
    delete display;
  }
  if (display2) {
    delete display2;
  }

  // Report matching time statistics over the processed frames
  if (!times_vec.empty()) {
    std::cout << "Computation time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << std::endl;
  }
}
432
433int main(int argc, const char **argv)
434{
435 try {
436 std::string env_ipath;
437 bool opt_click_allowed = true;
438 bool opt_display = true;
439 bool use_parallel_ransac = false;
440
441 // Read the command line options
442 if (getOptions(argc, argv, opt_click_allowed, opt_display, use_parallel_ransac) == false) {
443 return EXIT_FAILURE;
444 }
445
446 // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
447 // environment variable value
449
450 if (env_ipath.empty()) {
451 std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
452 "variable value."
453 << std::endl;
454 return EXIT_FAILURE;
455 }
456
457 {
458 vpImage<unsigned char> I, IMatching;
459
460 std::cout << "-- Test on gray level images" << std::endl;
461
462 run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
463 }
464 {
465 vpImage<vpRGBa> I, IMatching;
466
467 std::cout << "-- Test on color images" << std::endl;
468
469 run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
470 }
471
472 }
473 catch (const vpException &e) {
474 std::cerr << e.what() << std::endl;
475 return EXIT_FAILURE;
476 }
477
478 std::cout << "testKeyPoint-2 is ok !" << std::endl;
479 return EXIT_SUCCESS;
480}
481#else
/*!
 * Fallback entry point used when ViSP is built without the required OpenCV
 * modules: report the missing dependency and exit without failing.
 */
int main()
{
  static const char *k_missing_dep = "You need OpenCV library.";
  std::cerr << k_missing_dep << std::endl;

  return EXIT_SUCCESS;
}
488
489#endif
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
static const vpColor red
Definition vpColor.h:198
static const vpColor none
Definition vpColor.h:210
static const vpColor green
Definition vpColor.h:201
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
virtual void setDownScalingFactor(unsigned int scale)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
unsigned int getDownScalingFactor()
Definition vpDisplay.h:218
error that can be emitted by ViSP classes.
Definition vpException.h:60
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
static std::string getViSPImagesDataPath()
static std::string createFilePath(const std::string &parent, const std::string &child)
Class that allows keypoints 2D features detection (and descriptors extraction) and matching thanks to...
Definition vpKeyPoint.h:274
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
static double rad(double deg)
Definition vpMath.h:129
static double getMedian(const std::vector< double > &v)
Definition vpMath.cpp:343
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
Definition vpMath.cpp:374
static double getMean(const std::vector< double > &v)
Definition vpMath.cpp:323
Make the complete tracking of an object by using its CAD model.
Definition vpMe.h:143
void setMu1(const double &mu_1)
Definition vpMe.h:408
void setRange(const unsigned int &range)
Definition vpMe.h:438
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition vpMe.h:531
void setNbTotalSample(const int &ntotal_sample)
Definition vpMe.h:422
void setMaskNumber(const unsigned int &mask_number)
Definition vpMe.cpp:555
void setThreshold(const double &threshold)
Definition vpMe.h:489
void setSampleStep(const double &sample_step)
Definition vpMe.h:445
void setMaskSize(const unsigned int &mask_size)
Definition vpMe.cpp:563
void setMu2(const double &mu_2)
Definition vpMe.h:415
@ NORMALIZED_THRESHOLD
Definition vpMe.h:154
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void open(vpImage< vpRGBa > &I) VP_OVERRIDE
void setFileName(const std::string &filename)
long getFrameIndex() const
void acquire(vpImage< vpRGBa > &I) VP_OVERRIDE
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.