Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
vpRBTracker.h
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 */
30
35#ifndef VP_RB_TRACKER_H
36#define VP_RB_TRACKER_H
37
38#include <visp3/core/vpConfig.h>
39
40#if defined(VISP_HAVE_PANDA3D)
41
42#include <visp3/rbt/vpRBFeatureTracker.h>
43#include <visp3/rbt/vpRBSilhouettePointsExtractionSettings.h>
44#include <visp3/rbt/vpPanda3DDepthFilters.h>
45#include <visp3/rbt/vpObjectCentricRenderer.h>
46#include <visp3/rbt/vpRBTrackingResult.h>
47#include <visp3/rbt/vpRBConvergenceMetric.h>
48#include <visp3/rbt/vpRBInitializationHelper.h>
49#include <visp3/core/vpDisplay.h>
50
51#include <ostream>
52#include <type_traits>
53
54#if defined(VISP_HAVE_NLOHMANN_JSON)
55#include VISP_NLOHMANN_JSON(json_fwd.hpp)
56#endif
57
59
60class vpObjectMask;
63
86class VISP_EXPORT vpRBTracker
87{
88public:
89
91
92 ~vpRBTracker() = default;
93
98
106 void getPose(vpHomogeneousMatrix &cMo) const;
107
114 void setPose(const vpHomogeneousMatrix &cMo);
129
130 vpMatrix getCovariance() const;
131
135
140
148 void addTracker(std::shared_ptr<vpRBFeatureTracker> tracker);
149
155 inline std::vector<std::shared_ptr<vpRBFeatureTracker>> getFeatureTrackers() const { return m_trackers; }
156
160 inline std::string getModelPath() const { return m_modelPath; }
161
167 void setModelPath(const std::string &path);
168
174 vpCameraParameters getCameraParameters() const;
185 void setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w);
186
187 inline unsigned int getImageWidth() const { return m_imageWidth; }
188 inline unsigned int getImageHeight() const { return m_imageHeight; }
189
194
195 void setSilhouetteExtractionParameters(const vpSilhouettePointsExtractionSettings &settings);
196
197 double getOptimizationGain() const { return m_lambda; }
198 inline void setOptimizationGain(double lambda)
199 {
200 if (lambda < 0.0) {
201 throw vpException(vpException::badValue, "Optimization gain should be greater or equal to zero");
202 }
203 m_lambda = lambda;
204 }
205 unsigned int getMaxOptimizationIters() const { return m_vvsIterations; }
206 inline void setMaxOptimizationIters(unsigned int iters)
207 {
208 if (iters == 0) {
209 throw vpException(vpException::badValue, "Max number of iterations must be greater than zero");
210 }
211 m_vvsIterations = iters;
212 }
213
214 double getOptimizationInitialMu() const { return m_muInit; }
215 inline void setOptimizationInitialMu(double mu)
216 {
217 if (mu < 0.0) {
218 throw vpException(vpException::badValue, "Optimization mu should be greater than or equal to zero");
219 }
220 m_muInit = mu;
221 }
222
224 inline void setOptimizationMuIterFactor(double factor)
225 {
226 if (factor < 0.0) {
227 throw vpException(vpException::badValue, "Optimization mu factor should be greater than or equal to zero");
228 }
229 m_muIterFactor = factor;
230 }
231
233 inline void setScaleInvariantRegularization(bool invariant)
234 {
235 m_scaleInvariantOptim = invariant;
236 }
237
242 std::shared_ptr<vpRBDriftDetector> getDriftDetector() const { return m_driftDetector; }
249 inline void setDriftDetector(const std::shared_ptr<vpRBDriftDetector> &detector)
250 {
251 m_driftDetector = detector;
252 }
253
257 std::shared_ptr<vpObjectMask> getObjectSegmentationMethod() const { return m_mask; }
264 inline void setObjectSegmentationMethod(const std::shared_ptr<vpObjectMask> &mask)
265 {
266 m_mask = mask;
267 }
268
272 std::shared_ptr<vpRBVisualOdometry> getOdometryMethod() const { return m_odometry; }
279 inline void setOdometryMethod(const std::shared_ptr<vpRBVisualOdometry> &odometry)
280 {
281 m_odometry = odometry;
282 }
283
284#if defined(VISP_HAVE_NLOHMANN_JSON)
293 void loadConfigurationFile(const std::string &filename);
294
301 void loadConfiguration(const nlohmann::json &j);
302#endif
303
307
308 void reset();
309
314
315
322 void startTracking();
346 vpRBTrackingResult track(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB);
360 vpRBTrackingResult track(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB, const vpImage<float> &depth);
364
376 double score(const vpHomogeneousMatrix &cMo, const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB, const vpImage<float> &depth);
377
382
390 void displayMask(vpImage<unsigned char> &Imask) const;
391
400 void display(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB, const vpImage<unsigned char> &depth);
404
405#ifdef VISP_HAVE_MODULE_GUI
425 template <typename ImageType>
426 typename std::enable_if<std::is_same<ImageType, unsigned char>::value || std::is_same<ImageType, vpRGBa>::value, void >::type initClick(const vpImage<ImageType> &I, const std::string &initFile, bool displayHelp)
427 {
428 vpRBInitializationHelper initializer;
429 initializer.setCameraParameters(m_cam);
430 initializer.initClick(I, initFile, displayHelp, *this);
431 setPose(initializer.getPose());
432 }
433#endif
434
436protected:
437
444 void setupRenderer(const std::string &file);
458
467 template <typename T>
// displaySilhouette(const vpImage<T> &I, const vpRBFeatureTrackerInput &frame):
// displays the object silhouette of the given frame in I.
// NOTE(review): this is a generated code listing with gaps — the function
// signature (original line 468) and the statement inside the innermost if
// (original line 476, presumably a vpDisplay::displayPoint call) are missing
// from this extract; confirm against the original vpRBTracker.h.
469 {
471 const vpImage<unsigned char> &Isilhouette = frame.renders.isSilhouette;
472 const vpRect bb = m_renderer.getBoundingBox();
// Scan the render bounding box, clamped to non-negative coords and image size.
473 for (unsigned int r = std::max(bb.getTop(), 0.); (r < bb.getBottom()) &&(r < I.getRows()); ++r) {
474 for (unsigned int c = std::max(bb.getLeft(), 0.); (c < bb.getRight()) && (c < I.getCols()); ++c) {
// Non-zero silhouette mask pixel: this location belongs to the silhouette.
475 if (Isilhouette[r][c] != 0) {
477 }
478 }
479 }
480 }
481 }
482
495 std::vector<vpRBSilhouettePoint> extractSilhouettePoints(
496 const vpImage<vpRGBf> &Inorm, const vpImage<float> &Idepth,
497 const vpImage<float> &Ior, const vpImage<unsigned char> &Ivalid,
498 const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp);
499
507 template<typename T>
// Check that image I has the dimensions previously configured via
// setCameraParameters (m_imageWidth x m_imageHeight); on mismatch, build a
// descriptive message and signal the error.
// NOTE(review): original line 515 is missing from this extract — presumably
// "throw vpException(vpException::dimensionError, ss.str());" — confirm
// against the original vpRBTracker.h.
508 void checkDimensionsOrThrow(const vpImage<T> &I, const std::string &imgType) const
509 {
510 if (I.getRows() != m_imageHeight || I.getCols() != m_imageWidth) {
511 std::stringstream ss;
512 ss << "vpRBTracker: dimension error: expected " << imgType;
513 ss << " image to have the following resolution " << m_imageWidth << " x " << m_imageHeight;
514 ss << ", but got " << I.getCols() << " x " << I.getRows();
516 }
517 }
518
// shouldRenderSilhouette(): returns whether the renderer should render the
// silhouette information, i.e. whether a vpPanda3DDepthCannyFilter
// subrenderer is installed in m_renderer.
// NOTE(review): the function signature line ("bool shouldRenderSilhouette()")
// is missing from this generated extract.
523 {
524 return m_renderer.getRenderer<vpPanda3DDepthCannyFilter>() != nullptr;
525 }
526
529 std::vector<std::shared_ptr<vpRBFeatureTracker>> m_trackers;
530
533
535 std::string m_modelPath;
537
544
546 double m_lambda;
550 double m_muInit;
555
563
566
567 std::shared_ptr<vpObjectMask> m_mask;
568 std::shared_ptr<vpRBDriftDetector> m_driftDetector;
569 std::shared_ptr<vpRBVisualOdometry> m_odometry;
570
571 std::shared_ptr<vpRBConvergenceMetric> m_convergenceMetric;
572
574
575};
576
577END_VISP_NAMESPACE
578
579#endif
580#endif
Generic class defining intrinsic camera parameters.
static const vpColor green
Definition vpColor.h:201
static void displayPoint(const vpImage< unsigned char > &I, const vpImagePoint &ip, const vpColor &color, unsigned int thickness=1)
error that can be emitted by ViSP classes.
Definition vpException.h:60
@ badValue
Used to indicate that a value is not in the allowed range.
Definition vpException.h:73
@ dimensionError
Bad dimension.
Definition vpException.h:71
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
Definition vpImage.h:131
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
Single object focused renderer.
Implementation of canny filtering, using Sobel kernels.
Rendering parameters for a panda3D simulation.
Base interface for algorithms that should detect tracking drift for the render-based tracker.
All the data related to a single tracking frame. This contains both the input data (from a real camer...
vpRBRenderData renders
camera parameters
vpHomogeneousMatrix getPose() const
void setCameraParameters(const vpCameraParameters &cam)
bool m_displaySilhouette
Metric used to compare the motion between different poses.
std::shared_ptr< vpRBDriftDetector > m_driftDetector
std::shared_ptr< vpRBVisualOdometry > m_odometry
vpSilhouettePointsExtractionSettings m_depthSilhouetteSettings
Settings for silhouette extraction.
~vpRBTracker()=default
vpCameraParameters m_cam
Camera intrinsics.
void checkDimensionsOrThrow(const vpImage< T > &I, const std::string &imgType) const
Check that a given image has the correct dimensions, previously specified with setCameraParameters or...
double getOptimizationInitialMu() const
vpObjectCentricRenderer m_renderer
3D renderer
unsigned int getImageWidth() const
unsigned m_vvsIterations
Maximum number of optimization iterations.
std::shared_ptr< vpRBDriftDetector > getDriftDetector() const
bool m_firstIteration
Whether this is the first iteration.
void setScaleInvariantRegularization(bool invariant)
double m_muIterFactor
Factor with which to multiply mu at every iteration during optimization.
void updateRender(vpRBFeatureTrackerInput &frame)
Update the render data with a render at the last tracked pose.
std::vector< std::shared_ptr< vpRBFeatureTracker > > m_trackers
List of feature trackers.
std::string m_modelPath
Location of the 3D model to load.
vpPanda3DRenderParameters m_rendererSettings
Camera specific setup for the 3D Panda renderer.
std::shared_ptr< vpRBVisualOdometry > getOdometryMethod() const
double getOptimizationMuIterFactor() const
friend vpRBInitializationHelper
std::shared_ptr< vpRBConvergenceMetric > m_convergenceMetric
unsigned m_imageHeight
Color and render image dimensions.
bool scaleInvariantRegularization() const
double m_lambda
Optimization gain.
void getPose(vpHomogeneousMatrix &cMo) const
Get the estimated pose of the object in the camera frame.
std::shared_ptr< vpObjectMask > m_mask
vpRBTrackingResult track(const vpImage< unsigned char > &I)
track and re-estimate the pose of the object in this frame, given only a grayscale image The pose aft...
vpHomogeneousMatrix m_cMoPrev
Previous pose of the object in the camera frame.
bool shouldRenderSilhouette()
Returns whether the renderer should render the silhouette information.
void setDriftDetector(const std::shared_ptr< vpRBDriftDetector > &detector)
Sets the method to perform drift detection and estimate tracking confidence. Set to null to disable t...
vpRBFeatureTrackerInput m_currentFrame
void setOdometryMethod(const std::shared_ptr< vpRBVisualOdometry > &odometry)
Set the method to use when performing visual odometry to preestimate the camera motion before trackin...
unsigned int getImageHeight() const
bool m_rendererIsSetup
double getOptimizationGain() const
void setupRenderer(const std::string &file)
Setup the renderer, and load the 3D model.
unsigned m_imageWidth
vpRBFeatureTrackerInput m_previousFrame
vpObjectCentricRenderer & getRenderer()
Get the renderer used to render the object.
unsigned int getMaxOptimizationIters() const
const vpRBFeatureTrackerInput & getMostRecentFrame() const
Retrieve the most recent frame that was used when tracking the object. The renders may not correspond...
std::enable_if< std::is_same< ImageType, unsigned char >::value||std::is_same< ImageType, vpRGBa >::value, void >::type initClick(const vpImage< ImageType > &I, const std::string &initFile, bool displayHelp)
Perform Pose initialization by asking the user to click on predefined 3D points. This method is simil...
std::shared_ptr< vpObjectMask > getObjectSegmentationMethod() const
void setOptimizationMuIterFactor(double factor)
void setOptimizationInitialMu(double mu)
bool m_modelChanged
void setOptimizationGain(double lambda)
double m_muInit
Initial mu value for Levenberg-Marquardt.
std::string getModelPath() const
Get the path to the 3D model to track.
void setPose(const vpHomogeneousMatrix &cMo)
Sets the pose of the object in the camera frame. Should be called when initializing the tracker or wh...
std::vector< std::shared_ptr< vpRBFeatureTracker > > getFeatureTrackers() const
Get the tracked features.
vpSilhouettePointsExtractionSettings getSilhouetteExtractionParameters() const
void setObjectSegmentationMethod(const std::shared_ptr< vpObjectMask > &mask)
Sets the algorithm to use when performing object segmentation. Set to null to disable the use of segm...
vpHomogeneousMatrix m_cMo
Current pose of the object in the camera frame.
bool m_scaleInvariantOptim
Whether to use diagonal scaling in Levenberg-Marquardt regularization.
void setMaxOptimizationIters(unsigned int iters)
void displaySilhouette(const vpImage< T > &I, const vpRBFeatureTrackerInput &frame)
Display the object silhouette of the frame in I.
Defines a rectangle in the plane.
Definition vpRect.h:79
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.