#include <iostream>
#include <visp3/core/vpConfig.h>
#if defined(VISP_HAVE_NLOHMANN_JSON)
#include <visp3/core/vpException.h>
#include <visp3/core/vpImageException.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpRGBa.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/io/vpVideoWriter.h>
#include <visp3/ar/vpPanda3DFrameworkManager.h>
#include <visp3/rbt/vpRBTracker.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
#include "render-based-tutorial-utils.h"
// Command-line/JSON arguments for the input sequence (struct, constructor and member
// names/types are inferred from their use in main() and in the parser below)
struct SequenceArguments
{
  SequenceArguments() : startFrame(0), frameStep(1), stepByStep(false) { }

  void registerArguments(vpJsonArgumentParser &parser)
  {
parser
.addArgument(
"--color",
colorSequence,
true,
"The color sequence (in video reader format, eg., /path/to/I\%04d.png)")
.addArgument(
"--depth",
depthFolder,
false,
"The depth images associated to the color sequence. Frames should be aligned")
.addArgument(
"--start",
startFrame,
false,
"The first frame of the sequence")
.addArgument(
"--step",
frameStep,
false,
"How many frames should be read between calls to the tracker")
.addFlag(
"--step-by-step",
stepByStep,
"Go through the sequence interactively, frame by frame");
}
  void postProcessArguments()
  {
    if (colorSequence.empty()) {
      throw vpException(vpException::badValue, "The color sequence should not be empty");
    }
  }

  std::string colorSequence;
  std::string depthFolder;
  unsigned int startFrame;
  unsigned int frameStep;
  bool stepByStep;
};
int main(int argc, const char **argv)
{
vpRBTrackerTutorial::BaseArguments baseArgs;
vpRBTrackerTutorial::vpRBExperimentLogger logger;
vpRBTrackerTutorial::vpRBExperimentPlotter plotter;
vpJsonArgumentParser parser(
"Tutorial showing how to use the Render-Based Tracker on an offline sequence",
"--config",
"/");
baseArgs.registerArguments(parser);
logger.registerArguments(parser);
plotter.registerArguments(parser);
parser.parse(argc, argv);
baseArgs.postProcessArguments();
plotter.postProcessArguments(baseArgs.display);
if (baseArgs.enableRenderProfiling) {
vpRBTrackerTutorial::enableRendererProfiling();
}
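  // Display is always enabled for this offline tutorial, regardless of the parsed arguments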
baseArgs.display = true;
logger.startLog();
  vpRBTracker tracker;
  tracker.loadConfigurationFile(baseArgs.trackerConfiguration);
std::cout << "Input video" << std::endl;
std::cout <<
" Filename : " << sequenceArgs.
colorSequence << std::endl;
std::cout <<
" First frame: " << sequenceArgs.
startFrame << std::endl;
}
}
std::cout <<
" Image size : " <<
width <<
" x " <<
height << std::endl;
std::vector<std::shared_ptr<vpDisplay>> displays, debugDisplays;
if (baseArgs.display) {
displays = vpRBTrackerTutorial::createDisplays(Id, Icol, depthDisplay, IProbaDisplay);
if (baseArgs.debugDisplay) {
      debugDisplays = vpDisplayFactory::makeDisplayGrid(
        1, 3,
        0, 0,
        20, 20,
        "Normals in object frame", InormDisplay,
        "Depth canny", ICannyDisplay,
        "Color render", IRender);
}
}
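  // JSON array accumulating the estimated pose for each processed frame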
nlohmann::json result = nlohmann::json::array();
std::cout << "Starting init" << std::endl;
if (baseArgs.hasInlineInit()) {
}
else if (baseArgs.display) {
    tracker.initClick(Id, baseArgs.initFile, true);
}
else {
}
if (baseArgs.display) {
}
bool quit = false;
while (!quit) {
    for (unsigned int sp = 0; sp < sequenceArgs.frameStep; ++sp) {
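      // Convert the raw depth buffer to meters and build an 8-bit image for visualization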
      float scale = 1e-4f; // raw depth is stored in 0.1 mm units; convert to meters
depth.resize(dataArray.getHeight(), dataArray.getWidth());
depthDisplay.resize(dataArray.getHeight(), dataArray.getWidth());
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel for
#endif
      for (int i = 0; i < static_cast<int>(dataArray.getSize()); ++i) {
        float value = static_cast<float>(dataArray.bitmap[i]) * scale;
        depth.bitmap[i] = value;
        depthDisplay.bitmap[i] = value > baseArgs.maxDepthDisplay ? 0.f :
          static_cast<unsigned char>((depth.bitmap[i] / baseArgs.maxDepthDisplay) * 255.f);
      }
}
}
    if (depth.getSize() == 0) {
      trackingResult = tracker.track(Id, Icol);
    }
    else {
      trackingResult = tracker.track(Id, Icol, depth);
    }
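    // Report why the tracker stopped optimizing for this frame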
    switch (trackingResult.getStoppingReason()) {
    case vpRBTrackingStoppingReason::EXCEPTION:
{
std::cout << "Encountered an exception during tracking, pose was not updated" << std::endl;
break;
}
case vpRBTrackingStoppingReason::NOT_ENOUGH_FEATURES:
{
std::cout << "There were not enough feature to perform tracking" << std::endl;
break;
}
case vpRBTrackingStoppingReason::OBJECT_NOT_IN_IMAGE:
{
std::cout << "Object is not in image" << std::endl;
break;
}
case vpRBTrackingStoppingReason::CONVERGENCE_CRITERION:
{
std::cout << "Convergence criterion reached:" << std::endl;
std::cout <<
"- Num iterations: " << trackingResult.
getNumIterations() << std::endl;
break;
}
case vpRBTrackingStoppingReason::MAX_ITERS:
{
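      // Optimization stopped after reaching the maximum number of iterations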
break;
}
default:
{
}
}
if (baseArgs.display) {
if (baseArgs.debugDisplay) {
        vpRBTrackerTutorial::displayNormals(lastFrame.renders.normals, InormDisplay);
}
}
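      // Overlay the tracker's current estimate and the object mask on the displayed images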
tracker.display(Id, Icol, depthDisplay);
tracker.displayMask(IProbaDisplay);
      if (depth.getSize() > 0) {
}
}
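    // Save the current pose estimate and log the frame data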
result.push_back(cMo);
logger.logFrame(tracker, iter, Id, Icol, depthDisplay, IProbaDisplay);
    if (sequenceArgs.stepByStep && baseArgs.display) {
}
std::cout <<
"Iter: " <<
iter << std::endl;
++im;
quit = true;
std::cout << "End of video reached" << std::endl;
}
std::cout << "Frame took: " << frameEnd - frameStart << "ms" << std::endl;
plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
quit = true;
}
}
logger.close();
return EXIT_SUCCESS;
}
#else
int main()
{
std::cout << "This tutorial requires nlohmann_json 3rdparty." << std::endl;
}
#endif