Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoAfma6AprilTagIBVS.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Tests the control law
 * eye-in-hand control
 * velocity computed in the camera frame
 */
35
51
52#include <iostream>
53
54#include <visp3/core/vpConfig.h>
55
56#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6)
57
58#include <visp3/core/vpCameraParameters.h>
59#include <visp3/detection/vpDetectorAprilTag.h>
60#include <visp3/gui/vpDisplayFactory.h>
61#include <visp3/gui/vpPlot.h>
62#include <visp3/io/vpImageIo.h>
63#include <visp3/robot/vpRobotAfma6.h>
64#include <visp3/sensor/vpRealSense2.h>
65#include <visp3/visual_features/vpFeatureBuilder.h>
66#include <visp3/visual_features/vpFeaturePoint.h>
67#include <visp3/vs/vpServo.h>
68#include <visp3/vs/vpServoDisplay.h>
69
70#ifdef ENABLE_VISP_NAMESPACE
71using namespace VISP_NAMESPACE_NAME;
72#endif
73
74void display_point_trajectory(const vpImage<unsigned char> &I,
75 const std::vector<vpImagePoint> &vip,
76 std::vector<vpImagePoint> *traj_vip)
77{
78 for (size_t i = 0; i < vip.size(); ++i) {
79 if (traj_vip[i].size()) {
80 // Add the point only if distance with the previous > 1 pixel
81 if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) {
82 traj_vip[i].push_back(vip[i]);
83 }
84 }
85 else {
86 traj_vip[i].push_back(vip[i]);
87 }
88 }
89 for (size_t i = 0; i < vip.size(); ++i) {
90 for (size_t j = 1; j < traj_vip[i].size(); ++j) {
91 vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2);
92 }
93 }
94}
95
/*
 * Image-based visual servoing (IBVS) on an AprilTag observed by an Intel
 * RealSense D435 camera mounted on an Afma6 gantry robot (eye-in-hand,
 * velocity control expressed in the camera frame).
 *
 * The visual features are the 4 tag corners (2D points). The loop detects the
 * tag, updates the features, computes the camera velocity with vpServo and
 * sends it to the robot until the feature error falls below the convergence
 * threshold, or the user right-clicks to quit.
 */
int main(int argc, char **argv)
{
  // Default settings; each can be overridden from the command line below
  double opt_tagSize = 0.120;              // AprilTag black-square size [m]
  int opt_quad_decimate = 2;               // Detector decimation factor (speed vs accuracy)
  bool opt_verbose = false;
  bool opt_plot = false;
  bool opt_adaptive_gain = false;
  bool opt_task_sequencing = false;
  double opt_convergence_threshold = 0.00005; // Sum of squared feature errors

  bool display_tag = true;

  // Minimal hand-rolled command line parsing
  for (int i = 1; i < argc; ++i) {
    if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) {
      opt_tagSize = std::stod(argv[++i]);
    }
    else if ((std::string(argv[i]) == "--tag-quad-decimate") && (i + 1 < argc)) {
      opt_quad_decimate = std::stoi(argv[++i]);
    }
    else if (std::string(argv[i]) == "--verbose") {
      opt_verbose = true;
    }
    else if (std::string(argv[i]) == "--plot") {
      opt_plot = true;
    }
    else if (std::string(argv[i]) == "--adaptive-gain") {
      opt_adaptive_gain = true;
    }
    else if (std::string(argv[i]) == "--task-sequencing") {
      opt_task_sequencing = true;
    }
    else if (std::string(argv[i]) == "--no-convergence-threshold") {
      opt_convergence_threshold = 0.;
    }
    else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) {
      std::cout
        << argv[0]
        << " [--tag-size <marker size in meter; default " << opt_tagSize << ">]"
        << " [--tag-quad-decimate <decimation; default " << opt_quad_decimate << ">]"
        << " [--adaptive-gain]"
        << " [--plot]"
        << " [--task-sequencing]"
        << " [--no-convergence-threshold]"
        << " [--verbose]"
        << " [--help, -h]"
        << std::endl;
      return EXIT_SUCCESS;
    }
  }

  vpRobotAfma6 robot;
  // NOTE(review): `projModel` used below is declared on a line elided from this
  // view — presumably vpCameraParameters::perspectiveProjWithDistortion (see
  // the comment below); confirm against the full file.

  // Load the end-effector to camera frame transformation obtained
  // using a camera intrinsic model with distortion
  robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel);

  try {
    std::cout << "WARNING: This example will move the robot! "
      << "Please make sure to have the user stop button at hand!" << std::endl
      << "Press Enter to continue..." << std::endl;
    std::cin.ignore();

    // Configure and open the RealSense color/depth/infrared streams
    vpRealSense2 rs;
    rs2::config config;
    unsigned int width = 640, height = 480, fps = 60;
    config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
    config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
    config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps);
    rs.open(config);

    // Warm up camera
    // NOTE(review): the image `I` is declared on an elided line, presumably
    // vpImage<unsigned char> I; — confirm against the full file.
    for (size_t i = 0; i < 10; ++i) {
      rs.acquire(I);
    }

    // Get camera intrinsics
    // NOTE(review): `cam` (vpCameraParameters) is declared on an elided line.
    robot.getCameraParameters(cam, I);
    std::cout << "cam:\n" << cam << std::endl;

    std::shared_ptr<vpDisplay> d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image");

    // NOTE(review): `tagFamily` and `poseEstimationMethod` are declared on
    // elided lines (the symbol index suggests TAG_36h11 for the family).
    //vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS;
    vpDetectorAprilTag detector(tagFamily);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setDisplayTag(display_tag);
    detector.setAprilTagQuadDecimate(opt_quad_decimate);
    detector.setZAlignedWithCameraAxis(true);

    // Tag frame for pose estimation is the following
    // - When using
    //   detector.setZAlignedWithCameraAxis(false);
    //   detector.detect();
    //   we consider the tag frame (o) such as z_o axis is not aligned with camera frame
    //   (meaning z_o axis is facing the camera)
    //
    //   3    y    2
    //        |
    //        o--x
    //
    //   0         1
    //
    //   In that configuration, it is more difficult to set a desired camera pose c_M_o.
    //   To ease this step we can introduce an extra transformation matrix o'_M_o to align the axis
    //   with the camera frame:
    //
    //        o'--x
    //        |
    //        y
    //
    //   where
    //            | 1  0  0  0 |
    //   o'_M_o = | 0 -1  0  0 |
    //            | 0  0 -1  0 |
    //            | 0  0  0  1 |
    //
    //   Defining the desired camera pose in frame o' becomes then easier.
    //
    // - When using rather
    //   detector.setZAlignedWithCameraAxis(true);
    //   detector.detect();
    //   we consider the tag frame (o) such as z_o axis is aligned with camera frame
    //
    //   3         2
    //
    //        o--x
    //        |
    //   0    y    1
    //
    //   In that configuration, it is easier to define a desired camera pose c_M_o since all the axis
    //   (camera frame and tag frame are aligned)

    // Servo
    vpHomogeneousMatrix cd_M_c, c_M_o, o_M_o;

    // Desired pose used to compute the desired features
    // NOTE(review): the constructor's second argument (the desired rotation,
    // likely built with vpMath::rad) sits on an elided line — the statement is
    // incomplete in this view.
    vpHomogeneousMatrix cd_M_o(vpTranslationVector(0, 0, opt_tagSize * 3.5), // 3.5 times tag width along camera z axis
    if (!detector.isZAlignedWithCameraAxis()) {
      // Re-express the desired pose in the o' frame described above
      vpHomogeneousMatrix oprim_M_o = vpHomogeneousMatrix({ 1, 0, 0, 0,
                                                            0, -1, 0, 0,
                                                            0, 0, -1, 0,
                                                            0, 0, 0, 1 });
      cd_M_o *= oprim_M_o;
    }
    // Create visual features
    std::vector<vpFeaturePoint> s_point(4), s_point_d(4); // We use 4 points

    // Define 4 3D points corresponding to the CAD model of the Apriltag
    // (corner order matches the detector's polygon output for each convention)
    std::vector<vpPoint> point(4);
    if (detector.isZAlignedWithCameraAxis()) {
      point[0].setWorldCoordinates(-opt_tagSize / 2., +opt_tagSize / 2., 0);
      point[1].setWorldCoordinates(+opt_tagSize / 2., +opt_tagSize / 2., 0);
      point[2].setWorldCoordinates(+opt_tagSize / 2., -opt_tagSize / 2., 0);
      point[3].setWorldCoordinates(-opt_tagSize / 2., -opt_tagSize / 2., 0);
    }
    else {
      point[0].setWorldCoordinates(-opt_tagSize / 2., -opt_tagSize / 2., 0);
      point[1].setWorldCoordinates(+opt_tagSize / 2., -opt_tagSize / 2., 0);
      point[2].setWorldCoordinates(+opt_tagSize / 2., +opt_tagSize / 2., 0);
      point[3].setWorldCoordinates(-opt_tagSize / 2., +opt_tagSize / 2., 0);
    }

    // NOTE(review): `task` (vpServo) is declared/configured on elided lines —
    // presumably vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA);
    // Add the 4 visual feature points
    for (size_t i = 0; i < s_point.size(); ++i) {
      task.addFeature(s_point[i], s_point_d[i]);
    }
    // Interaction matrix computed from the current features
    task.setInteractionMatrixType(vpServo::CURRENT);

    // Set the gain
    if (opt_adaptive_gain) {
      vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30
      task.setLambda(lambda);
    }
    else {
      task.setLambda(0.2);
    }

    vpPlot *plotter = nullptr;
    int iter_plot = 0;

    // Optional real-time plot of the feature errors and camera velocities
    if (opt_plot) {
      plotter = new vpPlot(2, static_cast<int>(250 * 2), 500, static_cast<int>(I.getWidth()) + 80, 10,
                           "Real time curves plotter");
      plotter->setTitle(0, "Visual features error");
      plotter->setTitle(1, "Camera velocities");
      plotter->initGraph(0, 8);
      plotter->initGraph(1, 6);
      plotter->setLegend(0, 0, "error_feat_p1_x");
      plotter->setLegend(0, 1, "error_feat_p1_y");
      plotter->setLegend(0, 2, "error_feat_p2_x");
      plotter->setLegend(0, 3, "error_feat_p2_y");
      plotter->setLegend(0, 4, "error_feat_p3_x");
      plotter->setLegend(0, 5, "error_feat_p3_y");
      plotter->setLegend(0, 6, "error_feat_p4_x");
      plotter->setLegend(0, 7, "error_feat_p4_y");
      plotter->setLegend(1, 0, "vc_x");
      plotter->setLegend(1, 1, "vc_y");
      plotter->setLegend(1, 2, "vc_z");
      plotter->setLegend(1, 3, "wc_x");
      plotter->setLegend(1, 4, "wc_y");
      plotter->setLegend(1, 5, "wc_z");
    }

    bool final_quit = false;       // Set by a right click
    bool has_converged = false;    // Set when feature error < threshold
    bool send_velocities = false;  // Toggled by a left click; robot stays still until true
    bool servo_started = false;
    std::vector<vpImagePoint> *traj_corners = nullptr; // To memorize point trajectory

    static double t_init_servo = vpTime::measureTimeMs();

    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

    // Main servo loop
    while (!has_converged && !final_quit) {
      double t_start = vpTime::measureTimeMs();

      rs.acquire(I);

      // NOTE(review): a vpDisplay::display(I) call appears elided here.

      std::vector<vpHomogeneousMatrix> c_M_o_vec;
      detector.detect(I, opt_tagSize, cam, c_M_o_vec);

      {
        std::stringstream ss;
        ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit.";
        vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
      }

      vpColVector v_c(6); // Camera velocity skew vector (vx, vy, vz, wx, wy, wz)

      // Ensure that only one tag is detected during servoing
      if (c_M_o_vec.size() == 1) {
        c_M_o = c_M_o_vec[0];

        static bool first_time = true;
        if (first_time) {
          // Introduce security wrt tag positioning in order to avoid PI rotation
          std::vector<vpHomogeneousMatrix> v_o_M_o(2), v_cd_M_c(2);
          v_o_M_o[1].buildFrom(0, 0, 0, 0, 0, M_PI);
          for (size_t i = 0; i < 2; ++i) {
            v_cd_M_c[i] = cd_M_o * v_o_M_o[i] * c_M_o.inverse();
          }
          // Keep whichever desired frame needs the smallest camera rotation
          if (std::fabs(v_cd_M_c[0].getThetaUVector().getTheta()) < std::fabs(v_cd_M_c[1].getThetaUVector().getTheta())) {
            o_M_o = v_o_M_o[0];
          }
          else {
            std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl;
            o_M_o = v_o_M_o[1]; // Introduce PI rotation
          }

          // Compute the desired position of the features from the desired pose
          for (size_t i = 0; i < point.size(); ++i) {
            vpColVector cP, p;
            point[i].changeFrame(cd_M_o * o_M_o, cP);
            point[i].projection(cP, p);

            s_point_d[i].set_x(p[0]);
            s_point_d[i].set_y(p[1]);
            s_point_d[i].set_Z(cP[2]);
          }
        }

        // Get tag corners
        std::vector<vpImagePoint> corners = detector.getPolygon(0);

        // Update visual features
        for (size_t i = 0; i < corners.size(); ++i) {
          // Update the point feature from the tag corners location
          vpFeatureBuilder::create(s_point[i], cam, corners[i]);
          // Set the feature Z coordinate from the pose
          vpColVector c_P;
          point[i].changeFrame(c_M_o, c_P);

          s_point[i].set_Z(c_P[2]);
        }

        if (opt_task_sequencing) {
          // Restart the sequencing clock until the operator enables motion
          if (!servo_started) {
            if (send_velocities) {
              servo_started = true;
            }
            t_init_servo = vpTime::measureTimeMs();
          }
          v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.);
        }
        else {
          v_c = task.computeControlLaw();
        }

        // Display the current and desired feature points in the image display
        vpServoDisplay::display(task, cam, I);
        for (size_t i = 0; i < corners.size(); ++i) {
          std::stringstream ss;
          ss << i;
          // Display current point indexes
          vpDisplay::displayText(I, corners[i] + vpImagePoint(15, 15), ss.str(), vpColor::red);
          // Display desired point indexes
          vpImagePoint ip;
          vpMeterPixelConversion::convertPoint(cam, s_point_d[i].get_x(), s_point_d[i].get_y(), ip);
          vpDisplay::displayText(I, ip + vpImagePoint(15, 15), ss.str(), vpColor::red);
        }
        if (first_time) {
          // One trajectory per corner, allocated on the first detection
          traj_corners = new std::vector<vpImagePoint>[corners.size()];
        }
        // Display the trajectory of the points used as features
        display_point_trajectory(I, corners, traj_corners);

        if (opt_plot) {
          plotter->plot(0, iter_plot, task.getError());
          plotter->plot(1, iter_plot, v_c);
          iter_plot++;
        }

        if (opt_verbose) {
          std::cout << "v_c: " << v_c.t() << std::endl;
        }

        double error = task.getError().sumSquare();
        std::stringstream ss;
        ss << "error: " << error;
        vpDisplay::displayText(I, 20, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);

        if (opt_verbose)
          std::cout << "error: " << error << std::endl;

        if (error < opt_convergence_threshold) {
          has_converged = true;
          std::cout << "Servo task has converged" << std::endl;
          vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red);
        }
        if (first_time) {
          first_time = false;
        }
      } // end if (c_M_o_vec.size() == 1)
      else {
        // No tag (or several tags): stop moving
        v_c = 0;
      }

      if (!send_velocities) {
        v_c = 0;
      }

      // Send to the robot
      robot.setVelocity(vpRobot::CAMERA_FRAME, v_c);

      {
        std::stringstream ss;
        ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms";
        vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
      }
      // NOTE(review): a vpDisplay::flush(I) call and the declaration of
      // `button` (vpMouseButton::vpMouseButtonType) appear elided here.

      if (vpDisplay::getClick(I, button, false)) {
        switch (button) {
          // NOTE(review): the case labels (presumably vpMouseButton::button1
          // and button3) are elided from this view.
          send_velocities = !send_velocities;
          break;

          final_quit = true;
          break;

        default:
          break;
        }
      }
    }
    std::cout << "Stop the robot " << std::endl;
    robot.setRobotState(vpRobot::STATE_STOP);

    if (opt_plot && plotter != nullptr) {
      delete plotter;
      plotter = nullptr;
    }

    // After convergence, keep displaying images until the user clicks
    if (!final_quit) {
      while (!final_quit) {
        rs.acquire(I);
        // NOTE(review): vpDisplay::display(I) appears elided here.

        vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red);
        vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red);

        if (vpDisplay::getClick(I, false)) {
          final_quit = true;
        }

        // NOTE(review): vpDisplay::flush(I) appears elided here.
      }
    }
    if (traj_corners) {
      delete[] traj_corners;
    }
  }
  catch (const vpException &e) {
    // Make sure the robot is stopped on any ViSP error
    std::cout << "ViSP exception: " << e.what() << std::endl;
    std::cout << "Stop the robot " << std::endl;
    robot.setRobotState(vpRobot::STATE_STOP);
    return EXIT_FAILURE;
  }

  return EXIT_SUCCESS;
}
508#else
/*
 * Fallback entry point when a build prerequisite is missing.
 *
 * The real example (above) is compiled only when VISP_HAVE_REALSENSE2,
 * VISP_HAVE_DISPLAY and VISP_HAVE_AFMA6 are all defined; print which of the
 * three is missing so the user knows what to install/enable.
 */
int main()
{
#if !defined(VISP_HAVE_REALSENSE2)
  std::cout << "Install librealsense-2.x" << std::endl;
#endif
#if !defined(VISP_HAVE_DISPLAY)
  // The guard above also requires a display library (X11, GDI, OpenCV...)
  std::cout << "ViSP is not built with display capabilities..." << std::endl;
#endif
#if !defined(VISP_HAVE_AFMA6)
  std::cout << "ViSP is not built with Afma6 robot support..." << std::endl;
#endif
  return EXIT_SUCCESS;
}
519#endif
Adaptive gain computation.
@ TOOL_INTEL_D435_CAMERA
Definition vpAfma6.h:129
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor green
Definition vpColor.h:201
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:60
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static double rad(double deg)
Definition vpMath.h:129
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Definition vpPlot.h:117
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Control of Irisa's gantry robot named Afma6.
@ CAMERA_FRAME
Definition vpRobot.h:81
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition vpRobot.h:64
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
Definition vpRobot.h:63
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
@ EYEINHAND_CAMERA
Definition vpServo.h:176
@ CURRENT
Definition vpServo.h:217
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()