Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoFrankaIBVS-EyeToHand-Lcur_cVe_eJe.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * tests the control law
32 * eye-to-hand control
33 * velocity computed in the camera frame
34 */
35
67
68#include <iostream>
69
70#include <visp3/core/vpConfig.h>
71#include <visp3/core/vpCameraParameters.h>
72#include <visp3/core/vpIoTools.h>
73#include <visp3/core/vpXmlParserCamera.h>
74#include <visp3/detection/vpDetectorAprilTag.h>
75#include <visp3/gui/vpDisplayFactory.h>
76#include <visp3/gui/vpPlot.h>
77#include <visp3/io/vpImageIo.h>
78#include <visp3/robot/vpRobotFranka.h>
79#include <visp3/sensor/vpRealSense2.h>
80#include <visp3/visual_features/vpFeatureBuilder.h>
81#include <visp3/visual_features/vpFeaturePoint.h>
82#include <visp3/vs/vpServo.h>
83#include <visp3/vs/vpServoDisplay.h>
84
85#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_FRANKA) && defined(VISP_HAVE_PUGIXML)
86
87#ifdef ENABLE_VISP_NAMESPACE
88using namespace VISP_NAMESPACE_NAME;
89#endif
90
91bool save_desired_features(const std::string &filename, const std::vector<vpFeaturePoint> &desired_features)
92{
93 std::ofstream file(filename);
94 if (file.is_open()) {
95 for (size_t i = 0; i < desired_features.size(); ++i) {
96 file << desired_features[i].get_x() << " " << desired_features[i].get_y() << " " << desired_features[i].get_Z() << std::endl;
97 }
98
99 file.close();
100 return true;
101 }
102 else {
103 return false;
104 }
105}
106
107bool read_desired_features(const std::string &filename, std::vector<vpFeaturePoint> &desired_features)
108{
109 desired_features.clear();
110 std::ifstream file(filename);
111 if (file.is_open()) {
112 std::string line;
113 while (std::getline(file, line)) {
114 std::istringstream iss(line);
115 double x, y, Z;
116 if (!(iss >> x >> y >> Z)) {
117 return false;
118 }
119 vpFeaturePoint s_d;
120 s_d.buildFrom(x, y, Z);
121 desired_features.push_back(s_d);
122 }
123 file.close();
124
125 return true;
126 }
127 else {
128 return false;
129 }
130}
131
132void display_point_trajectory(const vpImage<unsigned char> &I, const std::vector<vpImagePoint> &vip,
133 std::vector<vpImagePoint> *traj_vip)
134{
135 for (size_t i = 0; i < vip.size(); ++i) {
136 if (traj_vip[i].size()) {
137 // Add the point only if distance with the previous > 1 pixel
138 if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) {
139 traj_vip[i].push_back(vip[i]);
140 }
141 }
142 else {
143 traj_vip[i].push_back(vip[i]);
144 }
145 }
146 for (size_t i = 0; i < vip.size(); ++i) {
147 for (size_t j = 1; j < traj_vip[i].size(); j++) {
148 vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2);
149 }
150 }
151}
152
153int main(int argc, char **argv)
154{
155 double opt_tag_size = 0.120;
156 bool opt_tag_z_aligned = false;
157 std::string opt_robot_ip = "192.168.1.1";
158 std::string opt_eMo_filename = "";
159 std::string opt_intrinsic_filename = "";
160 std::string opt_camera_name = "Camera";
161 bool display_tag = true;
162 int opt_quad_decimate = 2;
163 bool opt_verbose = false;
164 bool opt_plot = false;
165 bool opt_adaptive_gain = false;
166 bool opt_task_sequencing = false;
167 bool opt_learn_desired_features = false;
168 std::string desired_features_filename = "learned_desired_features.txt";
169 double convergence_threshold = 0.00005;
170
171 for (int i = 1; i < argc; ++i) {
172 if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) {
173 opt_tag_size = std::stod(argv[++i]);
174 }
175 else if ((std::string(argv[i]) == "--tag-quad-decimate") && (i + 1 < argc)) {
176 opt_quad_decimate = std::stoi(argv[++i]);
177 }
178 else if (std::string(argv[i]) == "--tag-z-aligned") {
179 opt_tag_z_aligned = true;
180 }
181 else if ((std::string(argv[i]) == "--ip") && (i + 1 < argc)) {
182 opt_robot_ip = std::string(argv[++i]);
183 }
184 else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
185 opt_intrinsic_filename = std::string(argv[++i]);
186 }
187 else if (std::string(argv[i]) == "--camera-name" && i + 1 < argc) {
188 opt_camera_name = std::string(argv[++i]);
189 }
190 else if ((std::string(argv[i]) == "--eMo") && (i + 1 < argc)) {
191 opt_eMo_filename = std::string(argv[++i]);
192 }
193 else if ((std::string(argv[i]) == "--verbose") || (std::string(argv[i]) == "-v")) {
194 opt_verbose = true;
195 }
196 else if (std::string(argv[i]) == "--plot") {
197 opt_plot = true;
198 }
199 else if (std::string(argv[i]) == "--adaptive-gain") {
200 opt_adaptive_gain = true;
201 }
202 else if (std::string(argv[i]) == "--task-sequencing") {
203 opt_task_sequencing = true;
204 }
205 else if (std::string(argv[i]) == "--no-convergence-threshold") {
206 convergence_threshold = 0.;
207 }
208 else if (std::string(argv[i]) == "--learn-desired-features") {
209 opt_learn_desired_features = true;
210 }
211 else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) {
212 std::cout << "SYNOPSYS" << std::endl
213 << " " << argv[0]
214 << " [--ip <controller ip>]"
215 << " [--intrinsic <xml file>]"
216 << " [--camera-name <name>]"
217 << " [--tag-size <size>]"
218 << " [--tag-quad-decimate <decimation factor>]"
219 << " [--tag-z-aligned]"
220 << " [--learn-desired-pose]"
221 << " [--eMo <file.yaml>]"
222 << " [--adaptive-gain]"
223 << " [--plot]"
224 << " [--task-sequencing]"
225 << " [--no-convergence-threshold]"
226 << " [--verbose, -v]"
227 << " [--help, -h]\n"
228 << std::endl;
229 std::cout << "DESCRIPTION" << std::endl
230 << " Use an image-based visual-servoing scheme to position the camera in front of an Apriltag." << std::endl
231 << std::endl
232 << " --ip <controller ip>" << std::endl
233 << " Franka controller ip address" << std::endl
234 << " Default: " << opt_robot_ip << std::endl
235 << std::endl
236 << " --intrinsic <xml file>" << std::endl
237 << " XML file that contains camera intrinsic parameters. " << std::endl
238 << " If no file is specified, use Realsense camera factory intrinsic parameters." << std::endl
239 << std::endl
240 << " --camera-name <name>" << std::endl
241 << " Camera name in the XML file that contains camera intrinsic parameters." << std::endl
242 << " Default: \"Camera\"" << std::endl
243 << std::endl
244 << " --tag-size <size>" << std::endl
245 << " Apriltag size in [m]." << std::endl
246 << " Default: " << opt_tag_size << " [m]" << std::endl
247 << std::endl
248 << " --tag-quad-decimate <decimation factor>" << std::endl
249 << " Decimation factor used during Apriltag detection." << std::endl
250 << " Default: " << opt_quad_decimate << std::endl
251 << std::endl
252 << " --tag-z-aligned" << std::endl
253 << " When enabled, tag z-axis and camera z-axis are aligned." << std::endl
254 << " Default: false" << std::endl
255 << std::endl
256 << " --eMo <file.yaml>" << std::endl
257 << " Yaml file containing the extrinsic transformation between" << std::endl
258 << " robot end-effector frame and object (Apriltag) frames." << std::endl
259 << std::endl
260 << " --adaptive-gain" << std::endl
261 << " Flag to enable adaptive gain to speed up visual servo near convergence." << std::endl
262 << std::endl
263 << " --plot" << std::endl
264 << " Flag to enable curve plotter." << std::endl
265 << std::endl
266 << " --task-sequencing" << std::endl
267 << " Flag to enable task sequencing scheme." << std::endl
268 << std::endl
269 << " --no-convergence-threshold" << std::endl
270 << " Flag to disable convergence threshold used to stop the visual servo." << std::endl
271 << std::endl
272 << " --learn-desired-pose" << std::endl
273 << " Flag to enable desired pose learning." << std::endl
274 << " Data are saved in learned-desired-pose.yaml file." << std::endl
275 << std::endl
276 << " --verbose, -v" << std::endl
277 << " Flag to enable extra verbosity." << std::endl
278 << std::endl
279 << " --help, -h" << std::endl
280 << " Print this helper message." << std::endl
281 << std::endl;
282
283 return EXIT_SUCCESS;
284 }
285 else {
286 std::cout << "\nERROR" << std::endl
287 << std::string(argv[i]) << " command line option is not supported." << std::endl
288 << "Use " << std::string(argv[0]) << " --help" << std::endl
289 << std::endl;
290 return EXIT_FAILURE;
291 }
292 }
293
294 vpRealSense2 rs;
295 rs2::config config;
296 unsigned int width = 640, height = 480;
297 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
298 config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30);
299 config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30);
300 rs.open(config);
301
302 vpImage<unsigned char> I(height, width);
303
304#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
305 std::shared_ptr<vpDisplay> display = vpDisplayFactory::createDisplay(I, 10, 10, "Current image");
306#else
307 vpDisplay *display = vpDisplayFactory::allocateDisplay(I, 10, 10, "Current image");
308#endif
309
310 std::cout << "Parameters:" << std::endl;
311 std::cout << " Apriltag " << std::endl;
312 std::cout << " Size [m] : " << opt_tag_size << std::endl;
313 std::cout << " Z aligned : " << (opt_tag_z_aligned ? "true" : "false") << std::endl;
314 std::cout << " Camera intrinsics " << std::endl;
315 std::cout << " Factory parameters : " << (opt_intrinsic_filename.empty() ? "yes" : "no") << std::endl;
316
317 // Get camera intrinsics
319 if (opt_intrinsic_filename.empty()) {
320 std::cout << "Use Realsense camera intrinsic factory parameters: " << std::endl;
322 std::cout << "cam:\n" << cam << std::endl;
323 }
324 else if (!vpIoTools::checkFilename(opt_intrinsic_filename)) {
325 std::cout << "Camera parameters file " << opt_intrinsic_filename << " doesn't exist." << std::endl;
326 return EXIT_FAILURE;
327 }
328 else {
330 if (!opt_camera_name.empty()) {
331
332 std::cout << " Param file name [.xml]: " << opt_intrinsic_filename << std::endl;
333 std::cout << " Camera name : " << opt_camera_name << std::endl;
334
335 if (parser.parse(cam, opt_intrinsic_filename, opt_camera_name, vpCameraParameters::perspectiveProjWithDistortion) !=
337 std::cout << "Unable to parse parameters with distortion for camera \"" << opt_camera_name << "\" from "
338 << opt_intrinsic_filename << " file" << std::endl;
339 std::cout << "Attempt to find parameters without distortion" << std::endl;
340
341 if (parser.parse(cam, opt_intrinsic_filename, opt_camera_name,
343 std::cout << "Unable to parse parameters without distortion for camera \"" << opt_camera_name << "\" from "
344 << opt_intrinsic_filename << " file" << std::endl;
345 return EXIT_FAILURE;
346 }
347 }
348 }
349 }
350
351 std::cout << "Camera parameters used to compute the pose:\n" << cam << std::endl;
352
353 // Setup Apriltag detector
356 // vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS;
357 vpDetectorAprilTag detector(tagFamily);
358 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
359 detector.setDisplayTag(display_tag);
360 detector.setAprilTagQuadDecimate(opt_quad_decimate);
361 detector.setZAlignedWithCameraAxis(opt_tag_z_aligned);
362
363 // Define 4 3D points corresponding to the CAD model of the Apriltag
364 std::vector<vpPoint> point(4);
365 point[0].setWorldCoordinates(-opt_tag_size / 2., -opt_tag_size / 2., 0);
366 point[1].setWorldCoordinates(+opt_tag_size / 2., -opt_tag_size / 2., 0);
367 point[2].setWorldCoordinates(+opt_tag_size / 2., +opt_tag_size / 2., 0);
368 point[3].setWorldCoordinates(-opt_tag_size / 2., +opt_tag_size / 2., 0);
369
370 if (opt_learn_desired_features) {
371
372 bool quit = false;
373 while (!quit) {
374 rs.acquire(I);
375
377
378 std::vector<vpHomogeneousMatrix> c_M_o_vec;
379 // To learn the desired visual features (x*, y*, Z*) we will use the tag pose to estimate Z*
380 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
381
382 vpDisplay::displayText(I, 20, 20, "Move the robot to the desired tag pose...", vpColor::red);
383 vpDisplay::displayText(I, 40, 20, "Left click to learn desired features, right click to quit", vpColor::red);
384
385 std::vector< std::vector<vpImagePoint> > tags_corners;
386 if (ret) {
387 if (detector.getNbObjects() == 1) {
388 tags_corners = detector.getTagsCorners();
389 for (size_t i = 0; i < 4; ++i) {
390 std::stringstream ss;
391 ss << i;
392 vpDisplay::displayText(I, tags_corners[0][i]+vpImagePoint(15, -15), ss.str(), vpColor::red);
393 vpDisplay::displayCross(I, tags_corners[0][i], 15, vpColor::red, 2);
394 }
395 }
396 }
397
399 if (vpDisplay::getClick(I, button, false)) {
400 switch (button) {
402 if (ret) {
403 if (detector.getNbObjects() == 1) {
404 tags_corners = detector.getTagsCorners();
405 vpHomogeneousMatrix cd_M_o = c_M_o_vec[0];
406 std::vector<vpFeaturePoint> p_d(4);
407
408 // Update x*, y* for each desired visual feature
409 for (size_t i = 0; i < 4; ++i) {
410 double x = 0, y = 0;
411 vpPixelMeterConversion::convertPoint(cam, tags_corners[0][i], x, y);
412 p_d[i].set_x(x);
413 p_d[i].set_y(y);
414 }
415
416 // Update Z* for each desired visual feature
417 for (size_t i = 0; i < point.size(); ++i) {
418 vpColVector c_P, p;
419 point[i].changeFrame(cd_M_o, c_P);
420 p_d[i].set_Z(c_P[2]);
421 }
422 if (save_desired_features(desired_features_filename, p_d)) {
423 std::cout << "Desired visual features saved in: " << desired_features_filename << std::endl;
424 }
425 else {
426 std::cout << "Error: Unable to save desired features in " << desired_features_filename << std::endl;
427 return EXIT_FAILURE;
428 }
429 }
430 else {
431 std::cout << "Cannot save desired features. More than 1 tag is visible in the image..." << std::endl;
432 }
433 }
434 else {
435 std::cout << "Cannot save desired features. Tag is not visible in the image..." << std::endl;
436 }
437 break;
439 quit = true;
440 break;
441 default:
442 break;
443 }
444 }
445
447 }
448 return EXIT_SUCCESS;
449 }
450
451 // Load desired features to reach by visual servo
452
454 std::vector<vpFeaturePoint> p_d; // Desired visual features
455 // Sanity options check
456 if (desired_features_filename.empty() || (!vpIoTools::checkFilename(desired_features_filename))) {
457 std::cout << "Cannot start eye-to-hand visual-servoing. Desired features are not available." << std::endl;
458 std::cout << "use --learn-desired-features flag to learn the desired features." << std::endl;
459 return EXIT_FAILURE;
460 }
461 else {
462 if (!read_desired_features(desired_features_filename, p_d)) {
463 std::cout << "Error: Unable to read desired features from: " << desired_features_filename << std::endl;
464 return EXIT_FAILURE;
465 }
466 }
468
469 // Get camera extrinsics
470 vpPoseVector e_P_o;
471
472 // Read camera extrinsics from --eMo <file>
473 if (!opt_eMo_filename.empty()) {
474 e_P_o.loadYAML(opt_eMo_filename, e_P_o);
475 }
476 else {
477 std::cout << "Warning, eMo transformation is not specified using --eMo parameter." << std::endl;
478 return EXIT_FAILURE;
479 }
480 vpHomogeneousMatrix e_M_o(e_P_o);
481 std::cout << "e_M_o:\n" << e_M_o << std::endl;
482
483 vpRobotFranka robot;
484
485 try {
486 robot.connect(opt_robot_ip);
487
490
491 // Create current visual features
492 std::vector<vpFeaturePoint> p(4); // We use 4 points
493
494 // Add the 4 visual feature points
495 for (size_t i = 0; i < p.size(); ++i) {
496 task.addFeature(p[i], p_d[i]);
497 }
499
504 task.setInteractionMatrixType(vpServo::CURRENT);
506
507 if (opt_adaptive_gain) {
508 vpAdaptiveGain lambda(1, 0.4, 30); // lambda(0)=1, lambda(oo)=0.4 and lambda'(0)=30
509 task.setLambda(lambda);
510 }
511 else {
512 task.setLambda(0.2);
513 }
514
515 vpPlot *plotter = nullptr;
516 int iter_plot = 0;
517
518 if (opt_plot) {
519 plotter = new vpPlot(2, static_cast<int>(250 * 2), 500, static_cast<int>(I.getWidth()) + 80, 10,
520 "Real time curves plotter");
521 plotter->setTitle(0, "Visual features error");
522 plotter->setTitle(1, "Joint velocities");
523 plotter->initGraph(0, 8);
524 plotter->initGraph(1, 7);
525 plotter->setLegend(0, 0, "error_feat_p1_x");
526 plotter->setLegend(0, 1, "error_feat_p1_y");
527 plotter->setLegend(0, 2, "error_feat_p2_x");
528 plotter->setLegend(0, 3, "error_feat_p2_y");
529 plotter->setLegend(0, 4, "error_feat_p3_x");
530 plotter->setLegend(0, 5, "error_feat_p3_y");
531 plotter->setLegend(0, 6, "error_feat_p4_x");
532 plotter->setLegend(0, 7, "error_feat_p4_y");
533 plotter->setLegend(1, 0, "q_1");
534 plotter->setLegend(1, 1, "q_2");
535 plotter->setLegend(1, 2, "q_3");
536 plotter->setLegend(1, 3, "q_4");
537 plotter->setLegend(1, 4, "q_5");
538 plotter->setLegend(1, 5, "q_6");
539 plotter->setLegend(1, 6, "q_7");
540 }
541
542 bool final_quit = false;
543 bool has_converged = false;
544 bool send_velocities = false;
545 bool servo_started = false;
546 std::vector<vpImagePoint> *traj_corners = nullptr; // To memorize point trajectory
547
548 static double t_init_servo = vpTime::measureTimeMs();
549
550 //robot.set_eMc(eMc); // Set location of the camera wrt end-effector frame
551 robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
552
553 while (!has_converged && !final_quit) {
554 double t_start = vpTime::measureTimeMs();
555
556 rs.acquire(I);
557
559
560 std::vector<vpHomogeneousMatrix> c_M_o_vec;
561 // To update the current visual feature (x,y,Z) parameters, we need the tag pose to estimate Z
563 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
565
566 {
567 std::stringstream ss;
568 ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit.";
569 vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
570 }
571
572 vpColVector qdot(robot.getNDof());
573
574 // Only one tag is detected
575 if (ret && (c_M_o_vec.size() == 1)) {
577 vpHomogeneousMatrix c_M_o = c_M_o_vec[0];
579
580 static bool first_time = true;
581
583 // Get tag corners
584 std::vector<vpImagePoint> corners = detector.getPolygon(0);
585
586 // Update visual features
587 for (size_t i = 0; i < corners.size(); ++i) {
588 // Update the point feature from the tag corners location
589 vpFeatureBuilder::create(p[i], cam, corners[i]);
590 // Set the feature Z coordinate from the pose
591 vpColVector c_P;
592 point[i].changeFrame(c_M_o, c_P);
593
594 p[i].set_Z(c_P[2]); // Update Z value of each visual feature thanks to the tag pose
595 }
597
600 c_M_e = c_M_o * e_M_o.inverse();
602
603 task.set_cVe(c_M_e);
604
606 vpMatrix e_J_e;
607 robot.get_eJe(e_J_e);
608 task.set_eJe(e_J_e);
610
611 if (opt_task_sequencing) {
612 if (!servo_started) {
613 if (send_velocities) {
614 servo_started = true;
615 }
616 t_init_servo = vpTime::measureTimeMs();
617 }
618 qdot = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.);
619 }
620 else {
622 qdot = task.computeControlLaw();
624 }
625
626 // Display the current and desired feature points in the image display
627 vpServoDisplay::display(task, cam, I);
628 for (size_t i = 0; i < corners.size(); ++i) {
629 std::stringstream ss;
630 ss << i;
631 // Display current point indexes
632 vpDisplay::displayText(I, corners[i] + vpImagePoint(15, 15), ss.str(), vpColor::red);
633 // Display desired point indexes
634 vpImagePoint ip;
635 vpMeterPixelConversion::convertPoint(cam, p_d[i].get_x(), p_d[i].get_y(), ip);
636 vpDisplay::displayText(I, ip + vpImagePoint(15, 15), ss.str(), vpColor::red);
637 }
638 if (first_time) {
639 traj_corners = new std::vector<vpImagePoint>[corners.size()];
640 }
641 // Display the trajectory of the points used as features
642 display_point_trajectory(I, corners, traj_corners);
643
644 if (opt_plot) {
645 plotter->plot(0, iter_plot, task.getError());
646 plotter->plot(1, iter_plot, qdot);
647 iter_plot++;
648 }
649
650 if (opt_verbose) {
651 std::cout << "qdot: " << qdot.t() << std::endl;
652 }
653
654 double error = task.getError().sumSquare();
655 std::stringstream ss;
656 ss << "error: " << error;
657 vpDisplay::displayText(I, 20, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
658
659 if (opt_verbose)
660 std::cout << "error: " << error << std::endl;
661
662 if (error < convergence_threshold) {
663 has_converged = true;
664 std::cout << "Servo task has converged" << std::endl;
665 vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red);
666 }
667 if (first_time) {
668 first_time = false;
669 }
670 } // end if (c_M_o_vec.size() == 1)
671 else {
672 qdot = 0;
673 }
674
675 if (!send_velocities) {
676 qdot = 0;
677 }
678
679 // Send to the robot
680 robot.setVelocity(vpRobot::JOINT_STATE, qdot);
681
682 {
683 std::stringstream ss;
684 ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms";
685 vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
686 }
688
690 if (vpDisplay::getClick(I, button, false)) {
691 switch (button) {
693 send_velocities = !send_velocities;
694 break;
695
697 final_quit = true;
698 qdot = 0;
699 break;
700
701 default:
702 break;
703 }
704 }
705 }
706 std::cout << "Stop the robot " << std::endl;
707 robot.setRobotState(vpRobot::STATE_STOP);
708
709 if (opt_plot && plotter != nullptr) {
710 delete plotter;
711 plotter = nullptr;
712 }
713
714 if (!final_quit) {
715 while (!final_quit) {
716 rs.acquire(I);
718
719 vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red);
720 vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red);
721
722 if (vpDisplay::getClick(I, false)) {
723 final_quit = true;
724 }
725
727 }
728 }
729 if (traj_corners) {
730 delete[] traj_corners;
731 }
732 }
733 catch (const vpException &e) {
734 std::cout << "ViSP exception: " << e.what() << std::endl;
735 std::cout << "Stop the robot " << std::endl;
736 robot.setRobotState(vpRobot::STATE_STOP);
737#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
738 if (display != nullptr) {
739 delete display;
740 }
741#endif
742 return EXIT_FAILURE;
743 }
744 catch (const franka::NetworkException &e) {
745 std::cout << "Franka network exception: " << e.what() << std::endl;
746 std::cout << "Check if you are connected to the Franka robot"
747 << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. "
748 << std::endl;
749#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
750 if (display != nullptr) {
751 delete display;
752 }
753#endif
754 return EXIT_FAILURE;
755 }
756 catch (const std::exception &e) {
757 std::cout << "Franka exception: " << e.what() << std::endl;
758#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
759 if (display != nullptr) {
760 delete display;
761 }
762#endif
763 return EXIT_FAILURE;
764 }
765
766#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
767 if (display != nullptr) {
768 delete display;
769 }
770#endif
771
772 return EXIT_SUCCESS;
773}
774#else
/*!
 * Fallback main, built when the example's prerequisites are missing.
 * Prints one hint per missing dependency; the real example (above) requires
 * VISP_HAVE_REALSENSE2, VISP_HAVE_DISPLAY, VISP_HAVE_FRANKA and VISP_HAVE_PUGIXML.
 */
int main()
{
#if !defined(VISP_HAVE_REALSENSE2)
  std::cout << "Install librealsense-2.x and rebuild ViSP." << std::endl;
#endif
#if !defined(VISP_HAVE_DISPLAY)
  // Fixed inconsistency: the enabling guard also requires a display library,
  // but no hint was printed when it was missing
  std::cout << "Build ViSP with a 2D display library (X11, GDI, OpenCV, ...) and rebuild ViSP." << std::endl;
#endif
#if !defined(VISP_HAVE_FRANKA)
  std::cout << "Install libfranka and rebuild ViSP." << std::endl;
#endif
#if !defined(VISP_HAVE_PUGIXML)
  std::cout << "Build ViSP with pugixml support enabled." << std::endl;
#endif
  return EXIT_SUCCESS;
}
788#endif
Adaptive gain computation.
static bool loadYAML(const std::string &filename, vpArray2D< Type > &A, char *header=nullptr)
Definition vpArray2D.h:874
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor green
Definition vpColor.h:201
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:60
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
vpFeaturePoint & buildFrom(const double &x, const double &y, const double &Z)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static bool checkFilename(const std::string &filename)
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Definition vpPlot.h:117
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
@ JOINT_STATE
Definition vpRobot.h:79
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition vpRobot.h:64
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
Definition vpRobot.h:63
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
@ EYETOHAND_L_cVe_eJe
Definition vpServo.h:190
@ CURRENT
Definition vpServo.h:217
XML parser to load and save intrinsic camera parameters.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()