Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoFrankaIBVS-EyeToHand-Lcur_cVf_fVe_eJe.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * tests the control law
32 * eye-to-hand control
33 * velocity computed in the camera frame
34 */
35
68
69#include <iostream>
70#include <fstream>
71
72#include <visp3/core/vpConfig.h>
73#include <visp3/core/vpCameraParameters.h>
74#include <visp3/core/vpIoTools.h>
75#include <visp3/core/vpXmlParserCamera.h>
76#include <visp3/detection/vpDetectorAprilTag.h>
77#include <visp3/gui/vpDisplayFactory.h>
78#include <visp3/gui/vpPlot.h>
79#include <visp3/io/vpImageIo.h>
80#include <visp3/robot/vpRobotFranka.h>
81#include <visp3/sensor/vpRealSense2.h>
82#include <visp3/visual_features/vpFeatureBuilder.h>
83#include <visp3/visual_features/vpFeaturePoint.h>
84#include <visp3/vs/vpServo.h>
85#include <visp3/vs/vpServoDisplay.h>
86
87#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_FRANKA) && defined(VISP_HAVE_PUGIXML)
88
89#ifdef ENABLE_VISP_NAMESPACE
90using namespace VISP_NAMESPACE_NAME;
91#endif
92
93bool save_desired_features(const std::string &filename, const std::vector<vpFeaturePoint> &desired_features)
94{
95 std::ofstream file(filename);
96 if (file.is_open()) {
97 for (size_t i = 0; i < desired_features.size(); ++i) {
98 file << desired_features[i].get_x() << " " << desired_features[i].get_y() << " " << desired_features[i].get_Z() << std::endl;
99 }
100
101 file.close();
102 return true;
103 }
104 else {
105 return false;
106 }
107}
108
109bool read_desired_features(const std::string &filename, std::vector<vpFeaturePoint> &desired_features)
110{
111 desired_features.clear();
112 std::ifstream file(filename);
113 if (file.is_open()) {
114 std::string line;
115 while (std::getline(file, line)) {
116 std::istringstream iss(line);
117 double x, y, Z;
118 if (!(iss >> x >> y >> Z)) {
119 return false;
120 }
121 vpFeaturePoint s_d;
122 s_d.buildFrom(x, y, Z);
123 desired_features.push_back(s_d);
124 }
125 file.close();
126
127 return true;
128 }
129 else {
130 return false;
131 }
132}
133
134void display_point_trajectory(const vpImage<unsigned char> &I, const std::vector<vpImagePoint> &vip,
135 std::vector<vpImagePoint> *traj_vip)
136{
137 for (size_t i = 0; i < vip.size(); ++i) {
138 if (traj_vip[i].size()) {
139 // Add the point only if distance with the previous > 1 pixel
140 if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) {
141 traj_vip[i].push_back(vip[i]);
142 }
143 }
144 else {
145 traj_vip[i].push_back(vip[i]);
146 }
147 }
148 for (size_t i = 0; i < vip.size(); ++i) {
149 for (size_t j = 1; j < traj_vip[i].size(); j++) {
150 vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2);
151 }
152 }
153}
154
/*
 * Entry point: eye-to-hand image-based visual servoing on a Franka robot.
 * The 4 corners of an Apriltag are used as point features; joint velocities are
 * computed with the vpServo eye-to-hand L_cur c_V_f f_V_e e_J_e scheme.
 * NOTE(review): this listing elides several declaration lines (e.g. the camera
 * parameter object, the XML parser, the servo task and the mouse-button
 * variables, as well as some display/flush calls) - confirm against the
 * complete source file before building.
 */
155int main(int argc, char **argv)
156{
157 double opt_tag_size = 0.120;
158 bool opt_tag_z_aligned = false;
159 std::string opt_robot_ip = "192.168.1.1";
160 std::string opt_rMc_filename = "";
161 std::string opt_intrinsic_filename = "";
162 std::string opt_camera_name = "Camera";
163 bool display_tag = true;
164 int opt_quad_decimate = 2;
165 bool opt_verbose = false;
166 bool opt_plot = false;
167 bool opt_adaptive_gain = false;
168 bool opt_task_sequencing = false;
169 bool opt_learn_desired_features = false;
170 std::string desired_features_filename = "learned_desired_features.txt";
171 double convergence_threshold = 0.00005;
172
 // Parse command line options
173 for (int i = 1; i < argc; ++i) {
174 if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) {
175 opt_tag_size = std::stod(argv[++i]);
176 }
177 else if (std::string(argv[i]) == "--tag-z-aligned") {
178 opt_tag_z_aligned = true;
179 }
180 else if ((std::string(argv[i]) == "--ip") && (i + 1 < argc)) {
181 opt_robot_ip = std::string(argv[++i]);
182 }
183 else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
184 opt_intrinsic_filename = std::string(argv[++i]);
185 }
186 else if (std::string(argv[i]) == "--camera-name" && i + 1 < argc) {
187 opt_camera_name = std::string(argv[++i]);
188 }
189 else if ((std::string(argv[i]) == "--rMc") && (i + 1 < argc)) {
190 opt_rMc_filename = std::string(argv[++i]);
191 }
192 else if ((std::string(argv[i]) == "--verbose") || (std::string(argv[i]) == "-v")) {
193 opt_verbose = true;
194 }
195 else if (std::string(argv[i]) == "--plot") {
196 opt_plot = true;
197 }
198 else if (std::string(argv[i]) == "--adaptive-gain") {
199 opt_adaptive_gain = true;
200 }
201 else if (std::string(argv[i]) == "--task-sequencing") {
202 opt_task_sequencing = true;
203 }
204 else if ((std::string(argv[i]) == "--tag-quad-decimate") && (i + 1 < argc)) {
205 opt_quad_decimate = std::stoi(argv[++i]);
206 }
207 else if (std::string(argv[i]) == "--no-convergence-threshold") {
208 convergence_threshold = 0.;
209 }
210 else if (std::string(argv[i]) == "--learn-desired-features") {
211 opt_learn_desired_features = true;
212 }
213 else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) {
214 std::cout << "SYNOPSYS" << std::endl
215 << " " << argv[0]
216 << " [--ip <controller ip>]"
217 << " [--intrinsic <xml file>]"
218 << " [--camera-name <name>]"
219 << " [--tag-size <size>]"
220 << " [--tag-quad-decimate <decimation factor>]"
221 << " [--tag-z-aligned]"
222 << " [--learn-desired-features]"
223 << " [--rMc <file.yaml>]"
224 << " [--adaptive-gain]"
225 << " [--plot]"
226 << " [--task-sequencing]"
227 << " [--no-convergence-threshold]"
228 << " [--verbose, -v]"
229 << " [--help, -h]\n"
230 << std::endl;
231 std::cout << "DESCRIPTION" << std::endl
232 << " Use an image-based visual-servoing scheme to position the camera in front of an Apriltag." << std::endl
233 << std::endl
234 << " --ip <controller ip>" << std::endl
235 << " Franka controller ip address" << std::endl
236 << " Default: " << opt_robot_ip << std::endl
237 << std::endl
238 << " --intrinsic <xml file>" << std::endl
239 << " XML file that contains camera intrinsic parameters. " << std::endl
240 << " If no file is specified, use Realsense camera factory intrinsic parameters." << std::endl
241 << std::endl
242 << " --camera-name <name>" << std::endl
243 << " Camera name in the XML file that contains camera intrinsic parameters." << std::endl
244 << " Default: \"Camera\"" << std::endl
245 << std::endl
246 << " --tag-size <size>" << std::endl
247 << " Apriltag size in [m]." << std::endl
248 << " Default: " << opt_tag_size << " [m]" << std::endl
249 << std::endl
250 << " --tag-quad-decimate <decimation factor>" << std::endl
251 << " Decimation factor used during Apriltag detection." << std::endl
252 << " Default: " << opt_quad_decimate << std::endl
253 << std::endl
254 << " --tag-z-aligned" << std::endl
255 << " When enabled, tag z-axis and camera z-axis are aligned." << std::endl
256 << " Default: false" << std::endl
257 << std::endl
258 << " --rMc <file.yaml>" << std::endl
259 << " Yaml file containing the extrinsic transformation between" << std::endl
260 << " robot reference frame and camera frames." << std::endl
261 << std::endl
262 << " --adaptive-gain" << std::endl
263 << " Flag to enable adaptive gain to speed up visual servo near convergence." << std::endl
264 << std::endl
265 << " --plot" << std::endl
266 << " Flag to enable curve plotter." << std::endl
267 << std::endl
268 << " --task-sequencing" << std::endl
269 << " Flag to enable task sequencing scheme." << std::endl
270 << std::endl
271 << " --no-convergence-threshold" << std::endl
272 << " Flag to disable convergence threshold used to stop the visual servo." << std::endl
273 << std::endl
274 << " --learn-desired-pose" << std::endl
275 << " Flag to enable desired pose learning." << std::endl
276 << " Data are saved in learned-desired-pose.yaml file." << std::endl
277 << std::endl
278 << " --verbose, -v" << std::endl
279 << " Flag to enable extra verbosity." << std::endl
280 << std::endl
281 << " --help, -h" << std::endl
282 << " Print this helper message." << std::endl
283 << std::endl;
284
285 return EXIT_SUCCESS;
286 }
287 else {
288 std::cout << "\nERROR" << std::endl
289 << std::string(argv[i]) << " command line option is not supported." << std::endl
290 << "Use " << std::string(argv[0]) << " --help" << std::endl
291 << std::endl;
292 return EXIT_FAILURE;
293 }
294 }
295
 // Open the Realsense grabber with fixed 640x480 color/depth/infrared streams at 30 fps
296 vpRealSense2 rs;
297 rs2::config config;
298 unsigned int width = 640, height = 480;
299 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
300 config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30);
301 config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30);
302 rs.open(config);
303
304 vpImage<unsigned char> I(height, width);
305
306#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
307 std::shared_ptr<vpDisplay> display = vpDisplayFactory::createDisplay(I, 10, 10, "Current image");
308#else
309 vpDisplay *display = vpDisplayFactory::allocateDisplay(I, 10, 10, "Current image");
310#endif
311
312 std::cout << "Parameters:" << std::endl;
313 std::cout << " Apriltag " << std::endl;
314 std::cout << " Size [m] : " << opt_tag_size << std::endl;
315 std::cout << " Z aligned : " << (opt_tag_z_aligned ? "true" : "false") << std::endl;
316 std::cout << " Camera intrinsics " << std::endl;
317 std::cout << " Factory parameters : " << (opt_intrinsic_filename.empty() ? "yes" : "no") << std::endl;
318
319 // Get camera intrinsics
321 if (opt_intrinsic_filename.empty()) {
322 std::cout << "Use Realsense camera intrinsic factory parameters: " << std::endl;
324 std::cout << "cam:\n" << cam << std::endl;
325 }
326 else if (!vpIoTools::checkFilename(opt_intrinsic_filename)) {
327 std::cout << "Camera parameters file " << opt_intrinsic_filename << " doesn't exist." << std::endl;
328 return EXIT_FAILURE;
329 }
330 else {
332 if (!opt_camera_name.empty()) {
333
334 std::cout << " Param file name [.xml]: " << opt_intrinsic_filename << std::endl;
335 std::cout << " Camera name : " << opt_camera_name << std::endl;
336
 // Try parameters with distortion first; fall back to the model without distortion
337 if (parser.parse(cam, opt_intrinsic_filename, opt_camera_name, vpCameraParameters::perspectiveProjWithDistortion) !=
339 std::cout << "Unable to parse parameters with distortion for camera \"" << opt_camera_name << "\" from "
340 << opt_intrinsic_filename << " file" << std::endl;
341 std::cout << "Attempt to find parameters without distortion" << std::endl;
342
343 if (parser.parse(cam, opt_intrinsic_filename, opt_camera_name,
345 std::cout << "Unable to parse parameters without distortion for camera \"" << opt_camera_name << "\" from "
346 << opt_intrinsic_filename << " file" << std::endl;
347 return EXIT_FAILURE;
348 }
349 }
350 }
351 }
352
353 std::cout << "Camera parameters used to compute the pose:\n" << cam << std::endl;
354
355 // Setup Apriltag detector
358 // vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS;
359 vpDetectorAprilTag detector(tagFamily);
360 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
361 detector.setDisplayTag(display_tag);
362 detector.setAprilTagQuadDecimate(opt_quad_decimate);
363 detector.setZAlignedWithCameraAxis(opt_tag_z_aligned);
364
365 // Define 4 3D points corresponding to the CAD model of the Apriltag
366 std::vector<vpPoint> point(4);
367 point[0].setWorldCoordinates(-opt_tag_size / 2., -opt_tag_size / 2., 0);
368 point[1].setWorldCoordinates(+opt_tag_size / 2., -opt_tag_size / 2., 0);
369 point[2].setWorldCoordinates(+opt_tag_size / 2., +opt_tag_size / 2., 0);
370 point[3].setWorldCoordinates(-opt_tag_size / 2., +opt_tag_size / 2., 0);
371
 // Mode 1: interactively learn the desired visual features and save them to disk
372 if (opt_learn_desired_features) {
373
374 bool quit = false;
375 while (!quit) {
376 rs.acquire(I);
377
379
380 std::vector<vpHomogeneousMatrix> c_M_o_vec;
381 // To learn the desired visual features (x*, y*, Z*) we will use the tag pose to estimate Z*
382 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
383
384 vpDisplay::displayText(I, 20, 20, "Move the robot to the desired tag pose...", vpColor::red);
385 vpDisplay::displayText(I, 40, 20, "Left click to learn desired features, right click to quit", vpColor::red);
386
387 std::vector< std::vector<vpImagePoint> > tags_corners;
388 if (ret) {
389 if (detector.getNbObjects() == 1) {
390 tags_corners = detector.getTagsCorners();
391 for (size_t i = 0; i < 4; ++i) {
392 std::stringstream ss;
393 ss << i;
394 vpDisplay::displayText(I, tags_corners[0][i]+vpImagePoint(15, -15), ss.str(), vpColor::red);
395 vpDisplay::displayCross(I, tags_corners[0][i], 15, vpColor::red, 2);
396 }
397 }
398 }
399
401 if (vpDisplay::getClick(I, button, false)) {
402 switch (button) {
404 if (ret) {
405 if (detector.getNbObjects() == 1) {
406 vpHomogeneousMatrix cd_M_o = c_M_o_vec[0];
407 std::vector<vpFeaturePoint> p_d(4);
408
409 // Update x*, y* for each desired visual feature
410 for (size_t i = 0; i < 4; ++i) {
411 double x = 0, y = 0;
412 vpPixelMeterConversion::convertPoint(cam, tags_corners[0][i], x, y);
413 std::stringstream ss;
414 ss << i;
415 vpDisplay::displayText(I, tags_corners[0][i]+vpImagePoint(10, 10), ss.str(), vpColor::red);
416 p_d[i].set_x(x);
417 p_d[i].set_y(y);
418 }
419
420 // Update Z* for each desired visual feature
421 for (size_t i = 0; i < point.size(); ++i) {
422 vpColVector c_P, p;
423 point[i].changeFrame(cd_M_o, c_P);
424 p_d[i].set_Z(c_P[2]);
425 }
426 if (save_desired_features(desired_features_filename, p_d)) {
427 std::cout << "Desired visual features saved in: " << desired_features_filename << std::endl;
428 }
429 else {
430 std::cout << "Error: Unable to save desired features in " << desired_features_filename << std::endl;
431 return EXIT_FAILURE;
432 }
433 }
434 else {
435 std::cout << "Cannot save desired features. More than 1 tag is visible in the image..." << std::endl;
436 }
437 }
438 else {
439 std::cout << "Cannot save desired features. Tag is not visible in the image..." << std::endl;
440 }
441 break;
443 quit = true;
444 break;
445 default:
446 break;
447 }
448 }
449
451 }
452 return EXIT_SUCCESS;
453 }
454
455 // Load desired features to reach by visual servo
456
457 std::vector<vpFeaturePoint> p_d; // Desired visual features
458 // Sanity options check
459 if (desired_features_filename.empty() || (!vpIoTools::checkFilename(desired_features_filename))) {
460 std::cout << "Cannot start eye-to-hand visual-servoing. Desired features are not available." << std::endl;
461 std::cout << "use --learn-desired-features flag to learn the desired features." << std::endl;
462 return EXIT_FAILURE;
463 }
464 else {
465 if (!read_desired_features(desired_features_filename, p_d)) {
466 std::cout << "Error: Unable to read desired features from: " << desired_features_filename << std::endl;
467 return EXIT_FAILURE;
468 }
469 }
470
471 // Get camera extrinsics
472 vpPoseVector r_P_c;
473
474 // Read camera extrinsics from --eMo <file>
475 if (!opt_rMc_filename.empty()) {
476 r_P_c.loadYAML(opt_rMc_filename, r_P_c);
477 }
478 else {
479 std::cout << "Warning, rMc transformation is not specified using --rMc parameter." << std::endl;
480 return EXIT_FAILURE;
481 }
482 vpHomogeneousMatrix r_M_c(r_P_c);
483 std::cout << "r_M_c:\n" << r_M_c << std::endl;
484
485 vpRobotFranka robot;
486
487 try {
488 robot.connect(opt_robot_ip);
489
490 // Create current visual features
491 std::vector<vpFeaturePoint> p(4); // We use 4 points
492
494 // Add the 4 visual feature points
495 for (size_t i = 0; i < p.size(); ++i) {
496 task.addFeature(p[i], p_d[i]);
497 }
499 task.setInteractionMatrixType(vpServo::CURRENT);
500
501 if (opt_adaptive_gain) {
502 vpAdaptiveGain lambda(1, 0.4, 30); // lambda(0)=1, lambda(oo)=0.4 and lambda'(0)=30
503 task.setLambda(lambda);
504 }
505 else {
506 task.setLambda(0.2);
507 }
508
509 // Set the camera to robot reference frame velocity twist matrix constant transformation
510 task.set_cVf(r_M_c.inverse());
511
512 vpPlot *plotter = nullptr;
513 int iter_plot = 0;
514
515 if (opt_plot) {
516 plotter = new vpPlot(2, static_cast<int>(250 * 2), 500, static_cast<int>(I.getWidth()) + 80, 10,
517 "Real time curves plotter");
518 plotter->setTitle(0, "Visual features error");
519 plotter->setTitle(1, "Joint velocities");
520 plotter->initGraph(0, 8);
521 plotter->initGraph(1, 7);
522 plotter->setLegend(0, 0, "error_feat_p1_x");
523 plotter->setLegend(0, 1, "error_feat_p1_y");
524 plotter->setLegend(0, 2, "error_feat_p2_x");
525 plotter->setLegend(0, 3, "error_feat_p2_y");
526 plotter->setLegend(0, 4, "error_feat_p3_x");
527 plotter->setLegend(0, 5, "error_feat_p3_y");
528 plotter->setLegend(0, 6, "error_feat_p4_x");
529 plotter->setLegend(0, 7, "error_feat_p4_y");
530 plotter->setLegend(1, 0, "q_1");
531 plotter->setLegend(1, 1, "q_2");
532 plotter->setLegend(1, 2, "q_3");
533 plotter->setLegend(1, 3, "q_4");
534 plotter->setLegend(1, 4, "q_5");
535 plotter->setLegend(1, 5, "q_6");
536 plotter->setLegend(1, 6, "q_7");
537 }
538
539 bool final_quit = false;
540 bool has_converged = false;
541 bool send_velocities = false;
542 bool servo_started = false;
543 std::vector<vpImagePoint> *traj_corners = nullptr; // To memorize point trajectory
544
545 static double t_init_servo = vpTime::measureTimeMs();
546
547 //robot.set_eMc(eMc); // Set location of the camera wrt end-effector frame
548 robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
549
 // Servo loop: runs until convergence or the operator requests to quit
550 while (!has_converged && !final_quit) {
551 double t_start = vpTime::measureTimeMs();
552
553 rs.acquire(I);
554
556
557 std::vector<vpHomogeneousMatrix> c_M_o_vec;
558 // To update the current visual feature (x,y,Z) parameters, we need the tag pose to estimate Z
559 bool ret = detector.detect(I, opt_tag_size, cam, c_M_o_vec);
560
561 {
562 std::stringstream ss;
563 ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit.";
564 vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
565 }
566
567 vpColVector qdot(robot.getNDof());
568
569 // Only one tag is detected
570 if (ret && detector.getNbObjects() == 1) {
571 static bool first_time = true;
572 vpHomogeneousMatrix c_M_o = c_M_o_vec[0];
573
574 // Get tag corners
575 std::vector<vpImagePoint> corners = detector.getPolygon(0);
576
577 // Update visual features
578 for (size_t i = 0; i < corners.size(); ++i) {
579 // Update the point feature from the tag corners location
580 vpFeatureBuilder::create(p[i], cam, corners[i]);
581 // Set the feature Z coordinate from the pose
582 vpColVector c_P;
583 point[i].changeFrame(c_M_o, c_P);
584
585 p[i].set_Z(c_P[2]); // Update Z value of each visual feature thanks to the tag pose
586 }
587
588 // Set the robot reference frame to end-effector frame velocity twist matrix transformation
589 vpPoseVector r_P_e;
590 robot.getPosition(vpRobot::END_EFFECTOR_FRAME, r_P_e);
591 vpHomogeneousMatrix r_M_e(r_P_e);
592 task.set_fVe(r_M_e);
593
594 // Set the Jacobian (expressed in the end-effector frame)
595 vpMatrix e_J_e;
596 robot.get_eJe(e_J_e);
597 task.set_eJe(e_J_e);
598
599 if (opt_task_sequencing) {
600 if (!servo_started) {
601 if (send_velocities) {
602 servo_started = true;
603 }
604 t_init_servo = vpTime::measureTimeMs();
605 }
606 qdot = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.);
607 }
608 else {
609 qdot = task.computeControlLaw();
610 }
611
612 // Display the current and desired feature points in the image display
613 vpServoDisplay::display(task, cam, I);
614 for (size_t i = 0; i < corners.size(); ++i) {
615 std::stringstream ss;
616 ss << i;
617 // Display current point indexes
618 vpDisplay::displayText(I, corners[i] + vpImagePoint(15, 15), ss.str(), vpColor::red);
619 // Display desired point indexes
620 vpImagePoint ip;
621 vpMeterPixelConversion::convertPoint(cam, p_d[i].get_x(), p_d[i].get_y(), ip);
622 vpDisplay::displayText(I, ip + vpImagePoint(15, 15), ss.str(), vpColor::red);
623 }
624 if (first_time) {
625 traj_corners = new std::vector<vpImagePoint>[corners.size()];
626 }
627 // Display the trajectory of the points used as features
628 display_point_trajectory(I, corners, traj_corners);
629
630 if (opt_plot) {
631 plotter->plot(0, iter_plot, task.getError());
632 plotter->plot(1, iter_plot, qdot);
633 iter_plot++;
634 }
635
636 if (opt_verbose) {
637 std::cout << "qdot: " << qdot.t() << std::endl;
638 }
639
640 double error = task.getError().sumSquare();
641 std::stringstream ss;
642 ss << "error: " << error;
643 vpDisplay::displayText(I, 20, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
644
645 if (opt_verbose)
646 std::cout << "error: " << error << std::endl;
647
648 if (error < convergence_threshold) {
649 has_converged = true;
650 std::cout << "Servo task has converged" << std::endl;
651 vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red);
652 }
653 if (first_time) {
654 first_time = false;
655 }
656 } // end if (c_M_o_vec.size() == 1)
657 else {
658 qdot = 0;
659 }
660
661 if (!send_velocities) {
662 qdot = 0;
663 }
664
665 // Send to the robot
666 robot.setVelocity(vpRobot::JOINT_STATE, qdot);
667
668 {
669 std::stringstream ss;
670 ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms";
671 vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
672 }
674
676 if (vpDisplay::getClick(I, button, false)) {
677 switch (button) {
679 send_velocities = !send_velocities;
680 break;
681
683 final_quit = true;
684 qdot = 0;
685 break;
686
687 default:
688 break;
689 }
690 }
691 }
692 std::cout << "Stop the robot " << std::endl;
693 robot.setRobotState(vpRobot::STATE_STOP);
694
695 if (opt_plot && plotter != nullptr) {
696 delete plotter;
697 plotter = nullptr;
698 }
699
700 if (!final_quit) {
701 while (!final_quit) {
702 rs.acquire(I);
704
705 vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red);
706 vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red);
707
708 if (vpDisplay::getClick(I, false)) {
709 final_quit = true;
710 }
711
713 }
714 }
715 if (traj_corners) {
716 delete[] traj_corners;
717 }
718 }
719 catch (const vpException &e) {
720 std::cout << "ViSP exception: " << e.what() << std::endl;
721 std::cout << "Stop the robot " << std::endl;
722 robot.setRobotState(vpRobot::STATE_STOP);
723#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
724 if (display != nullptr) {
725 delete display;
726 }
727#endif
728 return EXIT_FAILURE;
729 }
730 catch (const franka::NetworkException &e) {
731 std::cout << "Franka network exception: " << e.what() << std::endl;
732 std::cout << "Check if you are connected to the Franka robot"
733 << " or if you specified the right IP using --ip command line option set by default to 192.168.1.1. "
734 << std::endl;
735#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
736 if (display != nullptr) {
737 delete display;
738 }
739#endif
740 return EXIT_FAILURE;
741 }
742 catch (const std::exception &e) {
743 std::cout << "Franka exception: " << e.what() << std::endl;
744#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
745 if (display != nullptr) {
746 delete display;
747 }
748#endif
749 return EXIT_FAILURE;
750 }
751
752#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
753 if (display != nullptr) {
754 delete display;
755 }
756#endif
757
758 return EXIT_SUCCESS;
759}
760#else
/*
 * Fallback entry point built when the guard at the top of the file is not
 * satisfied (missing librealsense2, libfranka, display support or pugixml):
 * print which third-party dependency to install/enable, then exit successfully.
 */
761int main()
762{
763#if !defined(VISP_HAVE_REALSENSE2)
764 std::cout << "Install librealsense-2.x and rebuild ViSP." << std::endl;
765#endif
766#if !defined(VISP_HAVE_FRANKA)
767 std::cout << "Install libfranka and rebuild ViSP." << std::endl;
768#endif
769#if !defined(VISP_HAVE_PUGIXML)
770 std::cout << "Build ViSP with pugixml support enabled." << std::endl;
771#endif
772 return EXIT_SUCCESS;
773}
774#endif
Adaptive gain computation.
static bool loadYAML(const std::string &filename, vpArray2D< Type > &A, char *header=nullptr)
Definition vpArray2D.h:874
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
static const vpColor green
Definition vpColor.h:201
@ TAG_36h11
AprilTag 36h11 pattern (recommended).
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition vpException.h:60
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
vpFeaturePoint & buildFrom(const double &x, const double &y, const double &Z)
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static bool checkFilename(const std::string &filename)
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Definition vpPlot.h:117
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
@ JOINT_STATE
Definition vpRobot.h:79
@ END_EFFECTOR_FRAME
Definition vpRobot.h:80
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition vpRobot.h:64
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
Definition vpRobot.h:63
static void display(const vpServo &s, const vpCameraParameters &cam, const vpImage< unsigned char > &I, vpColor currentColor=vpColor::green, vpColor desiredColor=vpColor::red, unsigned int thickness=1)
@ EYETOHAND_L_cVf_fVe_eJe
Definition vpServo.h:197
@ CURRENT
Definition vpServo.h:217
XML parser to load and save intrinsic camera parameters.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()