Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
realsense-rbt.py
35
36import argparse
37from dataclasses import dataclass
38from pathlib import Path
39from typing import List, Optional, Tuple
40import numpy as np
41import time
42import faulthandler
43faulthandler.enable()
44
45from visp.core import CameraParameters, HomogeneousMatrix
46from visp.core import Color, Display, ImageConvert
47from visp.core import ImageGray, ImageUInt16, ImageRGBa, ImageFloat
48from visp.io import ImageIo
49from visp.rbt import RBTracker, RBFeatureTracker, RBFeatureTrackerInput
50from visp.python.display_utils import get_display
51from visp.python.rbt import PythonRBExtensions
52
53import pyrealsense2 as rs
54import matplotlib.pyplot as plt
55
class PyBaseFeatureTracker(RBFeatureTracker):
    '''
    The base structure that a class should have to implement a render based feature tracker.

    All feature/VVS hooks are no-ops: this class only illustrates the interface
    that RBTracker calls on a custom Python feature tracker.
    '''
    def __init__(self):
        RBFeatureTracker.__init__(self)

    def requiresRGB(self) -> bool:
        # This tracker does not consume the color image.
        return False

    def requiresDepth(self) -> bool:
        # This tracker does not consume the depth map.
        return False

    def requiresSilhouetteCandidates(self) -> bool:
        # NOTE(review): the extracted source lost this method's `def` line
        # (only its `return False` body survived). Reconstructed from the
        # RBFeatureTracker interface — confirm the method name against the
        # installed visp.rbt bindings.
        return False

    def onTrackingIterStart(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
        # Reset the VVS accumulators (covariance, normal equations) before a
        # new tracking iteration gathers features.
        self.cov.resize(6, 6)
        self.LTL.resize(6, 6)
        self.LTR.resize(6)
        self.numFeatures = 0

    def extractFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
        pass

    def trackFeatures(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
        pass

    def initVVS(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput, cMo: HomogeneousMatrix):
        # Demo instrumentation: shows that the C++ tracker invokes the Python override.
        print('INITVVS Was called')

    def computeVVSIter(self, frame: RBFeatureTrackerInput, cMo: HomogeneousMatrix, iteration: int):
        pass

    def onTrackingIterEnd(self, cMo: HomogeneousMatrix):
        pass

    def display(self, cam: CameraParameters, I: ImageGray, IRGB: ImageRGBa, I_depth: ImageGray):
        pass
@dataclass
class FrameData:
    # NOTE(review): the `class FrameData:` header line was missing from the
    # extracted source (the name is constructed in read_data below); restored here.
    '''Images acquired for a single RealSense frame.'''
    I: ImageGray                    # grayscale image, converted from the color stream
    IRGB: ImageRGBa                 # color image with an appended alpha channel
    I_depth: Optional[ImageFloat]   # depth in meters, None when depth is disabled
101
def read_data(depth_scale: Optional[float], IRGB: ImageRGBa, I: ImageGray, pipe: rs.pipeline):
    '''
    Generator yielding a FrameData per RealSense frame.

    Depth frames are aligned to the color stream. The images IRGB and I are
    mutated in place on every iteration (callers holding references see the
    latest frame), then wrapped in the yielded FrameData.

    :param depth_scale: factor converting raw z16 depth units to meters,
                        or None to skip depth acquisition entirely.
    :param IRGB: color image buffer, resized/filled in place.
    :param I: grayscale image buffer, filled from IRGB in place.
    :param pipe: a started RealSense pipeline.
    '''
    use_depth = depth_scale is not None
    align_to = rs.align(rs.stream.color)
    while True:
        frames = pipe.wait_for_frames()
        frames = align_to.process(frames)
        I_np = np.asanyarray(frames.get_color_frame().as_frame().get_data())
        # Append an opaque alpha channel so the buffer matches ImageRGBa's layout.
        I_np = np.concatenate((I_np, np.ones_like(I_np[..., 0:1], dtype=np.uint8)), axis=-1)
        IRGB.resize(I_np.shape[0], I_np.shape[1])
        I_rgba_ref = IRGB.numpy()
        I_rgba_ref[...] = I_np
        ImageConvert.convert(IRGB, I, 0)
        I_depth_float = None
        if use_depth:
            I_depth_raw = np.asanyarray(frames.get_depth_frame().as_frame().get_data())
            # Raw z16 units -> meters.
            I_depth_float = I_depth_raw.astype(np.float32) * depth_scale
        # Bug fix: previously ImageFloat(None) was constructed when depth was
        # disabled; yield None instead, matching FrameData.I_depth's Optional
        # annotation and the `I_depth is not None` check in the main loop.
        yield FrameData(I, IRGB, ImageFloat(I_depth_float) if use_depth else None)
121
122
def cam_from_rs_profile(profile) -> Tuple[CameraParameters, int, int]:
    '''
    Build ViSP pinhole camera parameters from a RealSense stream profile.

    :param profile: a RealSense stream profile (downcast internally to a
                    video stream profile to access the intrinsics).
    :return: (camera parameters, image height, image width).
    '''
    intrinsics = profile.as_video_stream_profile().get_intrinsics()
    cam = CameraParameters(intrinsics.fx, intrinsics.fy, intrinsics.ppx, intrinsics.ppy)
    return cam, intrinsics.height, intrinsics.width
126
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--tracker', type=str, required=True,
                        help='Path to the json file containing the tracker configuration.')
    parser.add_argument('--model', type=str, required=False,
                        help='Path to the .obj/.bam file describing the CAD model.')

    args = parser.parse_args()
    tracker_path: str = args.tracker
    assert Path(tracker_path).exists(), 'Tracker file not found'
    model_path = args.model
    if model_path is not None:
        assert Path(model_path).exists(), '3D CAD model file not found'

    # Initialize realsense2: aligned depth + color streams at 848x480 / 60 FPS.
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 60)
    config.enable_stream(rs.stream.color, 848, 480, rs.format.rgb8, 60)

    cfg = pipe.start(config)
    # Factor converting raw z16 depth values to meters.
    depth_scale = cfg.get_device().first_depth_sensor().get_depth_scale()

    tracker = RBTracker()
    tracker.loadConfigurationFile(tracker_path)
    extensions = PythonRBExtensions()
    extensions.parse_python_extensions(tracker, Path(tracker_path))
    if model_path is not None:
        tracker.setModelPath(model_path)

    # Register the (no-op) custom Python feature tracker defined above.
    custom_feature = PyBaseFeatureTracker()
    tracker.addTracker(custom_feature)

    cam_color, color_height, color_width = cam_from_rs_profile(cfg.get_stream(rs.stream.color))
    tracker.setCameraParameters(cam_color, color_height, color_width)

    # Camera intrinsics
    print('Color intrinsics:', cam_color)
    I = ImageGray()
    IRGB = ImageRGBa()
    data_generator = read_data(depth_scale, IRGB, I, pipe)
    frame_data = next(data_generator)  # Get first frame for init

    # Initialize displays
    dI = get_display()
    dI.init(I, 0, 0, 'Color image')

    dRGB = get_display()
    # Bug fix: this window was also titled 'Color image', duplicating the
    # grayscale window's title.
    dRGB.init(IRGB, I.getWidth(), 0, 'RGB image')

    I_depth = ImageGray()
    dDepth = get_display()
    ImageConvert.createDepthHistogram(frame_data.I_depth, I_depth)
    dDepth.init(I_depth, I.getWidth() * 2, 0, 'Depth')

    # Show the live stream until the user clicks to start initialization.
    for frame_data in data_generator:
        Display.display(I)
        Display.displayText(I, 50, 0, 'Click to initialize tracking', Color.red)
        Display.flush(I)
        Display.display(IRGB)
        Display.flush(IRGB)
        if Display.getClick(I, blocking=False):
            break

    tracker.startTracking()
    tracker.initClick(I, tracker_path.replace('.json', '.init'), True)
    start_time = time.time()
    for frame_data in data_generator:
        if frame_data.I_depth is not None:
            # Map depth in [0, 0.5] m to [0, 255] for display.
            # NOTE(review): assumes np.minimum accepts an ImageFloat directly
            # (via the bindings' array interface) — confirm.
            I_depth_np = I_depth.numpy()
            I_depth_np[...] = ((np.minimum(frame_data.I_depth, 0.5) / 0.5) * 255.0).astype(np.uint8)

        displayed = [I, IRGB, I_depth]
        for display_image in displayed:
            Display.display(display_image)
        # Bug fix: this text was drawn inside the loop above, once per image.
        Display.displayText(I, 50, 0, 'Click to stop tracking', Color.red)

        t1 = time.time()
        # Bug fix: the original read `frame.I`, a stale variable left over from
        # the initialization loop; it only worked because read_data reuses one
        # ImageGray object. Use the current frame explicitly.
        tracker.track(I=frame_data.I, IRGB=frame_data.IRGB, depth=frame_data.I_depth)
        tracking_time = np.round((time.time() - t1) * 1000.0, 2)
        Display.displayText(I, 60, 0, f'Tracking time: {tracking_time}', Color.red)

        cMo = HomogeneousMatrix()
        tracker.getPose(cMo)

        tracker.display(I, IRGB, I_depth)
        Display.displayFrame(I, cMo, cam_color, 0.05, Color.none, 2)

        for display_image in displayed:
            Display.flush(display_image)

        if Display.getClick(I, blocking=False):
            break
    end_time = time.time()
    print(f'total time = {end_time - start_time}s')
initVVS(self, RBFeatureTrackerInput frame, RBFeatureTrackerInput previousFrame, HomogeneousMatrix cMo)
display(self, CameraParameters cam, ImageGray I, ImageRGBa IRGB, ImageGray I_depth)
onTrackingIterEnd(self, HomogeneousMatrix cMo)
computeVVSIter(self, RBFeatureTrackerInput frame, HomogeneousMatrix cMo, int iteration)
trackFeatures(self, RBFeatureTrackerInput frame, RBFeatureTrackerInput previousFrame, HomogeneousMatrix cMo)
onTrackingIterStart(self, RBFeatureTrackerInput frame, HomogeneousMatrix cMo)
extractFeatures(self, RBFeatureTrackerInput frame, RBFeatureTrackerInput previousFrame, HomogeneousMatrix cMo)
Tuple[CameraParameters, int, int] cam_from_rs_profile(profile)
read_data(Optional[float] depth_scale, ImageRGBa IRGB, ImageGray I, rs.pipeline pipe)