Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
XFeatTrackingBackend.py
35
from typing import Optional

import numpy as np
import torch

from visp.core import ImageTools, Rect
from visp.core import ImageRGBa, Matrix
from visp.rbt import RBFeatureTrackerInput
from visp.rbt import RBVisualOdometryUtils
from visp.python.vision.xfeat import XFeatBackend, XFeatRepresentation
44
45
46class XFeatTrackingBackend(XFeatBackend):
    def __init__(self, num_points: int, min_cos: float, use_dense: bool = False, scale_factor: float = 1.0, only_on_bb: bool = False):
        """XFeat-based feature tracking backend for the render-based tracker.

        :param num_points: Maximum number of keypoints to detect per frame.
        :param min_cos: Minimum cosine similarity for a descriptor match.
        :param use_dense: Forwarded to XFeatBackend — presumably selects the
            dense (semi-dense) XFeat detection mode; confirm in XFeatBackend.
        :param scale_factor: Forwarded to XFeatBackend image scaling.
        :param only_on_bb: If True, run detection only on a crop of the
            rendered object's bounding box instead of the full image.
        """
        XFeatBackend.__init__(self, num_points, min_cos, use_dense, scale_factor)

        # Iteration index of the last frame processed by process_frame();
        # -1 means no frame has been processed yet.
        self.last_iter = -1

        # Restrict detection to the render bounding-box crop when True.
        self.only_on_bb = only_on_bb
56
59
62
63 def load_settings(self, d: dict):
64 XFeatBackend.load_settings(self, d)
65 self.only_on_bb = d.get('onlyOnBB', False)
66 self.min_env_distance = d['minSilhouetteDistanceEnvPoint']
67 self.min_obj_distance = d['minSilhouetteDistanceObjectPoint']
68 self.min_obj_mask_value = d['minObjMaskValue']
69
70 def match_unidirectional(self, feats_current: torch.Tensor, feats_map: torch.Tensor):
71 min_cossim = self.min_cos
72 cossim = feats_current @ feats_map.t()
73 best_cossim, match12 = cossim.max(dim=1)
74
75 w = torch.exp(best_cossim) / torch.sum(torch.exp(cossim), dim=-1)
76
77 idx0 = torch.arange(len(match12), device=match12.device)
78 good = best_cossim > min_cossim
79 idx0 = idx0[good]
80 idx1 = match12[good]
81 w = w[good]
82 assert len(idx0) == len(idx1) == len(w)
83 return idx0, idx1, w
84
    def process_frame(self, frame: RBFeatureTrackerInput, iteration: int):
        """Detect XFeat keypoints on the incoming frame and split them into
        object and environment representations.

        :param frame: Tracker input providing the RGB image (``frame.IRGB``)
            and the object renders (bounding box, silhouette data).
        :param iteration: Tracker iteration index, used to skip frames that
            were already processed.
        """
        # NOTE(review): with `> 0`, iteration 0 is never considered "already
        # processed" and could run twice — confirm whether `>= 0` was intended.
        if self.last_iter > 0 and self.last_iter == iteration:
            return # Frame was already processed

        with torch.no_grad():
            input_rgb = frame.IRGB
            bb = Rect(frame.renders.boundingBox)
            if self.only_on_bb:
                # Detect only inside the rendered bounding box: enforce a
                # minimum 32x32 size and clamp the origin to the image, then
                # crop the input image to that rectangle.
                crop = ImageRGBa()
                bb.setWidth(np.maximum(32, bb.getWidth()))
                bb.setHeight(np.maximum(32, bb.getHeight()))
                bb.setLeft(np.maximum(0, bb.getLeft()))
                bb.setTop(np.maximum(0, bb.getTop()))

                ImageTools.crop(frame.IRGB, bb, crop)

                input_rgb = crop
            representation = self.detect(input_rgb)
            if self.only_on_bb:
                # Shift keypoints from crop coordinates back into full-image
                # coordinates using the (clamped) crop origin.
                l = np.maximum(0, np.ceil(bb.getLeft()))
                t = np.maximum(0, np.ceil(bb.getTop()))
                representation.keypoints[:, 0] += l
                representation.keypoints[:, 1] += t

            self.last_iter = iteration
            # Partition the detected keypoints into object vs. environment
            # sets. NOTE(review): the remaining call arguments are on lines
            # not visible in this view.
            obj_indices, env_indices = RBVisualOdometryUtils.computeIndicesObjectAndEnvironment(Matrix(representation.keypoints), frame,
            self.current_representation_object, self.current_representation_environment = representation.split(obj_indices, env_indices)
117
118 def get_current_object_data(self) -> XFeatRepresentation:
119 if self.current_representation_object is not None:
120 return self.current_representation_object.copy()
121 else:
122 return None
123 def get_previous_object_data(self) -> XFeatRepresentation:
124 if self.previous_representation_object is not None:
125 return self.previous_representation_object.copy()
126 else:
127 return None
128 def get_current_environment_data(self) -> XFeatRepresentation:
129 if self.current_representation_environment is not None:
130 return self.current_representation_environment.copy()
131 else:
132 return None
match_unidirectional(self, torch.Tensor feats_current, torch.Tensor feats_map)
process_frame(self, RBFeatureTrackerInput frame, int iteration)
__init__(self, int num_points, float min_cos, use_dense=False, scale_factor=1.0, only_on_bb=False)