# compute_overlap.py
# (Example file from the libpointmatcher generated documentation.)
import sys

import numpy as np

from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms

# Code example for DataFilter taking a sequence of point clouds with
# their global coordinates and build a map with a fix (manageable) number of points

PM = pm.PointMatcher
PMIO = pm.PointMatcherIO
DP = PM.DataPoints
Matches = PM.Matches
params = pms.Parametrizable.Parameters()

# Path of output directory (default: tests/compute_overlap/)
# The output directory must already exist
# Leave empty to save in the current directory
output_base_directory = "tests/compute_overlap/"

# Name of output files (default: test)
output_base_file = "test"

# Loading the list of files
file_info_list = PMIO.FileInfoVector("../data/carCloudList.csv", "../data/")
# or
# file_info_vec = PMIO.FileInfoVector("../data/cloudList.csv", "../data/")

# If True, it will compute the overlap of only 2 point cloud ids
# and dump VTK files for visual inspection
debug_mode = False

# Choose the first point cloud to compute the overlap and debug.
# Must be different than debug_J
debug_I = 1

# Choose the second point cloud to compute the overlap and debug
# Must be different than debug_I
debug_J = 0

if debug_mode:
    pms.setLogger(PM.get().LoggerRegistrar.create("FileLogger"))

# Prepare transformation chain for maps
rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")

transformations = PM.Transformations()
transformations.append(rigid_trans)

Tread = np.identity(4)
Tref = np.identity(4)

starting_I = 0
list_size_I = len(file_info_list)
list_size_J = len(file_info_list)

# Pairwise overlap ratios; entry [j, i] is the ratio of cloud i seen from j.
# NOTE: `np.float` was removed in NumPy 1.24 — use the builtin `float`
# (equivalent to np.float64) instead.
overlap_results = np.ones((list_size_J, list_size_I), float)

if debug_mode:
    starting_I = debug_I
    list_size_I = starting_I + 1

for i in range(starting_I, list_size_I):
    # Only the upper triangle is computed; [i, j] and [j, i] are filled per pair.
    starting_J = i + 1

    if debug_mode:
        starting_J = debug_J
        list_size_J = starting_J + 1

    for j in range(starting_J, list_size_J):
        # Load point clouds
        reading = DP.load(file_info_list[i].readingFileName)
        reference = DP.load(file_info_list[j].readingFileName)

        print("Point cloud loaded")

        # Load ground-truth transformation matrices (required gTXX fields)
        if file_info_list[i].groundTruthTransformation.shape[0] != 0:
            Tread = file_info_list[i].groundTruthTransformation
            Tref = file_info_list[j].groundTruthTransformation
        else:
            print("ERROR: fields gTXX (i.e., ground truth matrix) is required")
            # Exit with a non-zero status so callers can detect the failure
            # (the original `exit()` reported success).
            sys.exit(1)

        # Move point cloud in global frame
        transformations.apply(reading, Tread)
        transformations.apply(reference, Tref)

        # Prepare filters: random subsampling keeping 50% of the points
        params["prob"] = "0.5"
        sub_sample = PM.get().DataPointsFilterRegistrar.create("RandomSamplingDataPointsFilter",
                                                               params)
        params.clear()

        max_density = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter")

        # params["dim"] = "1"
        # params["minDist"] = "0"
        # cut_in_half = PM.get().DataPointsFilterRegistrar.create("MinDistDataPointsFilter",
        #                                                         params)
        # params.clear()

        # Surface-normal filter used here only to estimate local densities
        params["knn"] = "20"
        params["keepDensities"] = "1"
        compute_density = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter",
                                                                    params)
        params.clear()

        reading = sub_sample.filter(reading)
        reading = compute_density.filter(reading)
        reading = max_density.filter(reading)
        # reading = cut_in_half.filter(reading)
        # "inliers" descriptor marks points found to overlap the other cloud
        inliers_read = np.zeros((1, reading.features.shape[1]))
        reading.addDescriptor("inliers", inliers_read)

        reference = sub_sample.filter(reference)
        reference = compute_density.filter(reference)
        reference = max_density.filter(reference)
        inliers_ref = np.zeros((1, reference.features.shape[1]))
        reference.addDescriptor("inliers", inliers_ref)

        # `source` plays the role of the query cloud, `target` the reference;
        # the two roles are swapped after the first pass (see swapDataPoints).
        source = reading
        target = reference

        for _pass in range(2):
            source_pts_count = source.features.shape[1]
            target_pts_count = target.features.shape[1]

            # Build kd-trees: knn for the self-distance estimate,
            # knn_all for the cross-cloud search
            knn = 20
            knn_all = 50

            params["knn"] = str(knn)
            matcher_source = PM.get().MatcherRegistrar.create("KDTreeMatcher", params)
            params.clear()

            params["knn"] = str(knn_all)
            params["maxDistField"] = "maxSearchDist"
            matcher_target = PM.get().MatcherRegistrar.create("KDTreeVarDistMatcher", params)
            params.clear()

            matcher_source.init(source)
            matcher_target.init(target)

            # Per-point search radius: distance to the knn-th neighbour
            # within the source cloud itself
            source_matches = matcher_source.findClosests(source)

            max_search_dist = np.sqrt(source_matches.dists.max(axis=0, keepdims=True), order='F')
            source.addDescriptor("maxSearchDist", max_search_dist)

            target_matches = matcher_target.findClosests(source)

            inlier_source = source.getDescriptorViewByName("inliers")
            inlier_target = target.getDescriptorViewByName("inliers")

            # A finite match distance means the target point lies within the
            # source point's search radius: mark both ends as overlapping.
            # NOTE: `np.infty` was removed in NumPy 2.0 — use `np.inf`.
            for m in range(source_pts_count):
                for n in range(knn_all):
                    if target_matches.dists[n, m] != np.inf:
                        inlier_source[0, m] = 1.0
                        inlier_target[0, target_matches.ids[n, m]] = 1.0

            # Swap roles so the second pass marks overlap in the other direction
            PM.get().swapDataPoints(source, target)

        final_inlier_source = source.getDescriptorViewByName("inliers")
        final_inlier_target = target.getDescriptorViewByName("inliers")
        source_ratio = np.count_nonzero(final_inlier_source) / final_inlier_source.shape[1]
        target_ratio = np.count_nonzero(final_inlier_target) / final_inlier_target.shape[1]

        print(f"{i} -> {j}: {source_ratio:.6}")
        print(f"{j} -> {i}: {target_ratio:.6}")

        overlap_results[j, i] = source_ratio
        overlap_results[i, j] = target_ratio

        if debug_mode:
            source.save(f"{output_base_directory + output_base_file}_scan_i.vtk")
            target.save(f"{output_base_directory + output_base_file}_scan_j.vtk")

# Dump the full (asymmetric) overlap matrix as CSV
with open(f"{output_base_directory}overlap_results.csv", 'w') as out_file:
    for x in range(overlap_results.shape[0]):
        for y in range(overlap_results.shape[1]):
            out_file.write(f"{overlap_results[x, y]:.6}, ")

        out_file.write('\n')
# --- Cross-references from the generated documentation ---
# void setLogger(std::shared_ptr<Logger> newLogger)
#     Set a new logger, protected by a mutex.  Definition: Logger.cpp:98
# FileInfoVector
#     A vector of file info, to be used in batch processing.  Definition: IO.h:245
#
# libpointmatcher
# Author(s):
# autogenerated on Sat May 27 2023 02:36:30