import numpy as np

from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms

# Shorthand aliases used throughout the script
PM = pm.PointMatcher
DP = PM.DataPoints
Matches = PM.Matches

PMIO = pm.PointMatcherIO

params = pms.Parametrizable.Parameters()
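
# Assumed setup, reconstructed from context (not part of the extracted listing):
# the scans to compare come from a PMIO.FileInfoVector loaded from a CSV list of
# clouds, and a debug flag restricts the run to a single pair of scans.
# file_info_list = PMIO.FileInfoVector("<cloud_list.csv>", "<data_directory>/")
debug_mode = False  # when True, only the pair (starting_I, starting_J) is processed
starting_I = 0      # index of the first cloud of the pair
starting_J = 1      # index of the second cloud of the pair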

output_base_directory = "tests/compute_overlap/"
output_base_file = "test"

rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")

transformations = PM.Transformations()
transformations.append(rigid_trans)

Tread = np.identity(4)
Tref = np.identity(4)

list_size_I = len(file_info_list)
list_size_J = len(file_info_list)
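
# overlap_results[j, i] will hold the estimated ratio of points of cloud i that
# overlap with cloud j (filled at the end of each loop iteration).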
overlap_results = np.ones((list_size_J, list_size_I), float)

# Debug mode restricts the run to the single pair (starting_I, starting_J);
# the two guards below are assumed, reconstructed from context.
if debug_mode:
    list_size_I = starting_I + 1

for i in range(starting_I, list_size_I):
    if debug_mode:
        list_size_J = starting_J + 1

    for j in range(starting_J, list_size_J):
        reading = DP.load(file_info_list[i].readingFileName)
        reference = DP.load(file_info_list[j].readingFileName)

        print("Point cloud loaded")
        if file_info_list[i].groundTruthTransformation.shape[0] != 0:
            Tread = file_info_list[i].groundTruthTransformation
            Tref = file_info_list[j].groundTruthTransformation
        else:
            print("ERROR: fields gTXX (i.e., ground truth matrices) are required")

        transformations.apply(reading, Tread)
        transformations.apply(reference, Tref)
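
        # Subsample each cloud, estimate local densities, and cap the density so
        # both clouds have comparable point distributions before matching.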
        params["prob"] = "0.5"
        sub_sample = PM.get().DataPointsFilterRegistrar.create("RandomSamplingDataPointsFilter",
                                                               params)
        params.clear()

        max_density = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter")

        params["keepDensities"] = "1"
        compute_density = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter",
                                                                    params)
        params.clear()
        reading = sub_sample.filter(reading)
        reading = compute_density.filter(reading)
        reading = max_density.filter(reading)
        inliers_read = np.zeros((1, reading.features.shape[1]))
        reading.addDescriptor("inliers", inliers_read)

        reference = sub_sample.filter(reference)
        reference = compute_density.filter(reference)
        reference = max_density.filter(reference)
        inliers_ref = np.zeros((1, reference.features.shape[1]))
        reference.addDescriptor("inliers", inliers_ref)
            self_pts_count = self.features.shape[1]
            target_pts_count = target.features.shape[1]

            params["knn"] = str(knn)
            matcher_self = PM.get().MatcherRegistrar.create("KDTreeMatcher", params)
            params.clear()
            params["knn"] = str(knn_all)
            params["maxDistField"] = "maxSearchDist"
            matcher_target = PM.get().MatcherRegistrar.create("KDTreeVarDistMatcher", params)
            params.clear()

            matcher_self.init(self)
            matcher_target.init(target)

            self_matches = Matches(knn, self_pts_count)
            self_matches = matcher_self.findClosests(self)
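
            # The distance to each point's farthest self-neighbour becomes its
            # maximum search radius when matching against the other cloud.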
            max_search_dist = np.sqrt(self_matches.dists.max(axis=0, keepdims=True), order='F')
            self.addDescriptor("maxSearchDist", max_search_dist)

            target_matches = Matches(knn_all, target_pts_count)
            target_matches = matcher_target.findClosests(self)

            inlier_self = self.getDescriptorViewByName("inliers")
            inlier_target = target.getDescriptorViewByName("inliers")
            for m in range(self_pts_count):
                for n in range(knn_all):
                    if target_matches.dists[n, m] != np.inf:
                        inlier_self[0, m] = 1.0
                        inlier_target[0, target_matches.ids[n, m]] = 1.0

            PM.get().swapDataPoints(self, target)

        final_inlier_self = self.getDescriptorViewByName("inliers")
        final_inlier_target = target.getDescriptorViewByName("inliers")
        self_ratio = np.count_nonzero(final_inlier_self) / final_inlier_self.shape[1]
        target_ratio = np.count_nonzero(final_inlier_target) / final_inlier_target.shape[1]

        print(f"{i} -> {j}: {self_ratio:.6}")
        print(f"{j} -> {i}: {target_ratio:.6}")

        overlap_results[j, i] = self_ratio
        overlap_results[i, j] = target_ratio

        if debug_mode:  # guard assumed: dump the pair as VTK files for visual inspection
            self.save(f"{output_base_directory + output_base_file}_scan_i.vtk")
            target.save(f"{output_base_directory + output_base_file}_scan_j.vtk")

with open(f"{output_base_directory}overlap_results.csv", 'w') as out_file:
    for x in range(overlap_results.shape[0]):
        for y in range(overlap_results.shape[1]):
            out_file.write(f"{overlap_results[x, y]:.6}, ")

        out_file.write('\n')  # end the row after writing all columns