scene_recognition.cpp
Go to the documentation of this file.
1 
18 //Global includes
19 #include <stdio.h>
20 #include <string>
21 #include <algorithm>
22 #include <stdlib.h>
23 #include<fstream>
24 
25 //Pkg includes
26 #include <ros/ros.h>
27 #include <asr_msgs/AsrObject.h>
28 #include <geometry_msgs/Pose.h>
29 #include <ISM/recognizer/Recognizer.hpp>
30 #include <ISM/utility/TableHelper.hpp>
31 #include <ISM/utility/GeometryHelper.hpp>
32 #include <ISM/common_type/Pose.hpp>
33 #include <tf/transform_datatypes.h>
34 #include <tf/transform_listener.h>
35 #include <visualization_msgs/MarkerArray.h>
36 #include <thread>
37 
38 //Local includes
41 namespace ser = ros::serialization;
42 
43 using namespace std_srvs;
44 using namespace std;
45 using namespace ros;
46 using namespace ISM;
47 using namespace IH;
48 using namespace VIZ;
49 
51 {
52 
/**
 * Constructor: reads all ROS parameters of the node, initializes the ISM
 * database helper, recognizer and object converter, and sets up the RViz
 * visualization publishers/helpers.
 *
 * @param results_already_shared Caller-owned buffer of recognition results that
 *        were already pushed to shared memory (stored by reference).
 * @param shared_recognition_results_ptr Manager for recognition results shared
 *        with the pose-prediction side.
 */
SceneRecognition::SceneRecognition(std::vector<ISM::RecognitionResultPtr>& results_already_shared, SharedRecognitionResultsManagerPtr shared_recognition_results_ptr) :
    node_handle_(ros::this_node::getName()),
    //Used for writing input configurations to scene recognition onto filesystem.
    object_constellation_folder_path_(""),
    object_constellation_file_counter_(0),
    last_object_set_size_(0),
    results_already_shared_(results_already_shared),
    is_visualization_active_(true),
    shared_recognition_results_ptr_(shared_recognition_results_ptr)

{
    ParamHelper ph("Scene Recognition", node_handle_);
    //Voxel-grid bin edge length and max angle deviation used by the ISM recognizer.
    ph.getParam<double>("bin_size", &bin_size_, 0.1);
    ph.getParam<double>("maxProjectionAngleDeviation", &max_projection_angle_deviation_, 10);

    ph.getParam<string>("dbfilename", &database_filename_, "");
    //A scene-model database is mandatory; without it the whole node shuts down.
    if(database_filename_.empty()){
        ROS_FATAL("DB Filename empty. Shutting down node!");
        ros::shutdown();
    }

    ph.getParam<int unsigned>("raterType", &rater_type_, 0);
    //Upper bound on object-set combinations generated from pose samples (overflow guard).
    ph.getParam<int unsigned>("objectSetMaxCount", &object_set_max_count_, 676);

    //Parameters for normalizing orientations of rotation-invariant objects.
    ph.getParam<int>("enableRotationMode", &enable_rotation_mode_, 0);
    ph.getParam<string>("rotationFrame", &rotation_frame_, "/map");
    ph.getParam<string>("rotationObjectType", &rotation_object_type_, "");
    ph.getParam<string>("rotationObjectId", &rotation_object_id_, "");

    ph.getParam<string>("baseFrame", &base_frame_, "/map");
    ph.getParam<string>("sceneMarkersSceneRecognitionPublisherName", &scene_markers_publisher_name_, "");
    ph.getParam<string>("vizSamplesClientName", &viz_samples_client_name_, "");
    ph.getParam<string>("pushCompletePatternsName", &push_complete_patterns_name_, "");
    ph.getParam<double>("markerLifetime", &marker_life_time_, 0);


    /* ----------------- ROS & Viz initialization ------------------ */
    viz_samples_client_ = node_handle_.serviceClient<asr_world_model::VisualizeSampledPoses>(viz_samples_client_name_);
    //NOTE(review): original source lines 91-96 are elided from this Doxygen dump
    //(presumably further service client/server registrations, e.g. for
    //push_complete_patterns_server_) — verify against the repository source.

    /* ----------------- ism initialization ------------------ */

    table_helper_ = TableHelperPtr(new TableHelper(database_filename_));
    recognizer_ = RecognizerPtr(new Recognizer(database_filename_, bin_size_, max_projection_angle_deviation_, false, rater_type_));
    converter = ObjectConverterPtr(new ObjectConverter(base_frame_, false, false, enable_rotation_mode_, rotation_frame_, rotation_object_type_, rotation_object_id_, false));

    ROS_INFO("SceneRecognition: Initialized recognizer from ism_lib.");

    /* ----------------- viz initialization ------------------ */

    visualization_publisher_ = node_handle_.advertise<visualization_msgs::MarkerArray>(scene_markers_publisher_name_, 1000);
    res_visualizer_ = ISMResultVisualizerRVIZPtr(new ISMResultVisualizerRVIZ(visualization_publisher_, ros::NodeHandle(node_handle_.getNamespace() + "/result_visualizer/")));
    //Raw owning pointer kept as in original (object_model_visualizer_ presumably freed elsewhere — TODO confirm).
    object_model_visualizer_ = new ObjectModelVisualizerRVIZ(visualization_publisher_, base_frame_, "", marker_life_time_);

    ROS_INFO("SceneRecognition: Initialized ism visualizations.");
}
115 
    //NOTE(review): the signature of the enclosing member function (original source
    //line 116) is elided from this Doxygen dump; this body builds pattern_object_map_
    //and unique_pattern_names_ from the database — confirm the owning function
    //against the repository source before editing.

    //Constructing a mapping from pattern names to objects, belonging to pattern, from sqlite db that makes up our scene models.

    ROS_DEBUG_NAMED("SceneRecognition", "Building pattern_object_map_ from database.");
    //Iterate by non-const reference: pattern_name is rewritten below when a "_sub" suffix is stripped.
    for(string& pattern_name : table_helper_->getModelPatternNames())
    {
        set<pair<string, string> > object_type_and_id_in_pattern =
            table_helper_->getObjectTypesAndIdsBelongingToPattern(pattern_name);
        set<pair<string, string> > object_types_and_ids_non_sub;
        for (pair<string, string> object : object_type_and_id_in_pattern)
        {
            //Filter out patternNames contain _sub* endings, as they do not name entire scenes, we need.
            if(object.first.find("_sub") != string::npos)
                continue;
            object_types_and_ids_non_sub.insert(make_pair(object.first,object.second));
        }
        //BUG(review): std::string::find returns size_t; truncating it into unsigned int
        //makes the npos comparison below evaluate true even when "_sub" is absent on
        //64-bit builds. Effectively harmless (substr clamps and returns the whole name,
        //so the "rename" is a no-op) apart from a misleading debug log — should be size_t.
        unsigned int last_index = pattern_name.find("_sub");
        if (last_index != string::npos)
        {
            //contains _sub ending
            string pattern_name_without_sub = pattern_name.substr(0, last_index);
            ROS_DEBUG_STREAM_NAMED("SceneRecognition", "Renamed scene " << pattern_name << " to " << pattern_name_without_sub << ".");
            pattern_name = pattern_name_without_sub;
        }
        //Merge object sets of sub-patterns into the entry of their root scene name.
        map<string, set<pair<string, string> > >::iterator it = pattern_object_map_.find(pattern_name);
        if (it != pattern_object_map_.end())
        {
            set<pair<string, string> > old_set = it->second;
            old_set.insert(object_types_and_ids_non_sub.begin(), object_types_and_ids_non_sub.end());
            it->second = old_set;
        }
        else
        {
            pattern_object_map_.insert(pair<string, set<pair<string, string> > >(pattern_name, object_types_and_ids_non_sub));
        }
        for (auto obj : pattern_object_map_.find(pattern_name)->second)
        {
            ROS_DEBUG_STREAM_NAMED("SceneRecognition", "Map element for scene " << pattern_name << " contains object [" << obj.first << ", " << obj.second << "].");
        }
    }

    //Collect the distinct root scene names (every name without a "_sub" infix).
    vector<string> pattern_names = table_helper_->getModelPatternNames();
    unique_pattern_names_.clear();
    for (string pattern_name : pattern_names)
    {
        if (pattern_name.find("_sub") == string::npos)
        {
            unique_pattern_names_.insert(pattern_name);
        }
    }
}
167 
168  void SceneRecognition::createObjectSetsFromPoseSamples(vector<asr_msgs::AsrObject>::iterator object_it, vector<asr_msgs::AsrObject>::iterator end_it,
169  ObjectSetPtr temp_set_ptr, vector<ObjectSetPtr>& object_sets, bool measured_value_only) {
170 
171  //We have added a sample from each object (type and id) to object set -> save it.
172  if(object_it == end_it){
173  object_sets.push_back(ObjectSetPtr( new ObjectSet(*temp_set_ptr)));
174  return;
175  }
176 
177  //Set it to only when we disable pose sample processing.
178  unsigned int used_samples_count = measured_value_only ? 1 : object_it->sampledPoses.size();
179 
180  //Go through all samples poses for a given object (type and id).
181  for (unsigned int i = 0; i < used_samples_count; i++) {
182 
183  geometry_msgs::Pose sample_pose = object_it->sampledPoses.at(i).pose;
184  double sampled_position_x = sample_pose.position.x;
185  double sampled_position_y = sample_pose.position.y;
186  double sampled_position_z = sample_pose.position.z;
187 
188  double sampled_quaternion_w = sample_pose.orientation.w;
189  double sampled_quaternion_x = sample_pose.orientation.x;
190  double sampled_quaternion_y = sample_pose.orientation.y;
191  double sampled_quaternion_z = sample_pose.orientation.z;
192 
193  QuaternionPtr ism_quaternion_ptr = QuaternionPtr(new Quaternion(sampled_quaternion_w,
194  sampled_quaternion_x,
195  sampled_quaternion_y,
196  sampled_quaternion_z));
197  QuaternionPtr ism_quaternion_normalized_ptr = GeometryHelper::normalize(ism_quaternion_ptr);
198  //Create ism object (from which object sets are made of) from the given asr object.
199  ObjectPtr new_object(new Object(object_it->type,
200  new Pose(
201  new Point(sampled_position_x,
202  sampled_position_y,
203  sampled_position_z),
204  ism_quaternion_normalized_ptr),
205  object_it->identifier,
206  boost::filesystem::path(object_it->meshResourcePath).replace_extension(".dae").string()));
207  //Add object sample to incomplete set for the object, considered in the current recursion level.
208  temp_set_ptr->objects.push_back(new_object);
209  //Go to next object in set to chose sample.
210  createObjectSetsFromPoseSamples(object_it+1, end_it, temp_set_ptr, object_sets, measured_value_only);
211  //Remove current sample of current object before adding another sample for the current object in next run of loop.
212  temp_set_ptr->objects.pop_back();
213  }
214 
215 }
216 
/**
 * Service callback: runs ISM scene recognition on the detected AsrObjects in the
 * request and fills the response with the number of results and a textual summary.
 * Returns false (early) only when no objects were passed at all.
 *
 * NOTE(review): several original source lines are elided from this Doxygen dump
 * (244, 274, 288-289, 308, 343, 350, 367, 376-377); the stray closing braces near
 * the end belong to if-blocks whose opening lines were elided — verify against the
 * repository source before editing this function.
 */
bool SceneRecognition::processFindScenesServiceCall(asr_recognizer_prediction_ism::FindScenes::Request &request, asr_recognizer_prediction_ism::FindScenes::Response &response)
{
    ROS_INFO("\n==================================================\n");
    ROS_INFO("Callback SceneRecognition::processFindScenesServiceCall() is called.");

    //Nothing to recognize: report an empty result set and bail out.
    if (request.objects.empty())
    {
        ROS_INFO("No objects passed to scene detection callback.");
        response.result_size.data = 0;
        response.output = "";

        ROS_INFO("\n==================================================\n");
        return false;

    }
    //Every object estimate must carry at least one pose sample.
    for (asr_msgs::AsrObject pbd_object : request.objects)
    {
        ROS_ASSERT_MSG(!pbd_object.sampledPoses.empty(),"Object estimate with no samples not allowed.");
    }

    //Clear shared RecognitionResult vector.
    shared_recognition_results_ptr_->clearResults();

    ROS_DEBUG_STREAM_NAMED("SceneRecognition", shared_recognition_results_ptr_->getResultsNumber() << " recognition results in shared memory after flushing at the begin of findScenes().");

    //Contains an object set per combination of object pose samples for all input objects.
    vector<ObjectSetPtr> object_sets;
    //NOTE(review): original line 244 elided here in this dump.

    //Precalculate how many object sets would be generated by pose sample processing to avoid overflow. Samples just required when few poses available.
    unsigned int number_of_combinations = 1;

    for (asr_msgs::AsrObject asr_object : request.objects)
    {
        number_of_combinations *= asr_object.sampledPoses.size();
        //Get all objects (types and ids) from input set as well.
        object_types_and_ids_set_.insert(make_pair(asr_object.type, asr_object.identifier));
    }

    //Get all object sets from combining samples of all given objects.
    //If the combination count exceeds object_set_max_count_, only measured poses are used.
    createObjectSetsFromPoseSamples(request.objects.begin(), request.objects.end(), ObjectSetPtr(new ObjectSet()),
                                    object_sets, number_of_combinations > object_set_max_count_);

    ROS_INFO_STREAM("Number of object set combinations: " << object_sets.size());

    //Only show pose samples in RViz, when processed.
    if(number_of_combinations <= object_set_max_count_)
    {
        ROS_ASSERT_MSG(number_of_combinations == object_sets.size(), "Error in recursive object set generation, expect: %u actual: %zu", number_of_combinations, object_sets.size());

        if (viz_samples_client_.exists()) {
            asr_world_model::VisualizeSampledPoses srv;
            for (asr_msgs::AsrObject pbd_object : request.objects)
            {
                srv.request.object_type = pbd_object.type;
                srv.request.object_id = pbd_object.identifier;
                //Show additional samples for this object.
                //NOTE(review): original line 274 elided — presumably the service call itself; confirm.
            }

        } else {
            ROS_WARN("visualization server from world model does not exists -> sampled poses will not be visualized");
        }

    } else {
        ROS_ASSERT_MSG(1 == object_sets.size(), "Error in recursive object set generation, expect: %u actual: %zu", 1, object_sets.size());
    }

    //For every scene type (pattern name), return the root of the recognized scene model rated best of
    //the given object set (concerning its confidence).

    std::vector<ISM::RecognitionResultPtr> recognition_results;

    //Call scene recognition for each object set separately since otherwise object samples get lost during scene recognition.
    for(vector<ObjectSetPtr>::iterator set_it = object_sets.begin(); set_it != object_sets.end(); set_it++) {

        //Backup xml file of object set for fake_object_recognition.
        writeObjectConstellationToXML((*set_it)->objects, object_constellation_file_counter_, distance(object_sets.begin(), set_it));

        //Normalize orientations of rotation invariant objects.
        converter->normalizeRotationInvariantObjects((*set_it));

        //THIS IS THE EFFECTIVE CALL TO ISM SCENE RECOGNITION.
        vector<RecognitionResultPtr> resultsForCombination = recognizer_->recognizePattern((*set_it), 0, -1);

        recognition_results.insert(recognition_results.end(), resultsForCombination.begin(), resultsForCombination.end());

    }

    //NOTE(review): original line 308 elided — presumably incrementing object_constellation_file_counter_; confirm.
    ROS_INFO_STREAM("This is the " << object_constellation_file_counter_ << "th scene recognition run");

    //No scene matched at all: still a successful service call, with an explanatory message.
    if (recognition_results.empty())
    {
        ROS_INFO("Not any scene could be recognized by ism in input objects.");
        response.result_size.data = 0;
        response.output = "Not any scene could be recognized by ism in input objects.";

        ROS_INFO("\n==================================================\n");
        return true;
    }

    stringstream output;
    output << "Summary of results from SceneRecognition::processFindScenesServiceCall() in rp_ism_node." << endl;
    output << "These are the best recognition results, returned per patternName:";

    //Sorting MUST just happen once (at least before we used stable sort). Otherwise order of scenes with same confidence gets inconsistent.
    //recognitionResults are now sorted
    stable_sort(recognition_results.begin(), recognition_results.end(), compareConfidence);

    filterIncompleteRR(recognition_results);



    //Only fill shared memory with new recognition results if a new object has been detected since last execution of
    if (request.objects.size() != last_object_set_size_)
    {

        ROS_INFO("New objects detected since last scene recognition call.");
        ROS_INFO("Flushing shared memory with recognition results and inserting new results.");

        deleteAllNewRR();

        //Push back all incomplete recognition results to result vector in shared memory for prediction.
        //NOTE(review): original line 343 elided — presumably the push into shared_recognition_results_ptr_; confirm.

        ROS_DEBUG_STREAM_NAMED("SceneRecognition", shared_recognition_results_ptr_->getResultsNumber() << " recognition results in shared memory after adding new results.");

        last_object_set_size_ = request.objects.size();

        //reverse resultsForVisualization to show best scenes
        //NOTE(review): original line 350 elided here.

        //We only want to log and visualize the best results for every pattern type. Therefore we have a list of all patterns for which no result has been shown yet.
        set<string> remaining_pattern_names_counter = unique_pattern_names_;


        //Count how many distinct scenes will be visualized (one per pattern name).
        int scene_count = 0;
        for (RecognitionResultPtr& recognition_result_ptr : results_for_visualization_)
        {
            const string current_pattern_name = recognition_result_ptr->patternName;
            if (remaining_pattern_names_counter.find(current_pattern_name) != remaining_pattern_names_counter.end())
            {
                scene_count++;
            }
            remaining_pattern_names_counter.erase(current_pattern_name);
        }

        //NOTE(review): original line 367 elided — presumably "if (is_visualization_active_) {"; confirm.
        res_visualizer_->clearAllMarkerOfTopic();
        res_visualizer_->setSceneCount(scene_count);

        ROS_DEBUG_NAMED("SceneRecognition", "Flushed visualization for scene recognition.");
    }

    sortBestRRperScene(output);

    //NOTE(review): original lines 376-377 elided — the two stray closing braces below
    //presumably close visualization-guard blocks whose openings were elided; confirm.
    res_visualizer_->publishCollectedMarkers();

    }
    }
    //construct data for service call result
    std_msgs::UInt8 result_size;
    result_size.data = recognition_results.size();
    response.result_size = result_size;

    ROS_INFO_STREAM("OUT: "<< output.str());

    response.output = output.str();

    ROS_INFO("\n==================================================\n");
    return true;
}
394 
395 void SceneRecognition::filterIncompleteRR(std::vector<ISM::RecognitionResultPtr> &recognition_results){
396  //TODO-DONE: Create method for filtering incomplete recognition results.
397  set<string> complete_pattern_names;
398  vector<asr_world_model::CompletePattern> completePatternsToPush;
399 
400  ROS_INFO_STREAM("These are all sorted " << recognition_results.size() << " recognition results from the current recognition call:");
401  //Get all patterns for which all objects have been found.
402  for(RecognitionResultPtr& recognition_result_ptr : recognition_results)
403  {
404  ROS_DEBUG_STREAM_NAMED("SceneRecognition", recognition_result_ptr);
405 
406  if (SceneRecognition::isCompleteSubset(pattern_object_map_.find(recognition_result_ptr->patternName)->second, object_types_and_ids_set_))
407  {
408  complete_pattern_names.insert(recognition_result_ptr->patternName);
409  vector<asr_world_model::CompletePattern>::iterator it = find_if(completePatternsToPush.begin(), completePatternsToPush.end(), [&recognition_result_ptr]
410  (const asr_world_model::CompletePattern& r) { return r.patternName == recognition_result_ptr->patternName; } );
411  if (it == completePatternsToPush.end()) {
412  asr_world_model::CompletePattern completePattern;
413  completePattern.patternName = recognition_result_ptr->patternName;
414  completePattern.confidence = recognition_result_ptr->confidence;
415  completePatternsToPush.push_back(completePattern);
416  } else {
417  it->confidence = max(it->confidence, static_cast<float>(recognition_result_ptr->confidence));
418  }
419  }
420  }
421 
422  asr_world_model::PushCompletePatterns push_complete_patterns;
423  for (const asr_world_model::CompletePattern &pattern : completePatternsToPush) {
424  push_complete_patterns.request.completePatterns.push_back(pattern);
425  }
426  push_complete_patterns_server_.call(push_complete_patterns);
427 
428  for(string pattern_name : complete_pattern_names)
429  ROS_INFO_STREAM("Found all objects in scene " << pattern_name << ", hence no pose prediction for it.");
430 
431  //Filter out completely detected patterns and construct copy of all results for viz (latter required for being able to resort recognition results for viz).
432  for(RecognitionResultPtr& recognition_result_ptr : recognition_results)
433  {
434  results_for_visualization_.push_back(recognition_result_ptr);
435  if (complete_pattern_names.find(recognition_result_ptr->patternName) == complete_pattern_names.end())
436  {
437  incomplete_recognition_results_.push_back(recognition_result_ptr); //use scene for pose prediction
438  }
439  }
440 }
441 
442 
/**
 * Logs and visualizes the best (first, since results_for_visualization_ is sorted)
 * recognition result per scene type and appends it to the textual summary.
 *
 * NOTE(review): original source lines 453-454 are elided from this Doxygen dump —
 * presumably a visualization guard ("if (is_visualization_active_) {"), which
 * explains the apparently unbalanced closing braces below; verify against the
 * repository source before editing.
 *
 * @param output Stream the per-scene summary lines are appended to.
 */
void SceneRecognition::sortBestRRperScene(std::stringstream& output){
    //Scenes for which no result has been logged/visualized yet.
    set<string> remaining_pattern_names = unique_pattern_names_;
    // resultsForVisualization are sorted now.
    // Pick best result of incomplete scenes per scene and viz it.
    for (RecognitionResultPtr& recognition_result_ptr : results_for_visualization_)
    {
        const string current_pattern_name = recognition_result_ptr->patternName;
        if (remaining_pattern_names.find(current_pattern_name) != remaining_pattern_names.end())
        {
            output << endl << recognition_result_ptr << endl;

            object_model_visualizer_->drawObjectModels(extractRealObjects(recognition_result_ptr));
            res_visualizer_->addVisualization(recognition_result_ptr);

        }

        //Each scene type is handled at most once.
        remaining_pattern_names.erase(current_pattern_name);
    }
    // best results for every scene type has been visualized
    if (remaining_pattern_names.size() == 0)
    {
        break;
    }
    }
}
469 
470 
    //NOTE(review): the function signature (original source line 471) is elided from
    //this Doxygen dump; this body belongs to deleteAllNewRR(), which is invoked from
    //processFindScenesServiceCall() — confirm against the repository source.
    //Purpose: drop every incomplete recognition result that importance sampling has
    //already consumed (i.e. that appears in results_already_shared_), so it is not
    //processed again.
    ROS_INFO("Now DELETING all NEW RECOGNITION RESULTS, already used for pose prediction.");

    ROS_INFO_STREAM("Number of already used results: " << results_already_shared_.size());
    int number_of_erased_incomplete_recognition_results = 0;

    vector<RecognitionResultPtr>::iterator incomplete_it = incomplete_recognition_results_.begin();
    //Remove all current recognition results that have already been chosen by importance sampling to prevent them from being processed again. Inefficient otherwise...
    while (incomplete_it != incomplete_recognition_results_.end()) {
        //Keep results that were never shared; erase() returns the next valid iterator.
        if (find(results_already_shared_.begin(), results_already_shared_.end(), *incomplete_it) == results_already_shared_.end()) {
            ++incomplete_it;
        }
        else {
            incomplete_it = incomplete_recognition_results_.erase(incomplete_it);
            ++number_of_erased_incomplete_recognition_results;
        }
    }
    ROS_DEBUG_STREAM_NAMED("SceneRecognition", "Number of deleted results: " << number_of_erased_incomplete_recognition_results);
    ROS_DEBUG_STREAM_NAMED("SceneRecognition", "Remaining recognition results: " << incomplete_recognition_results_.size());
}
491 
492 
493 bool SceneRecognition::setLogFilePathServiceCall(asr_recognizer_prediction_ism::SetLogDir::Request &request, asr_recognizer_prediction_ism::SetLogDir::Response &response)
494 {
495  if (boost::filesystem::exists(request.log_dir_path))
496  {
497  object_constellation_folder_path_ = request.log_dir_path;
498  return true;
499  }
500  ROS_ERROR_STREAM(request.log_dir_path << " does not exist!");
501  return false;
502 }
503 
//NOTE(review): the first line of this signature (original source line 504) and
//line 508 are elided from this Doxygen dump. Per the Doxygen index this is
//processToggleVisualizationServiceCall(std_srvs::Empty::Request&,
//std_srvs::Empty::Response&) — "Toggle the visualization via service call" —
//and the elided line 508 presumably flips is_visualization_active_; confirm
//against the repository source before editing.
    Empty::Response &response)
{
    ROS_DEBUG_NAMED("SceneRecognition", "Callback SceneRecognition::processToggleVisualizationServiceCall() is called.");
    //Report the (post-toggle) visualization state to the user.
    ROS_INFO_COND(is_visualization_active_, "Visualization is active");
    ROS_WARN_COND(!is_visualization_active_, "Visualization is not active");
    return true;
}
513 
/**
 * Service handler that resets the recognition state between runs: clears the
 * shared-memory recognition results and the buffer of already shared results.
 *
 * NOTE(review): original source lines 518 and 524 are elided from this Doxygen
 * dump — given the adjacent log messages they presumably reset
 * last_object_set_size_ and the configuration file counter; confirm against the
 * repository source.
 *
 * @param request  Unused (std_srvs::Empty).
 * @param response Unused (std_srvs::Empty).
 * @return Always true.
 */
bool SceneRecognition::processResetServiceCall(Empty::Request &request, Empty::Response &response)
{
    ROS_DEBUG_NAMED("SceneRecognition", "Callback SceneRecognition::processResetServiceCall() is called.");
    ROS_DEBUG_NAMED("SceneRecognition", "Setting last object count from %d back to 0", last_object_set_size_);
    ROS_DEBUG_NAMED("SceneRecognition", "Resetting shared mem with recognition results.");
    shared_recognition_results_ptr_->clearResults();
    ROS_DEBUG_NAMED("SceneRecognition", "Flushing buffer of results, already present in shared mem.");
    results_already_shared_.clear();
    ROS_DEBUG_NAMED("SceneRecognition", "Setting configuration file counter back to 0");
    return true;
}
527 
/**
 * Writes the given object constellation to an XML file (consumable by
 * fake_object_recognition) in the configured log directory. Each object is
 * serialized with type, id, mesh path and its pose as "x,y,z,qw,qx,qy,qz".
 *
 * @param scene_recog_count    Index of the current recognition run (file name part).
 * @param constellation_count  Index of the sample combination (file name part).
 */
void SceneRecognition::writeObjectConstellationToXML(vector<ObjectPtr> &objects, int scene_recog_count, int constellation_count)
{

    //Don`t do anything if no path was provided that specifies where the configuration should be stored.
    //NOTE(review): the guard condition itself (original source line 532, presumably
    //"if (object_constellation_folder_path_.empty())") is elided from this Doxygen
    //dump, leaving this return statement dangling — confirm against the repository source.
    return;

    if (objects.size() > 0)
    {
        std::ofstream myfile;
        string filePath = object_constellation_folder_path_ + "/" + "recognition_" + to_string(scene_recog_count) + "_constellation_" + to_string(constellation_count) + ".xml";
        myfile.open(filePath);
        //NOTE(review): the stream is not checked for successful open — a bad path fails silently.
        myfile << "<Objects>";

        for (unsigned int i = 0; i < objects.size(); ++i)
        {
            ObjectPtr object = objects[i];
            PosePtr pose = object->pose;
            myfile << "<Object "
                   << "type=\"" << object->type
                   << "\" id=\"" << object->observedId
                   << "\" mesh=\"" << object->ressourcePath.string()
                   << "\" angles=\"quaternion\">"
                   << pose->point->eigen.x()
                   << "," << pose->point->eigen.y()
                   << "," << pose->point->eigen.z()
                   << "," << pose->quat->eigen.w()
                   << "," << pose->quat->eigen.x()
                   << "," << pose->quat->eigen.y()
                   << "," << pose->quat->eigen.z()
                   << " </Object>";
        }

        myfile << "</Objects>";
        myfile.close();
    }
}
565 
566 vector<ObjectPtr> SceneRecognition::extractRealObjects(RecognitionResultPtr result)
567 {
568  vector<ObjectPtr> ret_objects;
569  for(ObjectPtr obj : result->recognizedSet->objects)
570  {
571  if( obj->type.find( result->patternName ) == string::npos)
572  {
573  ret_objects.push_back(obj);
574  }
575  }
576 
577  vector<ObjectPtr> temp_objects;
578  for (RecognitionResultPtr sub_result_ptr : result->subPatterns)
579  {
580  temp_objects = extractRealObjects(sub_result_ptr);
581  ret_objects.insert( ret_objects.end(), temp_objects.begin(), temp_objects.end());
582  }
583  return ret_objects;
584 }
585 
586 bool SceneRecognition::compareConfidence(RecognitionResultPtr i, RecognitionResultPtr j) {
587  return (i->confidence < j->confidence);
588 }
589 
590 bool SceneRecognition::isCompleteSubset(set<pair<string, string> > set_a, set<pair<string, string> > set_b)
591 {
592  unsigned int counter = 0;
593  for (pair<string, string> el_a : set_a)
594  {
595  for (pair<string, string> el_b : set_b)
596  {
597  if(!el_a.first.compare(el_b.first) && !el_a.second.compare(el_b.second))
598  counter++;
599  }
600  }
601  return counter == set_a.size();
602 }
603 }
ServiceClient serviceClient(const std::string &service_name, bool persistent=false, const M_string &header_values=M_string())
std::vector< ISM::RecognitionResultPtr > & results_already_shared_
#define ROS_FATAL(...)
std::set< std::pair< std::string, std::string > > object_types_and_ids_set_
#define ROS_DEBUG_STREAM_NAMED(name, args)
boost::shared_ptr< ObjectConverter > ObjectConverterPtr
Definition: ism_helper.h:111
std::vector< ISM::ObjectPtr > extractRealObjects(ISM::RecognitionResultPtr result_ptr)
bool call(MReq &req, MRes &res)
std::string getName(void *handle)
Definition: ism_helper.h:36
ServiceServer advertiseService(const std::string &service, bool(T::*srv_func)(MReq &, MRes &), T *obj)
bool processFindScenesServiceCall(asr_recognizer_prediction_ism::FindScenes::Request &request, asr_recognizer_prediction_ism::FindScenes::Response &response)
processFindScenesServiceCall Receives service call with detected AsrObjects.
#define ROS_WARN(...)
std::vector< ISM::RecognitionResultPtr > results_for_visualization_
SharedRecognitionResultsManagerPtr shared_recognition_results_ptr_
void sortBestRRperScene(std::stringstream &output)
std::vector< ISM::RecognitionResultPtr > incomplete_recognition_results_
static bool isCompleteSubset(std::set< std::pair< std::string, std::string > > set_a, std::set< std::pair< std::string, std::string > > set_b)
#define ROS_DEBUG_NAMED(name,...)
std::map< std::string, std::set< std::pair< std::string, std::string > > > pattern_object_map_
#define ROS_INFO(...)
bool setLogFilePathServiceCall(asr_recognizer_prediction_ism::SetLogDir::Request &request, asr_recognizer_prediction_ism::SetLogDir::Response &response)
processSetLogDirServiceCall Receives service call where to write object constellations, used for scene recognition.
#define ROS_ASSERT_MSG(cond,...)
const std::string & getNamespace() const
void getParam(std::string param_name, T *variable, T default_value)
Definition: param_helper.h:28
Publisher advertise(const std::string &topic, uint32_t queue_size, bool latch=false)
void filterIncompleteRR(std::vector< ISM::RecognitionResultPtr > &recognition_results)
void writeObjectConstellationToXML(std::vector< ISM::ObjectPtr > &objects, int sceneRecogCount, int constellation_count)
bool processResetServiceCall(std_srvs::Empty::Request &request, std_srvs::Empty::Response &response)
processResetServiceCall Clears the old recognition results.
void createObjectSetsFromPoseSamples(std::vector< asr_msgs::AsrObject >::iterator object_it, std::vector< asr_msgs::AsrObject >::iterator end_it, ISM::ObjectSetPtr temp_set_ptr, std::vector< ISM::ObjectSetPtr > &object_sets, bool measured_value_only)
VIZ::ObjectModelVisualizerRVIZ * object_model_visualizer_
#define ROS_INFO_STREAM(args)
static bool compareConfidence(ISM::RecognitionResultPtr i, ISM::RecognitionResultPtr j)
bool processToggleVisualizationServiceCall(std_srvs::Empty::Request &request, std_srvs::Empty::Response &response)
processToggleVisualizationServiceCall Toggle the visualization via service call.
#define ROS_WARN_COND(cond,...)
#define ROS_INFO_COND(cond,...)
ROSCPP_DECL void shutdown()
#define ROS_ERROR_STREAM(args)
VIZ::ISMResultVisualizerRVIZPtr res_visualizer_


asr_recognizer_prediction_ism
Author(s): Aumann Florian, Heller Florian, Hutmacher Robin, Meißner Pascal, Stöckle Patrick, Stroh Daniel
autogenerated on Wed Jan 8 2020 03:18:32