face_recognizer_node.cpp
#ifdef __LINUX__
#include "cob_people_detection/face_recognizer_node.h"
#include "cob_vision_utils/GlobalDefines.h"
#include "cob_people_detection/face_detection_message_helper.h"
#else
#endif

// OpenCV
#include <opencv2/opencv.hpp>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

// Boost
#include <boost/shared_ptr.hpp>

#include <sys/time.h>

using namespace ipa_PeopleDetector;

FaceRecognizerNode::FaceRecognizerNode(ros::NodeHandle nh) :
        node_handle_(nh)
{
        //      data_directory_ = ros::package::getPath("cob_people_detection") + "/common/files/";
        //      classifier_directory_ = ros::package::getPath("cob_people_detection") + "/common/files/";
        // Parameters
        bool norm_illumination;
        bool norm_align;
        bool norm_extreme_illumination;
        int norm_size; // Desired width and height of the Eigenfaces (=eigenvectors).
        int feature_dimension; // Number of eigenvectors per person to identify -> controls the total number of eigenvectors
        double threshold_facespace; // Threshold to facespace
        double threshold_unknown; // Threshold to detect unknown faces
        int metric; // metric for nearest neighbor search in face space: 0 = Euclidean, 1 = Mahalanobis, 2 = Mahalanobis Cosine
        bool debug; // enables some debug outputs
        int recognition_method; // choose subspace method
        bool use_unknown_thresh; // use threshold for unknown faces
        bool use_depth; // use depth for recognition
        std::vector<std::string> identification_labels_to_recognize; // a list of labels of persons that shall be recognized
        std::cout << "\n--------------------------\nFace Recognizer Parameters:\n--------------------------\n";
        //if(!node_handle_.getParam("~data_directory", data_directory_)) std::cout<<"PARAM NOT AVAILABLE"<<std::endl;
        if (!node_handle_.getParam("data_storage_directory", data_directory_))
                std::cout << "PARAM NOT AVAILABLE" << std::endl;
        std::cout << "data_directory = " << data_directory_ << "\n";
        node_handle_.param("enable_face_recognition", enable_face_recognition_, true);
        std::cout << "enable_face_recognition = " << enable_face_recognition_ << "\n";
        node_handle_.param("feature_dimension", feature_dimension, 10);
        std::cout << "feature_dimension = " << feature_dimension << "\n";
        node_handle_.param("threshold_facespace", threshold_facespace, 10000.0);
        std::cout << "threshold_facespace = " << threshold_facespace << "\n";
        node_handle_.param("threshold_unknown", threshold_unknown, 1000.0);
        std::cout << "threshold_unknown = " << threshold_unknown << "\n";
        node_handle_.param("metric", metric, 0);
        std::cout << "metric = " << metric << "\n";
        node_handle_.param("debug", debug, false);
        std::cout << "debug = " << debug << "\n";
        node_handle_.param("recognition_method", recognition_method, 3);
        std::cout << "recognition_method = " << recognition_method << "\n";
        node_handle_.param("use_unknown_thresh", use_unknown_thresh, true);
        std::cout << "use_unknown_thresh = " << use_unknown_thresh << "\n";
        node_handle_.param("display_timing", display_timing_, false);
        std::cout << "display_timing = " << display_timing_ << "\n";
        node_handle_.param("norm_size", norm_size, 100);
        std::cout << "norm_size = " << norm_size << "\n";
        node_handle_.param("norm_illumination", norm_illumination, true);
        std::cout << "norm_illumination = " << norm_illumination << "\n";
        node_handle_.param("norm_align", norm_align, false);
        std::cout << "norm_align = " << norm_align << "\n";
        node_handle_.param("norm_extreme_illumination", norm_extreme_illumination, false);
        std::cout << "norm_extreme_illumination = " << norm_extreme_illumination << "\n";
        node_handle_.param("use_depth", use_depth, false);
        std::cout << "use_depth = " << use_depth << "\n";
        // todo: add parameters to switch illumination and alignment normalization on/off

        std::cout << "identification_labels_to_recognize: \n";
        XmlRpc::XmlRpcValue identification_labels_to_recognize_list;
        node_handle_.getParam("identification_labels_to_recognize", identification_labels_to_recognize_list);
        if (identification_labels_to_recognize_list.getType() == XmlRpc::XmlRpcValue::TypeArray)
        {
                identification_labels_to_recognize.resize(identification_labels_to_recognize_list.size());
                for (int i = 0; i < identification_labels_to_recognize_list.size(); i++)
                {
                        ROS_ASSERT(identification_labels_to_recognize_list[i].getType() == XmlRpc::XmlRpcValue::TypeString);
                        identification_labels_to_recognize[i] = static_cast<std::string>(identification_labels_to_recognize_list[i]);
                }
        }

        // initialize face recognizer
        unsigned long return_value = face_recognizer_.init(data_directory_, norm_size, norm_illumination, norm_align, norm_extreme_illumination, metric, debug,
                        identification_labels_to_recognize, recognition_method, feature_dimension, use_unknown_thresh, use_depth);
        if (return_value == ipa_Utils::RET_FAILED)
        {
                ROS_ERROR("Recognition model not trained.");
        }
        else if (return_value == ipa_Utils::RET_OK)
        {
                std::cout << "Recognition model trained or loaded for:\n";
                for (unsigned int i = 0; i < identification_labels_to_recognize.size(); i++)
                        std::cout << "   - " << identification_labels_to_recognize[i] << std::endl;
        }

        // launch the LoadModel action server
        load_model_server_ = new LoadModelServer(node_handle_, "load_model_server", boost::bind(&FaceRecognizerNode::loadModelServerCallback, this, _1), false);
        load_model_server_->start();
        ROS_INFO("FaceRecognizerNode initialized.");

        // advertise topics
        face_recognition_publisher_ = node_handle_.advertise<cob_perception_msgs::DetectionArray>("face_recognitions", 1);

        // subscribe to the head/face detection topic
        face_position_subscriber_ = nh.subscribe("face_positions", 1, &FaceRecognizerNode::facePositionsCallback, this);
}
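
// Illustrative, hedged sketch (not part of the original node): it shows how the parameters read in the
// constructor above could be provided programmatically, e.g. from a test fixture, instead of a launch or
// yaml file. The parameter names follow the getParam()/param() calls above; the function name, the path
// and the example labels are made up for this sketch.
static void exampleSetFaceRecognizerParameters(ros::NodeHandle& nh)
{
        nh.setParam("data_storage_directory", std::string("/tmp/cob_people_detection_data/")); // illustrative path
        nh.setParam("enable_face_recognition", true);
        nh.setParam("feature_dimension", 10);
        nh.setParam("threshold_facespace", 10000.0);
        nh.setParam("threshold_unknown", 1000.0);
        nh.setParam("metric", 0); // 0 = Euclidean
        nh.setParam("recognition_method", 3);
        nh.setParam("use_unknown_thresh", true);
        nh.setParam("use_depth", true);
        nh.setParam("norm_size", 100);
        // the label list is an array parameter and is parsed via XmlRpc in the constructor
        XmlRpc::XmlRpcValue labels;
        labels[0] = std::string("PersonA"); // illustrative labels
        labels[1] = std::string("PersonB");
        nh.setParam("identification_labels_to_recognize", labels);
}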

FaceRecognizerNode::~FaceRecognizerNode(void)
{
        if (load_model_server_ != 0)
                delete load_model_server_;
}

// No-op deleter: prevents the shared pointer from freeing message memory it does not own
void voidDeleter(const sensor_msgs::Image* const )
{
}
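
// Illustrative sketch (an addition, not original code) of the pattern used in the callback below: an image
// that is owned by the incoming message is wrapped in a shared_ptr with the no-op deleter above, so that
// cv_bridge::toCvShare() can accept it without ever trying to free it. The function name and the final
// clone() are assumptions of this example only.
static cv::Mat exampleBorrowedImageConversion(const sensor_msgs::Image& borrowed_image)
{
        // wrap the borrowed message without taking ownership
        sensor_msgs::ImageConstPtr msg_ptr = boost::shared_ptr<sensor_msgs::Image const>(&borrowed_image, voidDeleter);
        // convert to an OpenCV image; cv_ptr->image may alias the message buffer
        cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg_ptr, sensor_msgs::image_encodings::BGR8);
        // clone so the returned cv::Mat stays valid after the message goes out of scope
        return cv_ptr->image.clone();
}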

//void FaceRecognizerNode::facePositionsCallback(const cob_perception_msgs::ColorDepthImageCropArray::ConstPtr& face_positions)
//{
//      // receive head and face positions and recognize faces in the face region, finally publish detected and recognized faces
//
//      // --- convert color image patches of head regions and contained face bounding boxes ---
//      cv_bridge::CvImageConstPtr cv_ptr;
//      std::vector<cv::Mat> heads_color_images;
//      heads_color_images.resize(face_positions->cdia.head_detections.size());
//      std::vector<cv::Mat> heads_depth_images;
//      heads_depth_images.resize(face_positions->cdia.head_detections.size());
//
//      std::vector< std::vector<cv::Rect> > face_bounding_boxes;
//      std::vector< std::vector<cv::Rect> > crop_bounding_boxes;
//      face_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//      crop_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//      cv::Rect bb=cv::Rect(0,0,160,160);
//
//      std::vector<cv::Rect> head_bounding_boxes;
//      head_bounding_boxes.resize(face_positions->cdia.head_detections.size());
//
//      for (unsigned int i=0; i<face_positions->cdia.head_detections.size(); i++)
//      {
//              // color image
//              if (enable_face_recognition_ == true)
//              {
//                      sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->cdia.head_detections[i].color_image), voidDeleter);
//                      try
//                      {
//                              cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
//                      }
//                      catch (cv_bridge::Exception& e)
//                      {
//                              ROS_ERROR("cv_bridge exception: %s", e.what());
//                              return;
//                      }
//                      heads_color_images[i] = cv_ptr->image;
//              }
//
//              // depth image
//              sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->cdia.head_detections[i].depth_image), voidDeleter);
//              try
//              {
//                      cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::TYPE_32FC3);
//              }
//              catch (cv_bridge::Exception& e)
//              {
//                      ROS_ERROR("cv_bridge exception: %s", e.what());
//                      return;
//              }
//              heads_depth_images[i] = cv_ptr->image;
//
//              // face bounding boxes
//              face_bounding_boxes[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//              crop_bounding_boxes[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//              for (uint j=0; j<face_bounding_boxes[i].size(); j++)
//              {
//                      const cob_perception_msgs::Rect& source_rect = face_positions->cdia.head_detections[i].face_detections[j];
//                      cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
//                      face_bounding_boxes[i][j] = rect;
//                      crop_bounding_boxes[i][j] = bb;
//              }
//
//              // head bounding box
//              const cob_perception_msgs::Rect& source_rect = face_positions->cdia.head_detections[i].head_detection;
//              cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
//              head_bounding_boxes[i] = rect;
//      }
//
//      std::vector<cv::Mat> crops;
//      //std::vector<std::vector<cv::Rect> > crop_bounding_boxes;
//      crops.resize(face_positions->crops.size());
//      for(int k=0;k<face_positions->crops.size();k++)
//      {
//              //temporary TODO:
//              sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->crops[k]), voidDeleter);
//              try
//              {
//                      cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
//              }
//              catch (cv_bridge::Exception& e)
//              {
//                      ROS_ERROR("cv_bridge exception: %s", e.what());
//                      return;
//              }
//              crops[k] = cv_ptr->image;
//              //temporary TODO
//              //crop_bounding_boxes[k][0]=bb;
//
//      }
//
//
//      // --- face recognition ---
//      std::vector< std::vector<std::string> > identification_labels;
//      bool identification_failed = false;
//      if (enable_face_recognition_ == true)
//      {
//              // recognize faces
//              //unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, face_bounding_boxes, identification_labels);
//              unsigned long result_state = face_recognizer_.recognizeFaces(crops, crop_bounding_boxes, identification_labels);
//              if (result_state == ipa_Utils::RET_FAILED)
//              {
//                      ROS_ERROR("FaceRecognizerNode::face_positions_callback: Please load a face recognition model first.");
//                      identification_failed = true;
//              }
//      }
//      if (enable_face_recognition_ == false || identification_failed == true)
//      {
//              // label all faces as unknown if face recognition is disabled or failed
//              identification_labels.resize(face_positions->cdia.head_detections.size());
//              for (uint i=0; i<identification_labels.size(); i++)
//              {
//                      identification_labels[i].resize(face_positions->cdia.head_detections[i].face_detections.size());
//                      for (uint j=0; j<identification_labels[i].size(); j++)
//                              identification_labels[i][j] = "Unknown";
//              }
//      }
//
//      // --- publish detection message ---
//      cob_perception_msgs::DetectionArray detection_msg;
//      detection_msg.header = face_positions->header;
//
//      // prepare message
//      for (int head=0; head<(int)head_bounding_boxes.size(); head++)
//      {
//              if (face_bounding_boxes[head].size() == 0)
//              {
//                      // no faces detected in head region -> publish head position
//                      cob_perception_msgs::Detection det;
//                      cv::Rect& head_bb = head_bounding_boxes[head];
//                      // set 3d position of head's center
//                      bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], 0.5*(float)head_bb.width, 0.5*(float)head_bb.height, det.pose.pose.position, 6);
//                      if (valid_3d_position==false)
//                              continue;
//                      // write bounding box
//                      det.mask.roi.x = head_bb.x;           det.mask.roi.y = head_bb.y;
//                      det.mask.roi.width = head_bb.width;   det.mask.roi.height = head_bb.height;
//                      // set label
//                      det.label="UnknownHead";
//                      // set origin of detection
//                      det.detector = "head";
//                      // header
//                      det.header = face_positions->header;
//                      // add to message
//                      detection_msg.detections.push_back(det);
//              }
//              else
//              {
//                      // process all faces in head region
//                      for (int face=0; face<(int)face_bounding_boxes[head].size(); face++)
//                      {
//                              cob_perception_msgs::Detection det;
//                              cv::Rect& head_bb = head_bounding_boxes[head];
//                              cv::Rect& face_bb = face_bounding_boxes[head][face];
//                              // set 3d position of head's center
//                              bool valid_3d_position = determine3DFaceCoordinates(heads_depth_images[head], face_bb.x+0.5*(float)face_bb.width, face_bb.y+0.5*(float)face_bb.height, det.pose.pose.position, 6);
//                              if (valid_3d_position==false)
//                                      continue;
//                              // write bounding box
//                              det.mask.roi.x = head_bb.x+face_bb.x; det.mask.roi.y = head_bb.y+face_bb.y;
//                              det.mask.roi.width = face_bb.width;   det.mask.roi.height = face_bb.height;
//                              // set label
//                              det.label=identification_labels[head][face];
//                              // set origin of detection
//                              det.detector = "face";
//                              // header
//                              det.header = face_positions->header;
//                              // add to message
//                              detection_msg.detections.push_back(det);
//                      }
//              }
//      }
//
//      // publish message
//      face_recognition_publisher_.publish(detection_msg);
//}
void FaceRecognizerNode::facePositionsCallback(const cob_perception_msgs::ColorDepthImageArray::ConstPtr& face_positions)
{
        //      Timer tim;
        //      tim.start();

        // receive head and face positions, recognize faces within the face regions and finally publish the detected and recognized faces

        // --- convert color image patches of head regions and contained face bounding boxes ---
        cv_bridge::CvImageConstPtr cv_ptr;
        std::vector<cv::Mat> heads_color_images(face_positions->head_detections.size());
        std::vector<cv::Mat> heads_depth_images(face_positions->head_detections.size());
        std::vector<std::vector<cv::Rect> > face_bounding_boxes(face_positions->head_detections.size());
        std::vector<cv::Rect> head_bounding_boxes(face_positions->head_detections.size());
        for (unsigned int i = 0; i < face_positions->head_detections.size(); i++)
        {
                // color image
                if (enable_face_recognition_ == true)
                {
                        sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->head_detections[i].color_image), voidDeleter);
                        try
                        {
                                cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::BGR8);
                        } catch (cv_bridge::Exception& e)
                        {
                                ROS_ERROR("cv_bridge exception: %s", e.what());
                                return;
                        }
                        heads_color_images[i] = cv_ptr->image;
                }

                // depth image
                sensor_msgs::ImageConstPtr msgPtr = boost::shared_ptr<sensor_msgs::Image const>(&(face_positions->head_detections[i].depth_image), voidDeleter);
                try
                {
                        cv_ptr = cv_bridge::toCvShare(msgPtr, sensor_msgs::image_encodings::TYPE_32FC3);
                } catch (cv_bridge::Exception& e)
                {
                        ROS_ERROR("cv_bridge exception: %s", e.what());
                        return;
                }
                heads_depth_images[i] = cv_ptr->image;

                // face bounding boxes
                face_bounding_boxes[i].resize(face_positions->head_detections[i].face_detections.size());
                for (uint j = 0; j < face_bounding_boxes[i].size(); j++)
                {
                        const cob_perception_msgs::Rect& source_rect = face_positions->head_detections[i].face_detections[j];
                        cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
                        face_bounding_boxes[i][j] = rect;
                }

                // head bounding box
                const cob_perception_msgs::Rect& source_rect = face_positions->head_detections[i].head_detection;
                cv::Rect rect(source_rect.x, source_rect.y, source_rect.width, source_rect.height);
                head_bounding_boxes[i] = rect;
        }

        // --- face recognition ---
        std::vector<std::vector<std::string> > identification_labels;
        bool identification_failed = false;
        if (enable_face_recognition_ == true)
        {
                // recognize faces
                //unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, face_bounding_boxes, identification_labels);

                //timeval t1,t2;
                //gettimeofday(&t1,NULL);
                unsigned long result_state = face_recognizer_.recognizeFaces(heads_color_images, heads_depth_images, face_bounding_boxes, identification_labels);
                //gettimeofday(&t2,NULL);
                //std::cout<<(t2.tv_sec - t1.tv_sec) * 1000.0<<std::endl;
                if (result_state == ipa_Utils::RET_FAILED)
                {
                        ROS_ERROR("FaceRecognizerNode::face_positions_callback: Please load a face recognition model first.");
                        identification_failed = true;
                }
        }
        if (enable_face_recognition_ == false || identification_failed == true)
        {
                // label all faces as unknown if face recognition is disabled or failed
                identification_labels.resize(face_positions->head_detections.size());
                for (uint i = 0; i < identification_labels.size(); i++)
                {
                        identification_labels[i].resize(face_positions->head_detections[i].face_detections.size());
                        for (uint j = 0; j < identification_labels[i].size(); j++)
                                identification_labels[i][j] = "Unknown";
                }
        }

        // --- publish detection message ---
        FaceDetectionMessageHelper face_detection_message_helper;
        cob_perception_msgs::DetectionArray detection_msg;
        face_detection_message_helper.prepareCartesionDetectionMessage(detection_msg, face_positions->header, heads_depth_images, head_bounding_boxes,
                        face_bounding_boxes, &identification_labels);
        // publish message
        face_recognition_publisher_.publish(detection_msg);

        if (display_timing_ == true)
                ROS_INFO("%d FaceRecognition: Time stamp of pointcloud message: %f. Delay: %f.", face_positions->header.seq, face_positions->header.stamp.toSec(),
                                ros::Time::now().toSec() - face_positions->header.stamp.toSec());
        //      ROS_INFO("Face recognition took %f ms", tim.getElapsedTimeInMilliSec());
}
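
// Illustrative consumer sketch (not part of this node): a minimal callback that evaluates the
// cob_perception_msgs::DetectionArray published on the "face_recognitions" topic above. The accessed
// fields (label, detector, pose.pose.position) follow the message usage elsewhere in this file; the
// callback name and the usage line are made up for this sketch.
static void exampleFaceRecognitionCallback(const cob_perception_msgs::DetectionArray::ConstPtr& msg)
{
        for (unsigned int i = 0; i < msg->detections.size(); i++)
        {
                const cob_perception_msgs::Detection& det = msg->detections[i];
                ROS_INFO("'%s' detected by '%s' at (%.2f, %.2f, %.2f)", det.label.c_str(), det.detector.c_str(),
                                det.pose.pose.position.x, det.pose.pose.position.y, det.pose.pose.position.z);
        }
}
// possible usage: ros::Subscriber sub = nh.subscribe("face_recognitions", 1, exampleFaceRecognitionCallback);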

bool FaceRecognizerNode::determine3DFaceCoordinates(cv::Mat& depth_image, int center2Dx, int center2Dy, geometry_msgs::Point& center3D, int search_radius)
{
        // read the 3D world coordinates at the given pixel; if that pixel does not contain valid coordinates, search its neighborhood for a valid pixel
        cv::Point3f p;
        bool valid_coordinates = false;
        for (int d = 0; (d < search_radius && !valid_coordinates); d++)
        {
                for (int v = -d; (v <= d && !valid_coordinates); v++)
                {
                        for (int u = -d; (u <= d && !valid_coordinates); u++)
                        {
                                if ((abs(v) != d && abs(u) != d) || center2Dx + u < 0 || center2Dx + u >= depth_image.cols || center2Dy + v < 0 || center2Dy + v >= depth_image.rows)
                                        continue;

                                p = depth_image.at<cv::Point3f>(center2Dy + v, center2Dx + u);
                                if (!isnan(p.x) && !isnan(p.y) && p.z != 0.f)
                                {
                                        valid_coordinates = true;
                                        center3D.x = p.x;
                                        center3D.y = p.y;
                                        center3D.z = p.z;
                                }
                        }
                }
        }

        return valid_coordinates;
}
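
// Illustrative sketch (an addition): the depth image handled above is a CV_32FC3 matrix in which each
// pixel stores a 3d point readable as cv::Point3f, and invalid pixels carry NaN or z == 0, which are
// exactly the two conditions the neighborhood search skips. The image size and the point value below
// are made up for this example.
static void exampleDepthImageLookup()
{
        cv::Mat depth_image(480, 640, CV_32FC3, cv::Scalar::all(0.f)); // all pixels invalid (z == 0)
        depth_image.at<cv::Point3f>(240, 320) = cv::Point3f(0.1f, -0.05f, 1.2f); // one valid 3d point at row v=240, column u=320
        cv::Point3f p = depth_image.at<cv::Point3f>(240, 320);
        ROS_INFO("3d point at (u=320, v=240): (%f, %f, %f)", p.x, p.y, p.z);
}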

void FaceRecognizerNode::loadModelServerCallback(const cob_people_detection::loadModelGoalConstPtr& goal)
{
        // read list of labels of persons that shall be recognized from goal message
        std::vector<std::string> identification_labels_to_recognize(goal->labels.size());
        for (int i = 0; i < (int)goal->labels.size(); i++)
                identification_labels_to_recognize[i] = goal->labels[i];

        // load the corresponding recognition model
        unsigned long result_state = face_recognizer_.loadRecognitionModel(identification_labels_to_recognize);

        cob_people_detection::loadModelResult result;
        if (result_state == ipa_Utils::RET_OK)
                load_model_server_->setSucceeded(result, "Model successfully loaded.");
        else
                load_model_server_->setAborted(result, "Loading new model failed.");
}
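
// Illustrative client sketch (not part of this node): how another node could request a model reload
// through the "load_model_server" action started in the constructor. The goal's 'labels' field follows
// the callback above; the fully resolved action name (assuming the default node name "face_recognizer"
// and its private namespace) and the example labels are assumptions of this sketch.
#include <actionlib/client/simple_action_client.h>
static void exampleLoadModelRequest()
{
        actionlib::SimpleActionClient<cob_people_detection::loadModelAction> client("/face_recognizer/load_model_server", true);
        client.waitForServer();
        cob_people_detection::loadModelGoal goal;
        goal.labels.push_back("PersonA"); // illustrative labels
        goal.labels.push_back("PersonB");
        client.sendGoal(goal);
        if (client.waitForResult(ros::Duration(30.0)) && client.getState() == actionlib::SimpleClientGoalState::SUCCEEDED)
                ROS_INFO("Recognition model loaded.");
        else
                ROS_WARN("Loading the recognition model failed or timed out.");
}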

//#######################
//#### main program ####
int main(int argc, char** argv)
{
        // Initialize ROS, specify name of node
        ros::init(argc, argv, "face_recognizer");

        // Create a handle for this node, initialize node
        ros::NodeHandle nh("~");

        // Create FaceRecognizerNode class instance
        FaceRecognizerNode face_recognizer_node(nh);

        // Create action nodes
        //DetectObjectsAction detect_action_node(object_detection_node, nh);
        //AcquireObjectImageAction acquire_image_node(object_detection_node, nh);
        //TrainObjectAction train_object_node(object_detection_node, nh);

        ros::spin();

        return 0;
}

