00001 """autogenerated by genpy from interactive_perception_msgs/ObjectRecognitionGuiActionGoal.msg. Do not edit."""
00002 import sys
00003 python3 = sys.hexversion > 0x03000000
00004 import genpy
00005 import struct
00006
00007 import interactive_perception_msgs.msg
00008 import actionlib_msgs.msg
00009 import geometry_msgs.msg
00010 import shape_msgs.msg
00011 import sensor_msgs.msg
00013 import std_msgs.msg
00014
00015 class ObjectRecognitionGuiActionGoal(genpy.Message):
00016 _md5sum = "1e32fe3c98b77648d340d3bea5e19eeb"
00017 _type = "interactive_perception_msgs/ObjectRecognitionGuiActionGoal"
00018 _has_header = True
00019 _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00020
00021 Header header
00022 actionlib_msgs/GoalID goal_id
00023 ObjectRecognitionGuiGoal goal
00024
00025 ================================================================================
00026 MSG: std_msgs/Header
00027 # Standard metadata for higher-level stamped data types.
00028 # This is generally used to communicate timestamped data
00029 # in a particular coordinate frame.
00030 #
00031 # sequence ID: consecutively increasing ID
00032 uint32 seq
00033 #Two-integer timestamp that is expressed as:
00034 # * stamp.secs: seconds (stamp_secs) since epoch
00035 # * stamp.nsecs: nanoseconds since stamp_secs
00036 # time-handling sugar is provided by the client library
00037 time stamp
00038 #Frame this data is associated with
00039 # 0: no frame
00040 # 1: global frame
00041 string frame_id
00042
00043 ================================================================================
00044 MSG: actionlib_msgs/GoalID
00045 # The stamp should store the time at which this goal was requested.
00046 # It is used by an action server when it tries to preempt all
00047 # goals that were requested before a certain time
00048 time stamp
00049
00050 # The id provides a way to associate feedback and
00051 # result messages with specific goal requests. The id
00052 # specified must be unique.
00053 string id
00054
00055
00056 ================================================================================
00057 MSG: interactive_perception_msgs/ObjectRecognitionGuiGoal
00058 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00059
00060 #the original sensor data (depth/disparity image)
00061 sensor_msgs/Image image
00062 sensor_msgs/CameraInfo camera_info
00063
00064 #list of mesh/pose hypotheses for each recognized point cluster
00065 ModelHypothesisList[] model_hypotheses
00066
00067 ================================================================================
00068 MSG: sensor_msgs/Image
00069 # This message contains an uncompressed image
00070 # (0, 0) is at top-left corner of image
00071 #
00072
00073 Header header # Header timestamp should be acquisition time of image
00074 # Header frame_id should be optical frame of camera
00075 # origin of frame should be optical center of camera
00076 # +x should point to the right in the image
00077 # +y should point down in the image
00078 # +z should point into the plane of the image
00079 # If the frame_id here and the frame_id of the CameraInfo
00080 # message associated with the image conflict
00081 # the behavior is undefined
00082
00083 uint32 height # image height, that is, number of rows
00084 uint32 width # image width, that is, number of columns
00085
00086 # The legal values for encoding are in file src/image_encodings.cpp
00087 # If you want to standardize a new string format, join
00088 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.
00089
00090 string encoding # Encoding of pixels -- channel meaning, ordering, size
00091 # taken from the list of strings in include/sensor_msgs/image_encodings.h
00092
00093 uint8 is_bigendian # is this data bigendian?
00094 uint32 step # Full row length in bytes
00095 uint8[] data # actual matrix data, size is (step * rows)
00096
00097 ================================================================================
00098 MSG: sensor_msgs/CameraInfo
00099 # This message defines meta information for a camera. It should be in a
00100 # camera namespace on topic "camera_info" and accompanied by up to five
00101 # image topics named:
00102 #
00103 # image_raw - raw data from the camera driver, possibly Bayer encoded
00104 # image - monochrome, distorted
00105 # image_color - color, distorted
00106 # image_rect - monochrome, rectified
00107 # image_rect_color - color, rectified
00108 #
00109 # The image_pipeline contains packages (image_proc, stereo_image_proc)
00110 # for producing the four processed image topics from image_raw and
00111 # camera_info. The meaning of the camera parameters are described in
00112 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.
00113 #
00114 # The image_geometry package provides a user-friendly interface to
00115 # common operations using this meta information. If you want to, e.g.,
00116 # project a 3d point into image coordinates, we strongly recommend
00117 # using image_geometry.
00118 #
00119 # If the camera is uncalibrated, the matrices D, K, R, P should be left
00120 # zeroed out. In particular, clients may assume that K[0] == 0.0
00121 # indicates an uncalibrated camera.
00122
00123 #######################################################################
00124 # Image acquisition info #
00125 #######################################################################
00126
00127 # Time of image acquisition, camera coordinate frame ID
00128 Header header # Header timestamp should be acquisition time of image
00129 # Header frame_id should be optical frame of camera
00130 # origin of frame should be optical center of camera
00131 # +x should point to the right in the image
00132 # +y should point down in the image
00133 # +z should point into the plane of the image
00134
00135
00136 #######################################################################
00137 # Calibration Parameters #
00138 #######################################################################
00139 # These are fixed during camera calibration. Their values will be the #
00140 # same in all messages until the camera is recalibrated. Note that #
00141 # self-calibrating systems may "recalibrate" frequently. #
00142 # #
00143 # The internal parameters can be used to warp a raw (distorted) image #
00144 # to: #
00145 # 1. An undistorted image (requires D and K) #
00146 # 2. A rectified image (requires D, K, R) #
00147 # The projection matrix P projects 3D points into the rectified image.#
00148 #######################################################################
00149
00150 # The image dimensions with which the camera was calibrated. Normally
00151 # this will be the full camera resolution in pixels.
00152 uint32 height
00153 uint32 width
00154
00155 # The distortion model used. Supported models are listed in
00156 # sensor_msgs/distortion_models.h. For most cameras, "plumb_bob" - a
00157 # simple model of radial and tangential distortion - is sufficient.
00158 string distortion_model
00159
00160 # The distortion parameters, size depending on the distortion model.
00161 # For "plumb_bob", the 5 parameters are: (k1, k2, t1, t2, k3).
00162 float64[] D
00163
00164 # Intrinsic camera matrix for the raw (distorted) images.
00165 # [fx 0 cx]
00166 # K = [ 0 fy cy]
00167 # [ 0 0 1]
00168 # Projects 3D points in the camera coordinate frame to 2D pixel
00169 # coordinates using the focal lengths (fx, fy) and principal point
00170 # (cx, cy).
00171 float64[9] K # 3x3 row-major matrix
00172
00173 # Rectification matrix (stereo cameras only)
00174 # A rotation matrix aligning the camera coordinate system to the ideal
00175 # stereo image plane so that epipolar lines in both stereo images are
00176 # parallel.
00177 float64[9] R # 3x3 row-major matrix
00178
00179 # Projection/camera matrix
00180 # [fx' 0 cx' Tx]
00181 # P = [ 0 fy' cy' Ty]
00182 # [ 0 0 1 0]
00183 # By convention, this matrix specifies the intrinsic (camera) matrix
00184 # of the processed (rectified) image. That is, the left 3x3 portion
00185 # is the normal camera intrinsic matrix for the rectified image.
00186 # It projects 3D points in the camera coordinate frame to 2D pixel
00187 # coordinates using the focal lengths (fx', fy') and principal point
00188 # (cx', cy') - these may differ from the values in K.
00189 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will
00190 # also have R = the identity and P[1:3,1:3] = K.
00191 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the
00192 # position of the optical center of the second camera in the first
00193 # camera's frame. We assume Tz = 0 so both cameras are in the same
00194 # stereo image plane. The first camera always has Tx = Ty = 0. For
00195 # the right (second) camera of a horizontal stereo pair, Ty = 0 and
00196 # Tx = -fx' * B, where B is the baseline between the cameras.
00197 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto
00198 # the rectified image is given by:
00199 # [u v w]' = P * [X Y Z 1]'
00200 # x = u / w
00201 # y = v / w
00202 # This holds for both images of a stereo pair.
00203 float64[12] P # 3x4 row-major matrix
00204
00205
00206 #######################################################################
00207 # Operational Parameters #
00208 #######################################################################
00209 # These define the image region actually captured by the camera #
00210 # driver. Although they affect the geometry of the output image, they #
00211 # may be changed freely without recalibrating the camera. #
00212 #######################################################################
00213
00214 # Binning refers here to any camera setting which combines rectangular
00215 # neighborhoods of pixels into larger "super-pixels." It reduces the
00216 # resolution of the output image to
00217 # (width / binning_x) x (height / binning_y).
00218 # The default values binning_x = binning_y = 0 are considered the same
00219 # as binning_x = binning_y = 1 (no subsampling).
00220 uint32 binning_x
00221 uint32 binning_y
00222
00223 # Region of interest (subwindow of full camera resolution), given in
00224 # full resolution (unbinned) image coordinates. A particular ROI
00225 # always denotes the same window of pixels on the camera sensor,
00226 # regardless of binning settings.
00227 # The default setting of roi (all values 0) is considered the same as
00228 # full resolution (roi.width = width, roi.height = height).
00229 RegionOfInterest roi
00230
00231 ================================================================================
00232 MSG: sensor_msgs/RegionOfInterest
00233 # This message is used to specify a region of interest within an image.
00234 #
00235 # When used to specify the ROI setting of the camera when the image was
00236 # taken, the height and width fields should either match the height and
00237 # width fields for the associated image; or height = width = 0
00238 # indicates that the full resolution image was captured.
00239
00240 uint32 x_offset # Leftmost pixel of the ROI
00241 # (0 if the ROI includes the left edge of the image)
00242 uint32 y_offset # Topmost pixel of the ROI
00243 # (0 if the ROI includes the top edge of the image)
00244 uint32 height # Height of ROI
00245 uint32 width # Width of ROI
00246
00247 # True if a distinct rectified ROI should be calculated from the "raw"
00248 # ROI in this message. Typically this should be False if the full image
00249 # is captured (ROI not used), and True if a subwindow is captured (ROI
00250 # used).
00251 bool do_rectify
00252
00253 ================================================================================
00254 MSG: interactive_perception_msgs/ModelHypothesisList
00255 ModelHypothesis[] hypotheses
00256
00257 #initial guess whether this can be a correct recognition result at all
00258 bool accept
00259 ================================================================================
00260 MSG: interactive_perception_msgs/ModelHypothesis
00261 #describes a hypothesis about a recognized object (mesh+pose)
00262
00263 shape_msgs/Mesh mesh
00264 geometry_msgs/PoseStamped pose
00265
00266 ================================================================================
00267 MSG: shape_msgs/Mesh
00268 # Definition of a mesh
00269
00270 # list of triangles; the index values refer to positions in vertices[]
00271 MeshTriangle[] triangles
00272
00273 # the actual vertices that make up the mesh
00274 geometry_msgs/Point[] vertices
00275
00276 ================================================================================
00277 MSG: shape_msgs/MeshTriangle
00278 # Definition of a triangle's vertices
00279 uint32[3] vertex_indices
00280
00281 ================================================================================
00282 MSG: geometry_msgs/Point
00283 # This contains the position of a point in free space
00284 float64 x
00285 float64 y
00286 float64 z
00287
00288 ================================================================================
00289 MSG: geometry_msgs/PoseStamped
00290 # A Pose with reference coordinate frame and timestamp
00291 Header header
00292 Pose pose
00293
00294 ================================================================================
00295 MSG: geometry_msgs/Pose
00296 # A representation of pose in free space, composed of position and orientation.
00297 Point position
00298 Quaternion orientation
00299
00300 ================================================================================
00301 MSG: geometry_msgs/Quaternion
00302 # This represents an orientation in free space in quaternion form.
00303
00304 float64 x
00305 float64 y
00306 float64 z
00307 float64 w
00308
00309 """
00310 __slots__ = ['header','goal_id','goal']
00311 _slot_types = ['std_msgs/Header','actionlib_msgs/GoalID','interactive_perception_msgs/ObjectRecognitionGuiGoal']
00312
00313 def __init__(self, *args, **kwds):
00314 """
00315 Constructor. Any message fields that are implicitly/explicitly
00316 set to None will be assigned a default value. The recommended
00317 use is keyword arguments, as this is more robust to future message
00318 changes. You cannot mix in-order arguments and keyword arguments.
00319
00320 The available fields are:
00321 header,goal_id,goal
00322
00323 :param args: complete set of field values, in .msg order
00324 :param kwds: use keyword arguments corresponding to message field names
00325 to set specific fields.
00326 """
00327 if args or kwds:
00328 super(ObjectRecognitionGuiActionGoal, self).__init__(*args, **kwds)
00329
00330 if self.header is None:
00331 self.header = std_msgs.msg.Header()
00332 if self.goal_id is None:
00333 self.goal_id = actionlib_msgs.msg.GoalID()
00334 if self.goal is None:
00335 self.goal = interactive_perception_msgs.msg.ObjectRecognitionGuiGoal()
00336 else:
00337 self.header = std_msgs.msg.Header()
00338 self.goal_id = actionlib_msgs.msg.GoalID()
00339 self.goal = interactive_perception_msgs.msg.ObjectRecognitionGuiGoal()
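# A minimal construction sketch (assuming the std_msgs, actionlib_msgs and
# interactive_perception_msgs packages are importable; the frame and goal-id
# strings below are placeholders):
#
#   msg = ObjectRecognitionGuiActionGoal(
#       header=std_msgs.msg.Header(frame_id='camera_rgb_optical_frame'),
#       goal_id=actionlib_msgs.msg.GoalID(id='recognition_goal_1'))
#   msg.goal.image.encoding = 'bgr8'
#   msg.goal.camera_info.K = [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]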
00340
00341 def _get_types(self):
00342 """
00343 internal API method
00344 """
00345 return self._slot_types
00346
00347 def serialize(self, buff):
00348 """
00349 serialize message into buffer
00350 :param buff: buffer, ``StringIO``
00351 """
00352 try:
00353 _x = self
00354 buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
00355 _x = self.header.frame_id
00356 length = len(_x)
00357 if python3 or type(_x) == unicode:
00358 _x = _x.encode('utf-8')
00359 length = len(_x)
00360 buff.write(struct.pack('<I%ss'%length, length, _x))
00361 _x = self
00362 buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
00363 _x = self.goal_id.id
00364 length = len(_x)
00365 if python3 or type(_x) == unicode:
00366 _x = _x.encode('utf-8')
00367 length = len(_x)
00368 buff.write(struct.pack('<I%ss'%length, length, _x))
00369 _x = self
00370 buff.write(_struct_3I.pack(_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs))
00371 _x = self.goal.image.header.frame_id
00372 length = len(_x)
00373 if python3 or type(_x) == unicode:
00374 _x = _x.encode('utf-8')
00375 length = len(_x)
00376 buff.write(struct.pack('<I%ss'%length, length, _x))
00377 _x = self
00378 buff.write(_struct_2I.pack(_x.goal.image.height, _x.goal.image.width))
00379 _x = self.goal.image.encoding
00380 length = len(_x)
00381 if python3 or type(_x) == unicode:
00382 _x = _x.encode('utf-8')
00383 length = len(_x)
00384 buff.write(struct.pack('<I%ss'%length, length, _x))
00385 _x = self
00386 buff.write(_struct_BI.pack(_x.goal.image.is_bigendian, _x.goal.image.step))
00387 _x = self.goal.image.data
00388 length = len(_x)
00389
00390 if type(_x) in [list, tuple]:
00391 buff.write(struct.pack('<I%sB'%length, length, *_x))
00392 else:
00393 buff.write(struct.pack('<I%ss'%length, length, _x))
00394 _x = self
00395 buff.write(_struct_3I.pack(_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs))
00396 _x = self.goal.camera_info.header.frame_id
00397 length = len(_x)
00398 if python3 or type(_x) == unicode:
00399 _x = _x.encode('utf-8')
00400 length = len(_x)
00401 buff.write(struct.pack('<I%ss'%length, length, _x))
00402 _x = self
00403 buff.write(_struct_2I.pack(_x.goal.camera_info.height, _x.goal.camera_info.width))
00404 _x = self.goal.camera_info.distortion_model
00405 length = len(_x)
00406 if python3 or type(_x) == unicode:
00407 _x = _x.encode('utf-8')
00408 length = len(_x)
00409 buff.write(struct.pack('<I%ss'%length, length, _x))
00410 length = len(self.goal.camera_info.D)
00411 buff.write(_struct_I.pack(length))
00412 pattern = '<%sd'%length
00413 buff.write(struct.pack(pattern, *self.goal.camera_info.D))
00414 buff.write(_struct_9d.pack(*self.goal.camera_info.K))
00415 buff.write(_struct_9d.pack(*self.goal.camera_info.R))
00416 buff.write(_struct_12d.pack(*self.goal.camera_info.P))
00417 _x = self
00418 buff.write(_struct_6IB.pack(_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify))
00419 length = len(self.goal.model_hypotheses)
00420 buff.write(_struct_I.pack(length))
00421 for val1 in self.goal.model_hypotheses:
00422 length = len(val1.hypotheses)
00423 buff.write(_struct_I.pack(length))
00424 for val2 in val1.hypotheses:
00425 _v1 = val2.mesh
00426 length = len(_v1.triangles)
00427 buff.write(_struct_I.pack(length))
00428 for val4 in _v1.triangles:
00429 buff.write(_struct_3I.pack(*val4.vertex_indices))
00430 length = len(_v1.vertices)
00431 buff.write(_struct_I.pack(length))
00432 for val4 in _v1.vertices:
00433 _x = val4
00434 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00435 _v2 = val2.pose
00436 _v3 = _v2.header
00437 buff.write(_struct_I.pack(_v3.seq))
00438 _v4 = _v3.stamp
00439 _x = _v4
00440 buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
00441 _x = _v3.frame_id
00442 length = len(_x)
00443 if python3 or type(_x) == unicode:
00444 _x = _x.encode('utf-8')
00445 length = len(_x)
00446 buff.write(struct.pack('<I%ss'%length, length, _x))
00447 _v5 = _v2.pose
00448 _v6 = _v5.position
00449 _x = _v6
00450 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00451 _v7 = _v5.orientation
00452 _x = _v7
00453 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
00454 buff.write(_struct_B.pack(val1.accept))
00455 except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
00456 except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
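# Wire-format note: serialize() packs fields little-endian in declaration order.
# Strings and variable-length arrays (frame_id, D, image data, model_hypotheses)
# are prefixed with a uint32 length, while fixed-size arrays such as K/R (9 float64
# each) and P (12 float64) are packed without a length prefix. deserialize() below
# walks the same layout with a moving start/end window over the input buffer.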
00457
00458 def deserialize(self, str):
00459 """
00460 unpack serialized message in str into this message instance
00461 :param str: byte array of serialized message, ``str``
00462 """
00463 try:
00464 if self.header is None:
00465 self.header = std_msgs.msg.Header()
00466 if self.goal_id is None:
00467 self.goal_id = actionlib_msgs.msg.GoalID()
00468 if self.goal is None:
00469 self.goal = interactive_perception_msgs.msg.ObjectRecognitionGuiGoal()
00470 end = 0
00471 _x = self
00472 start = end
00473 end += 12
00474 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00475 start = end
00476 end += 4
00477 (length,) = _struct_I.unpack(str[start:end])
00478 start = end
00479 end += length
00480 if python3:
00481 self.header.frame_id = str[start:end].decode('utf-8')
00482 else:
00483 self.header.frame_id = str[start:end]
00484 _x = self
00485 start = end
00486 end += 8
00487 (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
00488 start = end
00489 end += 4
00490 (length,) = _struct_I.unpack(str[start:end])
00491 start = end
00492 end += length
00493 if python3:
00494 self.goal_id.id = str[start:end].decode('utf-8')
00495 else:
00496 self.goal_id.id = str[start:end]
00497 _x = self
00498 start = end
00499 end += 12
00500 (_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00501 start = end
00502 end += 4
00503 (length,) = _struct_I.unpack(str[start:end])
00504 start = end
00505 end += length
00506 if python3:
00507 self.goal.image.header.frame_id = str[start:end].decode('utf-8')
00508 else:
00509 self.goal.image.header.frame_id = str[start:end]
00510 _x = self
00511 start = end
00512 end += 8
00513 (_x.goal.image.height, _x.goal.image.width,) = _struct_2I.unpack(str[start:end])
00514 start = end
00515 end += 4
00516 (length,) = _struct_I.unpack(str[start:end])
00517 start = end
00518 end += length
00519 if python3:
00520 self.goal.image.encoding = str[start:end].decode('utf-8')
00521 else:
00522 self.goal.image.encoding = str[start:end]
00523 _x = self
00524 start = end
00525 end += 5
00526 (_x.goal.image.is_bigendian, _x.goal.image.step,) = _struct_BI.unpack(str[start:end])
00527 start = end
00528 end += 4
00529 (length,) = _struct_I.unpack(str[start:end])
00530 start = end
00531 end += length
00532 self.goal.image.data = str[start:end]
00533 _x = self
00534 start = end
00535 end += 12
00536 (_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00537 start = end
00538 end += 4
00539 (length,) = _struct_I.unpack(str[start:end])
00540 start = end
00541 end += length
00542 if python3:
00543 self.goal.camera_info.header.frame_id = str[start:end].decode('utf-8')
00544 else:
00545 self.goal.camera_info.header.frame_id = str[start:end]
00546 _x = self
00547 start = end
00548 end += 8
00549 (_x.goal.camera_info.height, _x.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
00550 start = end
00551 end += 4
00552 (length,) = _struct_I.unpack(str[start:end])
00553 start = end
00554 end += length
00555 if python3:
00556 self.goal.camera_info.distortion_model = str[start:end].decode('utf-8')
00557 else:
00558 self.goal.camera_info.distortion_model = str[start:end]
00559 start = end
00560 end += 4
00561 (length,) = _struct_I.unpack(str[start:end])
00562 pattern = '<%sd'%length
00563 start = end
00564 end += struct.calcsize(pattern)
00565 self.goal.camera_info.D = struct.unpack(pattern, str[start:end])
00566 start = end
00567 end += 72
00568 self.goal.camera_info.K = _struct_9d.unpack(str[start:end])
00569 start = end
00570 end += 72
00571 self.goal.camera_info.R = _struct_9d.unpack(str[start:end])
00572 start = end
00573 end += 96
00574 self.goal.camera_info.P = _struct_12d.unpack(str[start:end])
00575 _x = self
00576 start = end
00577 end += 25
00578 (_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
00579 self.goal.camera_info.roi.do_rectify = bool(self.goal.camera_info.roi.do_rectify)
00580 start = end
00581 end += 4
00582 (length,) = _struct_I.unpack(str[start:end])
00583 self.goal.model_hypotheses = []
00584 for i in range(0, length):
00585 val1 = interactive_perception_msgs.msg.ModelHypothesisList()
00586 start = end
00587 end += 4
00588 (length,) = _struct_I.unpack(str[start:end])
00589 val1.hypotheses = []
00590 for i in range(0, length):
00591 val2 = interactive_perception_msgs.msg.ModelHypothesis()
00592 _v8 = val2.mesh
00593 start = end
00594 end += 4
00595 (length,) = _struct_I.unpack(str[start:end])
00596 _v8.triangles = []
00597 for i in range(0, length):
00598 val4 = shape_msgs.msg.MeshTriangle()
00599 start = end
00600 end += 12
00601 val4.vertex_indices = _struct_3I.unpack(str[start:end])
00602 _v8.triangles.append(val4)
00603 start = end
00604 end += 4
00605 (length,) = _struct_I.unpack(str[start:end])
00606 _v8.vertices = []
00607 for i in range(0, length):
00608 val4 = geometry_msgs.msg.Point()
00609 _x = val4
00610 start = end
00611 end += 24
00612 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00613 _v8.vertices.append(val4)
00614 _v9 = val2.pose
00615 _v10 = _v9.header
00616 start = end
00617 end += 4
00618 (_v10.seq,) = _struct_I.unpack(str[start:end])
00619 _v11 = _v10.stamp
00620 _x = _v11
00621 start = end
00622 end += 8
00623 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
00624 start = end
00625 end += 4
00626 (length,) = _struct_I.unpack(str[start:end])
00627 start = end
00628 end += length
00629 if python3:
00630 _v10.frame_id = str[start:end].decode('utf-8')
00631 else:
00632 _v10.frame_id = str[start:end]
00633 _v12 = _v9.pose
00634 _v13 = _v12.position
00635 _x = _v13
00636 start = end
00637 end += 24
00638 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00639 _v14 = _v12.orientation
00640 _x = _v14
00641 start = end
00642 end += 32
00643 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
00644 val1.hypotheses.append(val2)
00645 start = end
00646 end += 1
00647 (val1.accept,) = _struct_B.unpack(str[start:end])
00648 val1.accept = bool(val1.accept)
00649 self.goal.model_hypotheses.append(val1)
00650 return self
00651 except struct.error as e:
00652 raise genpy.DeserializationError(e)
00653
00654
00655 def serialize_numpy(self, buff, numpy):
00656 """
00657 serialize message with numpy array types into buffer
00658 :param buff: buffer, ``StringIO``
00659 :param numpy: numpy python module
00660 """
00661 try:
00662 _x = self
00663 buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
00664 _x = self.header.frame_id
00665 length = len(_x)
00666 if python3 or type(_x) == unicode:
00667 _x = _x.encode('utf-8')
00668 length = len(_x)
00669 buff.write(struct.pack('<I%ss'%length, length, _x))
00670 _x = self
00671 buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
00672 _x = self.goal_id.id
00673 length = len(_x)
00674 if python3 or type(_x) == unicode:
00675 _x = _x.encode('utf-8')
00676 length = len(_x)
00677 buff.write(struct.pack('<I%ss'%length, length, _x))
00678 _x = self
00679 buff.write(_struct_3I.pack(_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs))
00680 _x = self.goal.image.header.frame_id
00681 length = len(_x)
00682 if python3 or type(_x) == unicode:
00683 _x = _x.encode('utf-8')
00684 length = len(_x)
00685 buff.write(struct.pack('<I%ss'%length, length, _x))
00686 _x = self
00687 buff.write(_struct_2I.pack(_x.goal.image.height, _x.goal.image.width))
00688 _x = self.goal.image.encoding
00689 length = len(_x)
00690 if python3 or type(_x) == unicode:
00691 _x = _x.encode('utf-8')
00692 length = len(_x)
00693 buff.write(struct.pack('<I%ss'%length, length, _x))
00694 _x = self
00695 buff.write(_struct_BI.pack(_x.goal.image.is_bigendian, _x.goal.image.step))
00696 _x = self.goal.image.data
00697 length = len(_x)
00698
00699 if type(_x) in [list, tuple]:
00700 buff.write(struct.pack('<I%sB'%length, length, *_x))
00701 else:
00702 buff.write(struct.pack('<I%ss'%length, length, _x))
00703 _x = self
00704 buff.write(_struct_3I.pack(_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs))
00705 _x = self.goal.camera_info.header.frame_id
00706 length = len(_x)
00707 if python3 or type(_x) == unicode:
00708 _x = _x.encode('utf-8')
00709 length = len(_x)
00710 buff.write(struct.pack('<I%ss'%length, length, _x))
00711 _x = self
00712 buff.write(_struct_2I.pack(_x.goal.camera_info.height, _x.goal.camera_info.width))
00713 _x = self.goal.camera_info.distortion_model
00714 length = len(_x)
00715 if python3 or type(_x) == unicode:
00716 _x = _x.encode('utf-8')
00717 length = len(_x)
00718 buff.write(struct.pack('<I%ss'%length, length, _x))
00719 length = len(self.goal.camera_info.D)
00720 buff.write(_struct_I.pack(length))
00721 pattern = '<%sd'%length
00722 buff.write(self.goal.camera_info.D.tostring())
00723 buff.write(self.goal.camera_info.K.tostring())
00724 buff.write(self.goal.camera_info.R.tostring())
00725 buff.write(self.goal.camera_info.P.tostring())
00726 _x = self
00727 buff.write(_struct_6IB.pack(_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify))
00728 length = len(self.goal.model_hypotheses)
00729 buff.write(_struct_I.pack(length))
00730 for val1 in self.goal.model_hypotheses:
00731 length = len(val1.hypotheses)
00732 buff.write(_struct_I.pack(length))
00733 for val2 in val1.hypotheses:
00734 _v15 = val2.mesh
00735 length = len(_v15.triangles)
00736 buff.write(_struct_I.pack(length))
00737 for val4 in _v15.triangles:
00738 buff.write(val4.vertex_indices.tostring())
00739 length = len(_v15.vertices)
00740 buff.write(_struct_I.pack(length))
00741 for val4 in _v15.vertices:
00742 _x = val4
00743 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00744 _v16 = val2.pose
00745 _v17 = _v16.header
00746 buff.write(_struct_I.pack(_v17.seq))
00747 _v18 = _v17.stamp
00748 _x = _v18
00749 buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
00750 _x = _v17.frame_id
00751 length = len(_x)
00752 if python3 or type(_x) == unicode:
00753 _x = _x.encode('utf-8')
00754 length = len(_x)
00755 buff.write(struct.pack('<I%ss'%length, length, _x))
00756 _v19 = _v16.pose
00757 _v20 = _v19.position
00758 _x = _v20
00759 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00760 _v21 = _v19.orientation
00761 _x = _v21
00762 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
00763 buff.write(_struct_B.pack(val1.accept))
00764 except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
00765 except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
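# Note: serialize_numpy() assumes the array fields (camera_info.D, K, R, P and each
# triangle's vertex_indices) are numpy.ndarray instances, as produced by
# deserialize_numpy(); plain Python lists would need numpy.asarray(...) first.
# ndarray.tostring() as used here is the legacy alias of tobytes().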
00766
00767 def deserialize_numpy(self, str, numpy):
00768 """
00769 unpack serialized message in str into this message instance using numpy for array types
00770 :param str: byte array of serialized message, ``str``
00771 :param numpy: numpy python module
00772 """
00773 try:
00774 if self.header is None:
00775 self.header = std_msgs.msg.Header()
00776 if self.goal_id is None:
00777 self.goal_id = actionlib_msgs.msg.GoalID()
00778 if self.goal is None:
00779 self.goal = interactive_perception_msgs.msg.ObjectRecognitionGuiGoal()
00780 end = 0
00781 _x = self
00782 start = end
00783 end += 12
00784 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00785 start = end
00786 end += 4
00787 (length,) = _struct_I.unpack(str[start:end])
00788 start = end
00789 end += length
00790 if python3:
00791 self.header.frame_id = str[start:end].decode('utf-8')
00792 else:
00793 self.header.frame_id = str[start:end]
00794 _x = self
00795 start = end
00796 end += 8
00797 (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
00798 start = end
00799 end += 4
00800 (length,) = _struct_I.unpack(str[start:end])
00801 start = end
00802 end += length
00803 if python3:
00804 self.goal_id.id = str[start:end].decode('utf-8')
00805 else:
00806 self.goal_id.id = str[start:end]
00807 _x = self
00808 start = end
00809 end += 12
00810 (_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00811 start = end
00812 end += 4
00813 (length,) = _struct_I.unpack(str[start:end])
00814 start = end
00815 end += length
00816 if python3:
00817 self.goal.image.header.frame_id = str[start:end].decode('utf-8')
00818 else:
00819 self.goal.image.header.frame_id = str[start:end]
00820 _x = self
00821 start = end
00822 end += 8
00823 (_x.goal.image.height, _x.goal.image.width,) = _struct_2I.unpack(str[start:end])
00824 start = end
00825 end += 4
00826 (length,) = _struct_I.unpack(str[start:end])
00827 start = end
00828 end += length
00829 if python3:
00830 self.goal.image.encoding = str[start:end].decode('utf-8')
00831 else:
00832 self.goal.image.encoding = str[start:end]
00833 _x = self
00834 start = end
00835 end += 5
00836 (_x.goal.image.is_bigendian, _x.goal.image.step,) = _struct_BI.unpack(str[start:end])
00837 start = end
00838 end += 4
00839 (length,) = _struct_I.unpack(str[start:end])
00840 start = end
00841 end += length
00842 self.goal.image.data = str[start:end]
00843 _x = self
00844 start = end
00845 end += 12
00846 (_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00847 start = end
00848 end += 4
00849 (length,) = _struct_I.unpack(str[start:end])
00850 start = end
00851 end += length
00852 if python3:
00853 self.goal.camera_info.header.frame_id = str[start:end].decode('utf-8')
00854 else:
00855 self.goal.camera_info.header.frame_id = str[start:end]
00856 _x = self
00857 start = end
00858 end += 8
00859 (_x.goal.camera_info.height, _x.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
00860 start = end
00861 end += 4
00862 (length,) = _struct_I.unpack(str[start:end])
00863 start = end
00864 end += length
00865 if python3:
00866 self.goal.camera_info.distortion_model = str[start:end].decode('utf-8')
00867 else:
00868 self.goal.camera_info.distortion_model = str[start:end]
00869 start = end
00870 end += 4
00871 (length,) = _struct_I.unpack(str[start:end])
00872 pattern = '<%sd'%length
00873 start = end
00874 end += struct.calcsize(pattern)
00875 self.goal.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
00876 start = end
00877 end += 72
00878 self.goal.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
00879 start = end
00880 end += 72
00881 self.goal.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
00882 start = end
00883 end += 96
00884 self.goal.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)
00885 _x = self
00886 start = end
00887 end += 25
00888 (_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
00889 self.goal.camera_info.roi.do_rectify = bool(self.goal.camera_info.roi.do_rectify)
00890 start = end
00891 end += 4
00892 (length,) = _struct_I.unpack(str[start:end])
00893 self.goal.model_hypotheses = []
00894 for i in range(0, length):
00895 val1 = interactive_perception_msgs.msg.ModelHypothesisList()
00896 start = end
00897 end += 4
00898 (length,) = _struct_I.unpack(str[start:end])
00899 val1.hypotheses = []
00900 for i in range(0, length):
00901 val2 = interactive_perception_msgs.msg.ModelHypothesis()
00902 _v22 = val2.mesh
00903 start = end
00904 end += 4
00905 (length,) = _struct_I.unpack(str[start:end])
00906 _v22.triangles = []
00907 for i in range(0, length):
00908 val4 = shape_msgs.msg.MeshTriangle()
00909 start = end
00910 end += 12
00911 val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)
00912 _v22.triangles.append(val4)
00913 start = end
00914 end += 4
00915 (length,) = _struct_I.unpack(str[start:end])
00916 _v22.vertices = []
00917 for i in range(0, length):
00918 val4 = geometry_msgs.msg.Point()
00919 _x = val4
00920 start = end
00921 end += 24
00922 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00923 _v22.vertices.append(val4)
00924 _v23 = val2.pose
00925 _v24 = _v23.header
00926 start = end
00927 end += 4
00928 (_v24.seq,) = _struct_I.unpack(str[start:end])
00929 _v25 = _v24.stamp
00930 _x = _v25
00931 start = end
00932 end += 8
00933 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
00934 start = end
00935 end += 4
00936 (length,) = _struct_I.unpack(str[start:end])
00937 start = end
00938 end += length
00939 if python3:
00940 _v24.frame_id = str[start:end].decode('utf-8')
00941 else:
00942 _v24.frame_id = str[start:end]
00943 _v26 = _v23.pose
00944 _v27 = _v26.position
00945 _x = _v27
00946 start = end
00947 end += 24
00948 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00949 _v28 = _v26.orientation
00950 _x = _v28
00951 start = end
00952 end += 32
00953 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
00954 val1.hypotheses.append(val2)
00955 start = end
00956 end += 1
00957 (val1.accept,) = _struct_B.unpack(str[start:end])
00958 val1.accept = bool(val1.accept)
00959 self.goal.model_hypotheses.append(val1)
00960 return self
00961 except struct.error as e:
00962 raise genpy.DeserializationError(e)
00963
00964 _struct_I = genpy.struct_I
00965 _struct_6IB = struct.Struct("<6IB")
00966 _struct_B = struct.Struct("<B")
00967 _struct_12d = struct.Struct("<12d")
00968 _struct_9d = struct.Struct("<9d")
00969 _struct_BI = struct.Struct("<BI")
00970 _struct_3I = struct.Struct("<3I")
00971 _struct_4d = struct.Struct("<4d")
00972 _struct_2I = struct.Struct("<2I")
00973 _struct_3d = struct.Struct("<3d")
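
# A round-trip sketch (illustrative, not part of the generated API), assuming a ROS
# Python environment where interactive_perception_msgs and its dependencies import:
if __name__ == '__main__':
    from io import BytesIO

    msg = ObjectRecognitionGuiActionGoal()
    msg.header.frame_id = 'camera_rgb_optical_frame'   # placeholder frame
    msg.goal_id.id = 'recognition_goal_1'              # placeholder goal id

    buff = BytesIO()
    msg.serialize(buff)                                 # pack into bytes

    copy = ObjectRecognitionGuiActionGoal()
    copy.deserialize(buff.getvalue())                   # unpack the same bytes
    assert copy.goal_id.id == msg.goal_id.id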