00001 """autogenerated by genmsg_py from ObjectRecognitionGuiActionGoal.msg. Do not edit."""
00002 import roslib.message
00003 import struct
00004
00005 import roslib.rostime
00006 import actionlib_msgs.msg
00007 import geometric_shapes_msgs.msg
00008 import geometry_msgs.msg
00009 import object_recognition_gui.msg
00010 import sensor_msgs.msg
00011 import std_msgs.msg
00012
00013 class ObjectRecognitionGuiActionGoal(roslib.message.Message):
00014 _md5sum = "e9351c4dd7bd646f1f371db4c7fd13af"
00015 _type = "object_recognition_gui/ObjectRecognitionGuiActionGoal"
00016 _has_header = True
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======

Header header
actionlib_msgs/GoalID goal_id
ObjectRecognitionGuiGoal goal

================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id

================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp

# The id provides a way to associate feedback and
# result messages with specific goal requests. The id
# specified must be unique.
string id


================================================================================
MSG: object_recognition_gui/ObjectRecognitionGuiGoal
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======

#the original sensor data (depth/disparity image)
sensor_msgs/Image image
sensor_msgs/CameraInfo camera_info

#list of mesh/pose hypotheses for each recognized point cluster
ModelHypothesisList[] model_hypotheses

================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#

Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
# +x should point to the right in the image
# +y should point down in the image
# +z should point into the plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict,
# the behavior is undefined

uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns

# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# ros-users@lists.sourceforge.net and send an email proposing a new encoding.

string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in src/image_encodings.cpp

uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
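# Example (illustrative numbers): a 640x480 image with the 1-byte-per-pixel
# "mono8" encoding typically has step = 640, so len(data) = step * height =
# 307200 bytes.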

================================================================================
MSG: sensor_msgs/CameraInfo
# This message defines meta information for a camera. It should be in a
# camera namespace on topic "camera_info" and accompanied by up to five
# image topics named:
#
# image_raw - raw data from the camera driver, possibly Bayer encoded
# image - monochrome, distorted
# image_color - color, distorted
# image_rect - monochrome, rectified
# image_rect_color - color, rectified
#
# The image_pipeline contains packages (image_proc, stereo_image_proc)
# for producing the four processed image topics from image_raw and
# camera_info. The meaning of the camera parameters is described in
# detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.
#
# The image_geometry package provides a user-friendly interface to
# common operations using this meta information. If you want to, e.g.,
# project a 3d point into image coordinates, we strongly recommend
# using image_geometry.
#
# If the camera is uncalibrated, the matrices D, K, R, P should be left
# zeroed out. In particular, clients may assume that K[0] == 0.0
# indicates an uncalibrated camera.

#######################################################################
# Image acquisition info #
#######################################################################

# Time of image acquisition, camera coordinate frame ID
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
# +x should point to the right in the image
# +y should point down in the image
# +z should point into the plane of the image


#######################################################################
# Calibration Parameters #
#######################################################################
# These are fixed during camera calibration. Their values will be the #
# same in all messages until the camera is recalibrated. Note that #
# self-calibrating systems may "recalibrate" frequently. #
# #
# The internal parameters can be used to warp a raw (distorted) image #
# to: #
# 1. An undistorted image (requires D and K) #
# 2. A rectified image (requires D, K, R) #
# The projection matrix P projects 3D points into the rectified image.#
#######################################################################

# The image dimensions with which the camera was calibrated. Normally
# this will be the full camera resolution in pixels.
uint32 height
uint32 width

# The distortion model used. Supported models are listed in
# sensor_msgs/distortion_models.h. For most cameras, "plumb_bob" - a
# simple model of radial and tangential distortion - is sufficient.
string distortion_model

# The distortion parameters, size depending on the distortion model.
# For "plumb_bob", the 5 parameters are: (k1, k2, t1, t2, k3).
float64[] D

# Intrinsic camera matrix for the raw (distorted) images.
#     [fx  0 cx]
# K = [ 0 fy cy]
#     [ 0  0  1]
# Projects 3D points in the camera coordinate frame to 2D pixel
# coordinates using the focal lengths (fx, fy) and principal point
# (cx, cy).
float64[9] K # 3x3 row-major matrix

# Rectification matrix (stereo cameras only)
# A rotation matrix aligning the camera coordinate system to the ideal
# stereo image plane so that epipolar lines in both stereo images are
# parallel.
float64[9] R # 3x3 row-major matrix

# Projection/camera matrix
#     [fx'  0  cx' Tx]
# P = [ 0  fy' cy' Ty]
#     [ 0   0   1   0]
# By convention, this matrix specifies the intrinsic (camera) matrix
# of the processed (rectified) image. That is, the left 3x3 portion
# is the normal camera intrinsic matrix for the rectified image.
# It projects 3D points in the camera coordinate frame to 2D pixel
# coordinates using the focal lengths (fx', fy') and principal point
# (cx', cy') - these may differ from the values in K.
# For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will
# also have R = the identity and P[1:3,1:3] = K.
# For a stereo pair, the fourth column [Tx Ty 0]' is related to the
# position of the optical center of the second camera in the first
# camera's frame. We assume Tz = 0 so both cameras are in the same
# stereo image plane. The first camera always has Tx = Ty = 0. For
# the right (second) camera of a horizontal stereo pair, Ty = 0 and
# Tx = -fx' * B, where B is the baseline between the cameras.
# Given a 3D point [X Y Z]', the projection (x, y) of the point onto
# the rectified image is given by:
# [u v w]' = P * [X Y Z 1]'
# x = u / w
# y = v / w
# This holds for both images of a stereo pair.
float64[12] P # 3x4 row-major matrix
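# Worked example (illustrative numbers only): with fx' = fy' = 500,
# cx' = 320, cy' = 240 and Tx = Ty = 0, the point [X Y Z]' = [0.5 -0.2 2.0]'
# gives [u v w]' = [890 380 2]', i.e. x = u / w = 445 and y = v / w = 190.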


#######################################################################
# Operational Parameters #
#######################################################################
# These define the image region actually captured by the camera #
# driver. Although they affect the geometry of the output image, they #
# may be changed freely without recalibrating the camera. #
#######################################################################

# Binning refers here to any camera setting which combines rectangular
# neighborhoods of pixels into larger "super-pixels." It reduces the
# resolution of the output image to
# (width / binning_x) x (height / binning_y).
# The default values binning_x = binning_y = 0 are considered the same
# as binning_x = binning_y = 1 (no subsampling).
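# For example, binning_x = binning_y = 2 on a 1280x960 sensor yields a
# 640x480 output image.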
uint32 binning_x
uint32 binning_y

# Region of interest (subwindow of full camera resolution), given in
# full resolution (unbinned) image coordinates. A particular ROI
# always denotes the same window of pixels on the camera sensor,
# regardless of binning settings.
# The default setting of roi (all values 0) is considered the same as
# full resolution (roi.width = width, roi.height = height).
RegionOfInterest roi

================================================================================
MSG: sensor_msgs/RegionOfInterest
# This message is used to specify a region of interest within an image.
#
# When used to specify the ROI setting of the camera when the image was
# taken, the height and width fields should either match the height and
# width fields for the associated image; or height = width = 0
# indicates that the full resolution image was captured.

uint32 x_offset # Leftmost pixel of the ROI
# (0 if the ROI includes the left edge of the image)
uint32 y_offset # Topmost pixel of the ROI
# (0 if the ROI includes the top edge of the image)
uint32 height # Height of ROI
uint32 width # Width of ROI

# True if a distinct rectified ROI should be calculated from the "raw"
# ROI in this message. Typically this should be False if the full image
# is captured (ROI not used), and True if a subwindow is captured (ROI
# used).
bool do_rectify

================================================================================
MSG: object_recognition_gui/ModelHypothesisList
ModelHypothesis[] hypotheses

#initial guess as to whether this can be a correct recognition result at all
bool accept
================================================================================
MSG: object_recognition_gui/ModelHypothesis
#describes a hypothesis about a recognized object (mesh+pose)

geometric_shapes_msgs/Shape mesh
geometry_msgs/PoseStamped pose

================================================================================
MSG: geometric_shapes_msgs/Shape
byte SPHERE=0
byte BOX=1
byte CYLINDER=2
byte MESH=3

byte type


#### define sphere, box, cylinder ####
# the origin of each shape is considered to be at the shape's center

# for sphere
# radius := dimensions[0]

# for cylinder
# radius := dimensions[0]
# length := dimensions[1]
# the length is along the Z axis

# for box
# size_x := dimensions[0]
# size_y := dimensions[1]
# size_z := dimensions[2]
float64[] dimensions
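# Example (illustrative values): a 0.1 x 0.2 x 0.3 m box is expressed as
# type = BOX with dimensions = [0.1, 0.2, 0.3].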


#### define mesh ####

# list of triangles; triangle k is defined by three vertices located
# at indices triangles[3k], triangles[3k+1], triangles[3k+2]
int32[] triangles
geometry_msgs/Point[] vertices
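# Example: a mesh consisting of a single triangle has three entries in
# vertices and triangles = [0, 1, 2].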

================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z

================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose

================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation

================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.

float64 x
float64 y
float64 z
float64 w

"""
  __slots__ = ['header','goal_id','goal']
  _slot_types = ['Header','actionlib_msgs/GoalID','object_recognition_gui/ObjectRecognitionGuiGoal']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
      header,goal_id,goal

    @param args: complete set of field values, in .msg order
    @param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(ObjectRecognitionGuiActionGoal, self).__init__(*args, **kwds)

      if self.header is None:
        self.header = std_msgs.msg._Header.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = object_recognition_gui.msg.ObjectRecognitionGuiGoal()
    else:
      self.header = std_msgs.msg._Header.Header()
      self.goal_id = actionlib_msgs.msg.GoalID()
      self.goal = object_recognition_gui.msg.ObjectRecognitionGuiGoal()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    @param buff: buffer
    @type buff: StringIO
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
      _x = self.goal_id.id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3I.pack(_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs))
      _x = self.goal.image.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal.image.height, _x.goal.image.width))
      _x = self.goal.image.encoding
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_BI.pack(_x.goal.image.is_bigendian, _x.goal.image.step))
      _x = self.goal.image.data
      length = len(_x)

      if type(_x) in [list, tuple]:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3I.pack(_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs))
      _x = self.goal.camera_info.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal.camera_info.height, _x.goal.camera_info.width))
      _x = self.goal.camera_info.distortion_model
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.goal.camera_info.D)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(struct.pack(pattern, *self.goal.camera_info.D))
      buff.write(_struct_9d.pack(*self.goal.camera_info.K))
      buff.write(_struct_9d.pack(*self.goal.camera_info.R))
      buff.write(_struct_12d.pack(*self.goal.camera_info.P))
      _x = self
      buff.write(_struct_6IB.pack(_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify))
      length = len(self.goal.model_hypotheses)
      buff.write(_struct_I.pack(length))
      for val1 in self.goal.model_hypotheses:
        length = len(val1.hypotheses)
        buff.write(_struct_I.pack(length))
        for val2 in val1.hypotheses:
          _v1 = val2.mesh
          buff.write(_struct_b.pack(_v1.type))
          length = len(_v1.dimensions)
          buff.write(_struct_I.pack(length))
          pattern = '<%sd'%length
          buff.write(struct.pack(pattern, *_v1.dimensions))
          length = len(_v1.triangles)
          buff.write(_struct_I.pack(length))
          pattern = '<%si'%length
          buff.write(struct.pack(pattern, *_v1.triangles))
          length = len(_v1.vertices)
          buff.write(_struct_I.pack(length))
          for val4 in _v1.vertices:
            _x = val4
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
          _v2 = val2.pose
          _v3 = _v2.header
          buff.write(_struct_I.pack(_v3.seq))
          _v4 = _v3.stamp
          _x = _v4
          buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
          _x = _v3.frame_id
          length = len(_x)
          buff.write(struct.pack('<I%ss'%length, length, _x))
          _v5 = _v2.pose
          _v6 = _v5.position
          _x = _v6
          buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
          _v7 = _v5.orientation
          _x = _v7
          buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        buff.write(_struct_B.pack(val1.accept))
    except struct.error, se: self._check_types(se)
    except TypeError, te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    @param str: byte array of serialized message
    @type str: str
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg._Header.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = object_recognition_gui.msg.ObjectRecognitionGuiGoal()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal_id.id = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal.image.height, _x.goal.image.width,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.encoding = str[start:end]
      _x = self
      start = end
      end += 5
      (_x.goal.image.is_bigendian, _x.goal.image.step,) = _struct_BI.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.data = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.camera_info.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal.camera_info.height, _x.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.camera_info.distortion_model = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.goal.camera_info.D = struct.unpack(pattern, str[start:end])
      start = end
      end += 72
      self.goal.camera_info.K = _struct_9d.unpack(str[start:end])
      start = end
      end += 72
      self.goal.camera_info.R = _struct_9d.unpack(str[start:end])
      start = end
      end += 96
      self.goal.camera_info.P = _struct_12d.unpack(str[start:end])
      _x = self
      start = end
      end += 25
      (_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
      self.goal.camera_info.roi.do_rectify = bool(self.goal.camera_info.roi.do_rectify)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.goal.model_hypotheses = []
      for i in xrange(0, length):
        val1 = object_recognition_gui.msg.ModelHypothesisList()
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        val1.hypotheses = []
        for i in xrange(0, length):
          val2 = object_recognition_gui.msg.ModelHypothesis()
          _v8 = val2.mesh
          start = end
          end += 1
          (_v8.type,) = _struct_b.unpack(str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          pattern = '<%sd'%length
          start = end
          end += struct.calcsize(pattern)
          _v8.dimensions = struct.unpack(pattern, str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          pattern = '<%si'%length
          start = end
          end += struct.calcsize(pattern)
          _v8.triangles = struct.unpack(pattern, str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          _v8.vertices = []
          for i in xrange(0, length):
            val4 = geometry_msgs.msg.Point()
            _x = val4
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v8.vertices.append(val4)
          _v9 = val2.pose
          _v10 = _v9.header
          start = end
          end += 4
          (_v10.seq,) = _struct_I.unpack(str[start:end])
          _v11 = _v10.stamp
          _x = _v11
          start = end
          end += 8
          (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          start = end
          end += length
          _v10.frame_id = str[start:end]
          _v12 = _v9.pose
          _v13 = _v12.position
          _x = _v13
          start = end
          end += 24
          (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
          _v14 = _v12.orientation
          _x = _v14
          start = end
          end += 32
          (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
          val1.hypotheses.append(val2)
        start = end
        end += 1
        (val1.accept,) = _struct_B.unpack(str[start:end])
        val1.accept = bool(val1.accept)
        self.goal.model_hypotheses.append(val1)
      return self
    except struct.error, e:
      raise roslib.message.DeserializationError(e)

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    @param buff: buffer
    @type buff: StringIO
    @param numpy: numpy python module
    @type numpy: module
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs))
      _x = self.goal_id.id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3I.pack(_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs))
      _x = self.goal.image.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal.image.height, _x.goal.image.width))
      _x = self.goal.image.encoding
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_BI.pack(_x.goal.image.is_bigendian, _x.goal.image.step))
      _x = self.goal.image.data
      length = len(_x)

      if type(_x) in [list, tuple]:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3I.pack(_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs))
      _x = self.goal.camera_info.header.frame_id
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.goal.camera_info.height, _x.goal.camera_info.width))
      _x = self.goal.camera_info.distortion_model
      length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.goal.camera_info.D)
      buff.write(_struct_I.pack(length))
      pattern = '<%sd'%length
      buff.write(self.goal.camera_info.D.tostring())
      buff.write(self.goal.camera_info.K.tostring())
      buff.write(self.goal.camera_info.R.tostring())
      buff.write(self.goal.camera_info.P.tostring())
      _x = self
      buff.write(_struct_6IB.pack(_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify))
      length = len(self.goal.model_hypotheses)
      buff.write(_struct_I.pack(length))
      for val1 in self.goal.model_hypotheses:
        length = len(val1.hypotheses)
        buff.write(_struct_I.pack(length))
        for val2 in val1.hypotheses:
          _v15 = val2.mesh
          buff.write(_struct_b.pack(_v15.type))
          length = len(_v15.dimensions)
          buff.write(_struct_I.pack(length))
          pattern = '<%sd'%length
          buff.write(_v15.dimensions.tostring())
          length = len(_v15.triangles)
          buff.write(_struct_I.pack(length))
          pattern = '<%si'%length
          buff.write(_v15.triangles.tostring())
          length = len(_v15.vertices)
          buff.write(_struct_I.pack(length))
          for val4 in _v15.vertices:
            _x = val4
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
          _v16 = val2.pose
          _v17 = _v16.header
          buff.write(_struct_I.pack(_v17.seq))
          _v18 = _v17.stamp
          _x = _v18
          buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
          _x = _v17.frame_id
          length = len(_x)
          buff.write(struct.pack('<I%ss'%length, length, _x))
          _v19 = _v16.pose
          _v20 = _v19.position
          _x = _v20
          buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
          _v21 = _v19.orientation
          _x = _v21
          buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
        buff.write(_struct_B.pack(val1.accept))
    except struct.error, se: self._check_types(se)
    except TypeError, te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    @param str: byte array of serialized message
    @type str: str
    @param numpy: numpy python module
    @type numpy: module
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg._Header.Header()
      if self.goal_id is None:
        self.goal_id = actionlib_msgs.msg.GoalID()
      if self.goal is None:
        self.goal = object_recognition_gui.msg.ObjectRecognitionGuiGoal()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal_id.stamp.secs, _x.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal_id.id = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.image.header.seq, _x.goal.image.header.stamp.secs, _x.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal.image.height, _x.goal.image.width,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.encoding = str[start:end]
      _x = self
      start = end
      end += 5
      (_x.goal.image.is_bigendian, _x.goal.image.step,) = _struct_BI.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.image.data = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.goal.camera_info.header.seq, _x.goal.camera_info.header.stamp.secs, _x.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.camera_info.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.goal.camera_info.height, _x.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      self.goal.camera_info.distortion_model = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sd'%length
      start = end
      end += struct.calcsize(pattern)
      self.goal.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
      start = end
      end += 72
      self.goal.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
      start = end
      end += 72
      self.goal.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
      start = end
      end += 96
      self.goal.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)
      _x = self
      start = end
      end += 25
      (_x.goal.camera_info.binning_x, _x.goal.camera_info.binning_y, _x.goal.camera_info.roi.x_offset, _x.goal.camera_info.roi.y_offset, _x.goal.camera_info.roi.height, _x.goal.camera_info.roi.width, _x.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
      self.goal.camera_info.roi.do_rectify = bool(self.goal.camera_info.roi.do_rectify)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.goal.model_hypotheses = []
      for i in xrange(0, length):
        val1 = object_recognition_gui.msg.ModelHypothesisList()
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        val1.hypotheses = []
        for i in xrange(0, length):
          val2 = object_recognition_gui.msg.ModelHypothesis()
          _v22 = val2.mesh
          start = end
          end += 1
          (_v22.type,) = _struct_b.unpack(str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          pattern = '<%sd'%length
          start = end
          end += struct.calcsize(pattern)
          _v22.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          pattern = '<%si'%length
          start = end
          end += struct.calcsize(pattern)
          _v22.triangles = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          _v22.vertices = []
          for i in xrange(0, length):
            val4 = geometry_msgs.msg.Point()
            _x = val4
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v22.vertices.append(val4)
          _v23 = val2.pose
          _v24 = _v23.header
          start = end
          end += 4
          (_v24.seq,) = _struct_I.unpack(str[start:end])
          _v25 = _v24.stamp
          _x = _v25
          start = end
          end += 8
          (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          start = end
          end += length
          _v24.frame_id = str[start:end]
          _v26 = _v23.pose
          _v27 = _v26.position
          _x = _v27
          start = end
          end += 24
          (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
          _v28 = _v26.orientation
          _x = _v28
          start = end
          end += 32
          (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
          val1.hypotheses.append(val2)
        start = end
        end += 1
        (val1.accept,) = _struct_B.unpack(str[start:end])
        val1.accept = bool(val1.accept)
        self.goal.model_hypotheses.append(val1)
      return self
    except struct.error, e:
      raise roslib.message.DeserializationError(e)

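# Precompiled little-endian struct patterns reused by the serialization
# methods above; _struct_I is the shared uint32 pattern used for length
# prefixes and other 4-byte fields.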
_struct_I = roslib.message.struct_I
_struct_6IB = struct.Struct("<6IB")
_struct_b = struct.Struct("<b")
_struct_12d = struct.Struct("<12d")
_struct_9d = struct.Struct("<9d")
_struct_BI = struct.Struct("<BI")
_struct_3I = struct.Struct("<3I")
_struct_B = struct.Struct("<B")
_struct_4d = struct.Struct("<4d")
_struct_2I = struct.Struct("<2I")
_struct_3d = struct.Struct("<3d")
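
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated API): a minimal
# serialize/deserialize round trip through a string buffer. Assumes a
# Python 2 / roslib environment with this package's dependencies importable;
# the frame name and goal id below are hypothetical.
if __name__ == '__main__':
  from cStringIO import StringIO

  msg = ObjectRecognitionGuiActionGoal()   # sub-messages get default values
  msg.header.frame_id = 'base_link'        # hypothetical frame name
  msg.goal_id.id = 'example_goal'          # hypothetical goal id

  buff = StringIO()
  msg.serialize(buff)                      # pack all fields little-endian
  wire = buff.getvalue()

  decoded = ObjectRecognitionGuiActionGoal()
  decoded.deserialize(wire)                # rebuild the message from bytes
  print decoded.header.frame_id, decoded.goal_id.id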