00001 """autogenerated by genpy from interactive_perception_msgs/ObjectRecognitionGuiAction.msg. Do not edit."""
00002 import sys
python3 = sys.hexversion > 0x03000000
00004 import genpy
00005 import struct
00006
00007 import interactive_perception_msgs.msg
00008 import actionlib_msgs.msg
00009 import geometry_msgs.msg
00010 import shape_msgs.msg
00011 import sensor_msgs.msg
00013 import std_msgs.msg
00014
00015 class ObjectRecognitionGuiAction(genpy.Message):
00016 _md5sum = "47f7f67cd27eb64111c1ce53f5ab94a7"
00017 _type = "interactive_perception_msgs/ObjectRecognitionGuiAction"
00018 _has_header = False
00019 _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00020
00021 ObjectRecognitionGuiActionGoal action_goal
00022 ObjectRecognitionGuiActionResult action_result
00023 ObjectRecognitionGuiActionFeedback action_feedback
00024
00025 ================================================================================
00026 MSG: interactive_perception_msgs/ObjectRecognitionGuiActionGoal
00027 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00028
00029 Header header
00030 actionlib_msgs/GoalID goal_id
00031 ObjectRecognitionGuiGoal goal
00032
00033 ================================================================================
00034 MSG: std_msgs/Header
00035 # Standard metadata for higher-level stamped data types.
00036 # This is generally used to communicate timestamped data
00037 # in a particular coordinate frame.
00038 #
00039 # sequence ID: consecutively increasing ID
00040 uint32 seq
00041 #Two-integer timestamp that is expressed as:
00042 # * stamp.secs: seconds (stamp_secs) since epoch
00043 # * stamp.nsecs: nanoseconds since stamp_secs
00044 # time-handling sugar is provided by the client library
00045 time stamp
00046 #Frame this data is associated with
00047 # 0: no frame
00048 # 1: global frame
00049 string frame_id
00050
00051 ================================================================================
00052 MSG: actionlib_msgs/GoalID
00053 # The stamp should store the time at which this goal was requested.
00054 # It is used by an action server when it tries to preempt all
00055 # goals that were requested before a certain time
00056 time stamp
00057
# The id provides a way to associate feedback and
# result messages with specific goal requests. The id
# specified must be unique.
00061 string id
00062
00063
00064 ================================================================================
00065 MSG: interactive_perception_msgs/ObjectRecognitionGuiGoal
00066 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00067
00068 #the original sensor data (depth/disparity image)
00069 sensor_msgs/Image image
00070 sensor_msgs/CameraInfo camera_info
00071
00072 #list of mesh/pose hypotheses for each recognized point cluster
00073 ModelHypothesisList[] model_hypotheses
00074
00075 ================================================================================
00076 MSG: sensor_msgs/Image
00077 # This message contains an uncompressed image
00078 # (0, 0) is at top-left corner of image
00079 #
00080
00081 Header header # Header timestamp should be acquisition time of image
00082 # Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
00084 # +x should point to the right in the image
00085 # +y should point down in the image
# +z should point into the plane of the image
00087 # If the frame_id here and the frame_id of the CameraInfo
00088 # message associated with the image conflict
00089 # the behavior is undefined
00090
00091 uint32 height # image height, that is, number of rows
00092 uint32 width # image width, that is, number of columns
00093
00094 # The legal values for encoding are in file src/image_encodings.cpp
00095 # If you want to standardize a new string format, join
00096 # ros-users@lists.sourceforge.net and send an email proposing a new encoding.
00097
00098 string encoding # Encoding of pixels -- channel meaning, ordering, size
00099 # taken from the list of strings in include/sensor_msgs/image_encodings.h
00100
00101 uint8 is_bigendian # is this data bigendian?
00102 uint32 step # Full row length in bytes
00103 uint8[] data # actual matrix data, size is (step * rows)
00104
00105 ================================================================================
00106 MSG: sensor_msgs/CameraInfo
00107 # This message defines meta information for a camera. It should be in a
00108 # camera namespace on topic "camera_info" and accompanied by up to five
00109 # image topics named:
00110 #
00111 # image_raw - raw data from the camera driver, possibly Bayer encoded
00112 # image - monochrome, distorted
00113 # image_color - color, distorted
00114 # image_rect - monochrome, rectified
00115 # image_rect_color - color, rectified
00116 #
00117 # The image_pipeline contains packages (image_proc, stereo_image_proc)
00118 # for producing the four processed image topics from image_raw and
# camera_info. The meaning of the camera parameters is described in
00120 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo.
00121 #
00122 # The image_geometry package provides a user-friendly interface to
00123 # common operations using this meta information. If you want to, e.g.,
00124 # project a 3d point into image coordinates, we strongly recommend
00125 # using image_geometry.
00126 #
00127 # If the camera is uncalibrated, the matrices D, K, R, P should be left
00128 # zeroed out. In particular, clients may assume that K[0] == 0.0
00129 # indicates an uncalibrated camera.
00130
00131 #######################################################################
00132 # Image acquisition info #
00133 #######################################################################
00134
00135 # Time of image acquisition, camera coordinate frame ID
00136 Header header # Header timestamp should be acquisition time of image
00137 # Header frame_id should be optical frame of camera
00138 # origin of frame should be optical center of camera
00139 # +x should point to the right in the image
00140 # +y should point down in the image
00141 # +z should point into the plane of the image
00142
00143
00144 #######################################################################
00145 # Calibration Parameters #
00146 #######################################################################
00147 # These are fixed during camera calibration. Their values will be the #
00148 # same in all messages until the camera is recalibrated. Note that #
00149 # self-calibrating systems may "recalibrate" frequently. #
00150 # #
00151 # The internal parameters can be used to warp a raw (distorted) image #
00152 # to: #
00153 # 1. An undistorted image (requires D and K) #
00154 # 2. A rectified image (requires D, K, R) #
00155 # The projection matrix P projects 3D points into the rectified image.#
00156 #######################################################################
00157
00158 # The image dimensions with which the camera was calibrated. Normally
00159 # this will be the full camera resolution in pixels.
00160 uint32 height
00161 uint32 width
00162
00163 # The distortion model used. Supported models are listed in
00164 # sensor_msgs/distortion_models.h. For most cameras, "plumb_bob" - a
# simple model of radial and tangential distortion - is sufficient.
00166 string distortion_model
00167
00168 # The distortion parameters, size depending on the distortion model.
00169 # For "plumb_bob", the 5 parameters are: (k1, k2, t1, t2, k3).
00170 float64[] D
00171
00172 # Intrinsic camera matrix for the raw (distorted) images.
00173 # [fx 0 cx]
00174 # K = [ 0 fy cy]
00175 # [ 0 0 1]
00176 # Projects 3D points in the camera coordinate frame to 2D pixel
00177 # coordinates using the focal lengths (fx, fy) and principal point
00178 # (cx, cy).
00179 float64[9] K # 3x3 row-major matrix
00180
00181 # Rectification matrix (stereo cameras only)
00182 # A rotation matrix aligning the camera coordinate system to the ideal
00183 # stereo image plane so that epipolar lines in both stereo images are
00184 # parallel.
00185 float64[9] R # 3x3 row-major matrix
00186
00187 # Projection/camera matrix
00188 # [fx' 0 cx' Tx]
00189 # P = [ 0 fy' cy' Ty]
00190 # [ 0 0 1 0]
00191 # By convention, this matrix specifies the intrinsic (camera) matrix
00192 # of the processed (rectified) image. That is, the left 3x3 portion
00193 # is the normal camera intrinsic matrix for the rectified image.
00194 # It projects 3D points in the camera coordinate frame to 2D pixel
00195 # coordinates using the focal lengths (fx', fy') and principal point
00196 # (cx', cy') - these may differ from the values in K.
00197 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will
00198 # also have R = the identity and P[1:3,1:3] = K.
00199 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the
00200 # position of the optical center of the second camera in the first
00201 # camera's frame. We assume Tz = 0 so both cameras are in the same
00202 # stereo image plane. The first camera always has Tx = Ty = 0. For
00203 # the right (second) camera of a horizontal stereo pair, Ty = 0 and
00204 # Tx = -fx' * B, where B is the baseline between the cameras.
00205 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto
00206 # the rectified image is given by:
00207 # [u v w]' = P * [X Y Z 1]'
00208 # x = u / w
00209 # y = v / w
00210 # This holds for both images of a stereo pair.
00211 float64[12] P # 3x4 row-major matrix
00212
00213
00214 #######################################################################
00215 # Operational Parameters #
00216 #######################################################################
00217 # These define the image region actually captured by the camera #
00218 # driver. Although they affect the geometry of the output image, they #
00219 # may be changed freely without recalibrating the camera. #
00220 #######################################################################
00221
00222 # Binning refers here to any camera setting which combines rectangular
00223 # neighborhoods of pixels into larger "super-pixels." It reduces the
00224 # resolution of the output image to
00225 # (width / binning_x) x (height / binning_y).
# The default values binning_x = binning_y = 0 are considered the same
00227 # as binning_x = binning_y = 1 (no subsampling).
00228 uint32 binning_x
00229 uint32 binning_y
00230
00231 # Region of interest (subwindow of full camera resolution), given in
00232 # full resolution (unbinned) image coordinates. A particular ROI
00233 # always denotes the same window of pixels on the camera sensor,
00234 # regardless of binning settings.
00235 # The default setting of roi (all values 0) is considered the same as
00236 # full resolution (roi.width = width, roi.height = height).
00237 RegionOfInterest roi
00238
00239 ================================================================================
00240 MSG: sensor_msgs/RegionOfInterest
00241 # This message is used to specify a region of interest within an image.
00242 #
00243 # When used to specify the ROI setting of the camera when the image was
00244 # taken, the height and width fields should either match the height and
00245 # width fields for the associated image; or height = width = 0
00246 # indicates that the full resolution image was captured.
00247
00248 uint32 x_offset # Leftmost pixel of the ROI
00249 # (0 if the ROI includes the left edge of the image)
00250 uint32 y_offset # Topmost pixel of the ROI
00251 # (0 if the ROI includes the top edge of the image)
00252 uint32 height # Height of ROI
00253 uint32 width # Width of ROI
00254
00255 # True if a distinct rectified ROI should be calculated from the "raw"
00256 # ROI in this message. Typically this should be False if the full image
00257 # is captured (ROI not used), and True if a subwindow is captured (ROI
00258 # used).
00259 bool do_rectify
00260
00261 ================================================================================
00262 MSG: interactive_perception_msgs/ModelHypothesisList
00263 ModelHypothesis[] hypotheses
00264
# initial guess whether this can be a correct recognition result at all
00266 bool accept
00267 ================================================================================
00268 MSG: interactive_perception_msgs/ModelHypothesis
00269 #describes a hypothesis about a recognized object (mesh+pose)
00270
00271 shape_msgs/Mesh mesh
00272 geometry_msgs/PoseStamped pose
00273
00274 ================================================================================
00275 MSG: shape_msgs/Mesh
00276 # Definition of a mesh
00277
00278 # list of triangles; the index values refer to positions in vertices[]
00279 MeshTriangle[] triangles
00280
00281 # the actual vertices that make up the mesh
00282 geometry_msgs/Point[] vertices
00283
00284 ================================================================================
00285 MSG: shape_msgs/MeshTriangle
00286 # Definition of a triangle's vertices
00287 uint32[3] vertex_indices
00288
00289 ================================================================================
00290 MSG: geometry_msgs/Point
00291 # This contains the position of a point in free space
00292 float64 x
00293 float64 y
00294 float64 z
00295
00296 ================================================================================
00297 MSG: geometry_msgs/PoseStamped
00298 # A Pose with reference coordinate frame and timestamp
00299 Header header
00300 Pose pose
00301
00302 ================================================================================
00303 MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
00305 Point position
00306 Quaternion orientation
00307
00308 ================================================================================
00309 MSG: geometry_msgs/Quaternion
00310 # This represents an orientation in free space in quaternion form.
00311
00312 float64 x
00313 float64 y
00314 float64 z
00315 float64 w
00316
00317 ================================================================================
00318 MSG: interactive_perception_msgs/ObjectRecognitionGuiActionResult
00319 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00320
00321 Header header
00322 actionlib_msgs/GoalStatus status
00323 ObjectRecognitionGuiResult result
00324
00325 ================================================================================
00326 MSG: actionlib_msgs/GoalStatus
00327 GoalID goal_id
00328 uint8 status
00329 uint8 PENDING = 0 # The goal has yet to be processed by the action server
00330 uint8 ACTIVE = 1 # The goal is currently being processed by the action server
00331 uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
00332 # and has since completed its execution (Terminal State)
00333 uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
00334 uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
00335 # to some failure (Terminal State)
00336 uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
00337 # because the goal was unattainable or invalid (Terminal State)
00338 uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
00339 # and has not yet completed execution
00340 uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
00341 # but the action server has not yet confirmed that the goal is canceled
00342 uint8 RECALLED = 8 # The goal received a cancel request before it started executing
00343 # and was successfully cancelled (Terminal State)
00344 uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
00345 # sent over the wire by an action server
00346
00347 #Allow for the user to associate a string with GoalStatus for debugging
00348 string text
00349
00350
00351 ================================================================================
00352 MSG: interactive_perception_msgs/ObjectRecognitionGuiResult
00353 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00354
00355 #the index of the model hypothesis that the user has selected for each cluster
00356 #values below 0 mean 'reject all hypotheses'
00357 int32[] selected_hypothesis_indices
00358
00359 ================================================================================
00360 MSG: interactive_perception_msgs/ObjectRecognitionGuiActionFeedback
00361 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00362
00363 Header header
00364 actionlib_msgs/GoalStatus status
00365 ObjectRecognitionGuiFeedback feedback
00366
00367 ================================================================================
00368 MSG: interactive_perception_msgs/ObjectRecognitionGuiFeedback
00369 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
00370
00371
00372 """
00373 __slots__ = ['action_goal','action_result','action_feedback']
00374 _slot_types = ['interactive_perception_msgs/ObjectRecognitionGuiActionGoal','interactive_perception_msgs/ObjectRecognitionGuiActionResult','interactive_perception_msgs/ObjectRecognitionGuiActionFeedback']
00375
00376 def __init__(self, *args, **kwds):
00377 """
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
A minimal usage sketch appears at the bottom of this module.
00382
00383 The available fields are:
00384 action_goal,action_result,action_feedback
00385
00386 :param args: complete set of field values, in .msg order
00387 :param kwds: use keyword arguments corresponding to message field names
00388 to set specific fields.
00389 """
00390 if args or kwds:
00391 super(ObjectRecognitionGuiAction, self).__init__(*args, **kwds)
00392
00393 if self.action_goal is None:
00394 self.action_goal = interactive_perception_msgs.msg.ObjectRecognitionGuiActionGoal()
00395 if self.action_result is None:
00396 self.action_result = interactive_perception_msgs.msg.ObjectRecognitionGuiActionResult()
00397 if self.action_feedback is None:
00398 self.action_feedback = interactive_perception_msgs.msg.ObjectRecognitionGuiActionFeedback()
00399 else:
00400 self.action_goal = interactive_perception_msgs.msg.ObjectRecognitionGuiActionGoal()
00401 self.action_result = interactive_perception_msgs.msg.ObjectRecognitionGuiActionResult()
00402 self.action_feedback = interactive_perception_msgs.msg.ObjectRecognitionGuiActionFeedback()
00403
00404 def _get_types(self):
00405 """
00406 internal API method
00407 """
00408 return self._slot_types
00409
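# Note: serialize()/deserialize() below walk the message tree in field order,
# packing fixed-size primitives with the precompiled Structs defined at the
# bottom of this module and prefixing every string and variable-length array
# with a little-endian uint32 length.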
00410 def serialize(self, buff):
00411 """
00412 serialize message into buffer
00413 :param buff: buffer, ``StringIO``
00414 """
00415 try:
00416 _x = self
00417 buff.write(_struct_3I.pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
00418 _x = self.action_goal.header.frame_id
00419 length = len(_x)
00420 if python3 or type(_x) == unicode:
00421 _x = _x.encode('utf-8')
00422 length = len(_x)
00423 buff.write(struct.pack('<I%ss'%length, length, _x))
00424 _x = self
00425 buff.write(_struct_2I.pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
00426 _x = self.action_goal.goal_id.id
00427 length = len(_x)
00428 if python3 or type(_x) == unicode:
00429 _x = _x.encode('utf-8')
00430 length = len(_x)
00431 buff.write(struct.pack('<I%ss'%length, length, _x))
00432 _x = self
00433 buff.write(_struct_3I.pack(_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs))
00434 _x = self.action_goal.goal.image.header.frame_id
00435 length = len(_x)
00436 if python3 or type(_x) == unicode:
00437 _x = _x.encode('utf-8')
00438 length = len(_x)
00439 buff.write(struct.pack('<I%ss'%length, length, _x))
00440 _x = self
00441 buff.write(_struct_2I.pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width))
00442 _x = self.action_goal.goal.image.encoding
00443 length = len(_x)
00444 if python3 or type(_x) == unicode:
00445 _x = _x.encode('utf-8')
00446 length = len(_x)
00447 buff.write(struct.pack('<I%ss'%length, length, _x))
00448 _x = self
00449 buff.write(_struct_BI.pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step))
00450 _x = self.action_goal.goal.image.data
00451 length = len(_x)
00452
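# image.data (uint8[]) may arrive as bytes/str (written as-is) or as a list of
# ints (packed element-wise), hence the branch below.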
00453 if type(_x) in [list, tuple]:
00454 buff.write(struct.pack('<I%sB'%length, length, *_x))
00455 else:
00456 buff.write(struct.pack('<I%ss'%length, length, _x))
00457 _x = self
00458 buff.write(_struct_3I.pack(_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs))
00459 _x = self.action_goal.goal.camera_info.header.frame_id
00460 length = len(_x)
00461 if python3 or type(_x) == unicode:
00462 _x = _x.encode('utf-8')
00463 length = len(_x)
00464 buff.write(struct.pack('<I%ss'%length, length, _x))
00465 _x = self
00466 buff.write(_struct_2I.pack(_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width))
00467 _x = self.action_goal.goal.camera_info.distortion_model
00468 length = len(_x)
00469 if python3 or type(_x) == unicode:
00470 _x = _x.encode('utf-8')
00471 length = len(_x)
00472 buff.write(struct.pack('<I%ss'%length, length, _x))
00473 length = len(self.action_goal.goal.camera_info.D)
00474 buff.write(_struct_I.pack(length))
00475 pattern = '<%sd'%length
00476 buff.write(struct.pack(pattern, *self.action_goal.goal.camera_info.D))
00477 buff.write(_struct_9d.pack(*self.action_goal.goal.camera_info.K))
00478 buff.write(_struct_9d.pack(*self.action_goal.goal.camera_info.R))
00479 buff.write(_struct_12d.pack(*self.action_goal.goal.camera_info.P))
00480 _x = self
00481 buff.write(_struct_6IB.pack(_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify))
00482 length = len(self.action_goal.goal.model_hypotheses)
00483 buff.write(_struct_I.pack(length))
00484 for val1 in self.action_goal.goal.model_hypotheses:
00485 length = len(val1.hypotheses)
00486 buff.write(_struct_I.pack(length))
00487 for val2 in val1.hypotheses:
00488 _v1 = val2.mesh
00489 length = len(_v1.triangles)
00490 buff.write(_struct_I.pack(length))
00491 for val4 in _v1.triangles:
00492 buff.write(_struct_3I.pack(*val4.vertex_indices))
00493 length = len(_v1.vertices)
00494 buff.write(_struct_I.pack(length))
00495 for val4 in _v1.vertices:
00496 _x = val4
00497 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00498 _v2 = val2.pose
00499 _v3 = _v2.header
00500 buff.write(_struct_I.pack(_v3.seq))
00501 _v4 = _v3.stamp
00502 _x = _v4
00503 buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
00504 _x = _v3.frame_id
00505 length = len(_x)
00506 if python3 or type(_x) == unicode:
00507 _x = _x.encode('utf-8')
00508 length = len(_x)
00509 buff.write(struct.pack('<I%ss'%length, length, _x))
00510 _v5 = _v2.pose
00511 _v6 = _v5.position
00512 _x = _v6
00513 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00514 _v7 = _v5.orientation
00515 _x = _v7
00516 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
00517 buff.write(_struct_B.pack(val1.accept))
00518 _x = self
00519 buff.write(_struct_3I.pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
00520 _x = self.action_result.header.frame_id
00521 length = len(_x)
00522 if python3 or type(_x) == unicode:
00523 _x = _x.encode('utf-8')
00524 length = len(_x)
00525 buff.write(struct.pack('<I%ss'%length, length, _x))
00526 _x = self
00527 buff.write(_struct_2I.pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
00528 _x = self.action_result.status.goal_id.id
00529 length = len(_x)
00530 if python3 or type(_x) == unicode:
00531 _x = _x.encode('utf-8')
00532 length = len(_x)
00533 buff.write(struct.pack('<I%ss'%length, length, _x))
00534 buff.write(_struct_B.pack(self.action_result.status.status))
00535 _x = self.action_result.status.text
00536 length = len(_x)
00537 if python3 or type(_x) == unicode:
00538 _x = _x.encode('utf-8')
00539 length = len(_x)
00540 buff.write(struct.pack('<I%ss'%length, length, _x))
00541 length = len(self.action_result.result.selected_hypothesis_indices)
00542 buff.write(_struct_I.pack(length))
00543 pattern = '<%si'%length
00544 buff.write(struct.pack(pattern, *self.action_result.result.selected_hypothesis_indices))
00545 _x = self
00546 buff.write(_struct_3I.pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
00547 _x = self.action_feedback.header.frame_id
00548 length = len(_x)
00549 if python3 or type(_x) == unicode:
00550 _x = _x.encode('utf-8')
00551 length = len(_x)
00552 buff.write(struct.pack('<I%ss'%length, length, _x))
00553 _x = self
00554 buff.write(_struct_2I.pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
00555 _x = self.action_feedback.status.goal_id.id
00556 length = len(_x)
00557 if python3 or type(_x) == unicode:
00558 _x = _x.encode('utf-8')
00559 length = len(_x)
00560 buff.write(struct.pack('<I%ss'%length, length, _x))
00561 buff.write(_struct_B.pack(self.action_feedback.status.status))
00562 _x = self.action_feedback.status.text
00563 length = len(_x)
00564 if python3 or type(_x) == unicode:
00565 _x = _x.encode('utf-8')
00566 length = len(_x)
00567 buff.write(struct.pack('<I%ss'%length, length, _x))
00568 except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
00569 except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
00570
00571 def deserialize(self, str):
00572 """
00573 unpack serialized message in str into this message instance
00574 :param str: byte array of serialized message, ``str``
00575 """
00576 try:
00577 if self.action_goal is None:
00578 self.action_goal = interactive_perception_msgs.msg.ObjectRecognitionGuiActionGoal()
00579 if self.action_result is None:
00580 self.action_result = interactive_perception_msgs.msg.ObjectRecognitionGuiActionResult()
00581 if self.action_feedback is None:
00582 self.action_feedback = interactive_perception_msgs.msg.ObjectRecognitionGuiActionFeedback()
00583 end = 0
00584 _x = self
00585 start = end
00586 end += 12
00587 (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00588 start = end
00589 end += 4
00590 (length,) = _struct_I.unpack(str[start:end])
00591 start = end
00592 end += length
00593 if python3:
00594 self.action_goal.header.frame_id = str[start:end].decode('utf-8')
00595 else:
00596 self.action_goal.header.frame_id = str[start:end]
00597 _x = self
00598 start = end
00599 end += 8
00600 (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
00601 start = end
00602 end += 4
00603 (length,) = _struct_I.unpack(str[start:end])
00604 start = end
00605 end += length
00606 if python3:
00607 self.action_goal.goal_id.id = str[start:end].decode('utf-8')
00608 else:
00609 self.action_goal.goal_id.id = str[start:end]
00610 _x = self
00611 start = end
00612 end += 12
00613 (_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00614 start = end
00615 end += 4
00616 (length,) = _struct_I.unpack(str[start:end])
00617 start = end
00618 end += length
00619 if python3:
00620 self.action_goal.goal.image.header.frame_id = str[start:end].decode('utf-8')
00621 else:
00622 self.action_goal.goal.image.header.frame_id = str[start:end]
00623 _x = self
00624 start = end
00625 end += 8
00626 (_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _struct_2I.unpack(str[start:end])
00627 start = end
00628 end += 4
00629 (length,) = _struct_I.unpack(str[start:end])
00630 start = end
00631 end += length
00632 if python3:
00633 self.action_goal.goal.image.encoding = str[start:end].decode('utf-8')
00634 else:
00635 self.action_goal.goal.image.encoding = str[start:end]
00636 _x = self
00637 start = end
00638 end += 5
00639 (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _struct_BI.unpack(str[start:end])
00640 start = end
00641 end += 4
00642 (length,) = _struct_I.unpack(str[start:end])
00643 start = end
00644 end += length
00645 self.action_goal.goal.image.data = str[start:end]
00646 _x = self
00647 start = end
00648 end += 12
00649 (_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00650 start = end
00651 end += 4
00652 (length,) = _struct_I.unpack(str[start:end])
00653 start = end
00654 end += length
00655 if python3:
00656 self.action_goal.goal.camera_info.header.frame_id = str[start:end].decode('utf-8')
00657 else:
00658 self.action_goal.goal.camera_info.header.frame_id = str[start:end]
00659 _x = self
00660 start = end
00661 end += 8
00662 (_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
00663 start = end
00664 end += 4
00665 (length,) = _struct_I.unpack(str[start:end])
00666 start = end
00667 end += length
00668 if python3:
00669 self.action_goal.goal.camera_info.distortion_model = str[start:end].decode('utf-8')
00670 else:
00671 self.action_goal.goal.camera_info.distortion_model = str[start:end]
00672 start = end
00673 end += 4
00674 (length,) = _struct_I.unpack(str[start:end])
00675 pattern = '<%sd'%length
00676 start = end
00677 end += struct.calcsize(pattern)
00678 self.action_goal.goal.camera_info.D = struct.unpack(pattern, str[start:end])
00679 start = end
00680 end += 72
00681 self.action_goal.goal.camera_info.K = _struct_9d.unpack(str[start:end])
00682 start = end
00683 end += 72
00684 self.action_goal.goal.camera_info.R = _struct_9d.unpack(str[start:end])
00685 start = end
00686 end += 96
00687 self.action_goal.goal.camera_info.P = _struct_12d.unpack(str[start:end])
00688 _x = self
00689 start = end
00690 end += 25
00691 (_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
00692 self.action_goal.goal.camera_info.roi.do_rectify = bool(self.action_goal.goal.camera_info.roi.do_rectify)
00693 start = end
00694 end += 4
00695 (length,) = _struct_I.unpack(str[start:end])
00696 self.action_goal.goal.model_hypotheses = []
00697 for i in range(0, length):
00698 val1 = interactive_perception_msgs.msg.ModelHypothesisList()
00699 start = end
00700 end += 4
00701 (length,) = _struct_I.unpack(str[start:end])
00702 val1.hypotheses = []
00703 for i in range(0, length):
00704 val2 = interactive_perception_msgs.msg.ModelHypothesis()
00705 _v8 = val2.mesh
00706 start = end
00707 end += 4
00708 (length,) = _struct_I.unpack(str[start:end])
00709 _v8.triangles = []
00710 for i in range(0, length):
00711 val4 = shape_msgs.msg.MeshTriangle()
00712 start = end
00713 end += 12
00714 val4.vertex_indices = _struct_3I.unpack(str[start:end])
00715 _v8.triangles.append(val4)
00716 start = end
00717 end += 4
00718 (length,) = _struct_I.unpack(str[start:end])
00719 _v8.vertices = []
00720 for i in range(0, length):
00721 val4 = geometry_msgs.msg.Point()
00722 _x = val4
00723 start = end
00724 end += 24
00725 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00726 _v8.vertices.append(val4)
00727 _v9 = val2.pose
00728 _v10 = _v9.header
00729 start = end
00730 end += 4
00731 (_v10.seq,) = _struct_I.unpack(str[start:end])
00732 _v11 = _v10.stamp
00733 _x = _v11
00734 start = end
00735 end += 8
00736 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
00737 start = end
00738 end += 4
00739 (length,) = _struct_I.unpack(str[start:end])
00740 start = end
00741 end += length
00742 if python3:
00743 _v10.frame_id = str[start:end].decode('utf-8')
00744 else:
00745 _v10.frame_id = str[start:end]
00746 _v12 = _v9.pose
00747 _v13 = _v12.position
00748 _x = _v13
00749 start = end
00750 end += 24
00751 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
00752 _v14 = _v12.orientation
00753 _x = _v14
00754 start = end
00755 end += 32
00756 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
00757 val1.hypotheses.append(val2)
00758 start = end
00759 end += 1
00760 (val1.accept,) = _struct_B.unpack(str[start:end])
00761 val1.accept = bool(val1.accept)
00762 self.action_goal.goal.model_hypotheses.append(val1)
00763 _x = self
00764 start = end
00765 end += 12
00766 (_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00767 start = end
00768 end += 4
00769 (length,) = _struct_I.unpack(str[start:end])
00770 start = end
00771 end += length
00772 if python3:
00773 self.action_result.header.frame_id = str[start:end].decode('utf-8')
00774 else:
00775 self.action_result.header.frame_id = str[start:end]
00776 _x = self
00777 start = end
00778 end += 8
00779 (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
00780 start = end
00781 end += 4
00782 (length,) = _struct_I.unpack(str[start:end])
00783 start = end
00784 end += length
00785 if python3:
00786 self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
00787 else:
00788 self.action_result.status.goal_id.id = str[start:end]
00789 start = end
00790 end += 1
00791 (self.action_result.status.status,) = _struct_B.unpack(str[start:end])
00792 start = end
00793 end += 4
00794 (length,) = _struct_I.unpack(str[start:end])
00795 start = end
00796 end += length
00797 if python3:
00798 self.action_result.status.text = str[start:end].decode('utf-8')
00799 else:
00800 self.action_result.status.text = str[start:end]
00801 start = end
00802 end += 4
00803 (length,) = _struct_I.unpack(str[start:end])
00804 pattern = '<%si'%length
00805 start = end
00806 end += struct.calcsize(pattern)
00807 self.action_result.result.selected_hypothesis_indices = struct.unpack(pattern, str[start:end])
00808 _x = self
00809 start = end
00810 end += 12
00811 (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
00812 start = end
00813 end += 4
00814 (length,) = _struct_I.unpack(str[start:end])
00815 start = end
00816 end += length
00817 if python3:
00818 self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
00819 else:
00820 self.action_feedback.header.frame_id = str[start:end]
00821 _x = self
00822 start = end
00823 end += 8
00824 (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
00825 start = end
00826 end += 4
00827 (length,) = _struct_I.unpack(str[start:end])
00828 start = end
00829 end += length
00830 if python3:
00831 self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
00832 else:
00833 self.action_feedback.status.goal_id.id = str[start:end]
00834 start = end
00835 end += 1
00836 (self.action_feedback.status.status,) = _struct_B.unpack(str[start:end])
00837 start = end
00838 end += 4
00839 (length,) = _struct_I.unpack(str[start:end])
00840 start = end
00841 end += length
00842 if python3:
00843 self.action_feedback.status.text = str[start:end].decode('utf-8')
00844 else:
00845 self.action_feedback.status.text = str[start:end]
00846 return self
00847 except struct.error as e:
00848 raise genpy.DeserializationError(e)
00849
00850
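# The *_numpy variants below mirror serialize()/deserialize() but assume the
# numeric array fields (e.g. camera_info D/K/R/P, vertex_indices,
# selected_hypothesis_indices) are numpy arrays: they are written with
# ndarray.tostring() and read back with numpy.frombuffer().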
00851 def serialize_numpy(self, buff, numpy):
00852 """
00853 serialize message with numpy array types into buffer
00854 :param buff: buffer, ``StringIO``
00855 :param numpy: numpy python module
00856 """
00857 try:
00858 _x = self
00859 buff.write(_struct_3I.pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs))
00860 _x = self.action_goal.header.frame_id
00861 length = len(_x)
00862 if python3 or type(_x) == unicode:
00863 _x = _x.encode('utf-8')
00864 length = len(_x)
00865 buff.write(struct.pack('<I%ss'%length, length, _x))
00866 _x = self
00867 buff.write(_struct_2I.pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs))
00868 _x = self.action_goal.goal_id.id
00869 length = len(_x)
00870 if python3 or type(_x) == unicode:
00871 _x = _x.encode('utf-8')
00872 length = len(_x)
00873 buff.write(struct.pack('<I%ss'%length, length, _x))
00874 _x = self
00875 buff.write(_struct_3I.pack(_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs))
00876 _x = self.action_goal.goal.image.header.frame_id
00877 length = len(_x)
00878 if python3 or type(_x) == unicode:
00879 _x = _x.encode('utf-8')
00880 length = len(_x)
00881 buff.write(struct.pack('<I%ss'%length, length, _x))
00882 _x = self
00883 buff.write(_struct_2I.pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width))
00884 _x = self.action_goal.goal.image.encoding
00885 length = len(_x)
00886 if python3 or type(_x) == unicode:
00887 _x = _x.encode('utf-8')
00888 length = len(_x)
00889 buff.write(struct.pack('<I%ss'%length, length, _x))
00890 _x = self
00891 buff.write(_struct_BI.pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step))
00892 _x = self.action_goal.goal.image.data
00893 length = len(_x)
00894
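# As in serialize(): uint8[] image data may be bytes/str or a list of ints.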
00895 if type(_x) in [list, tuple]:
00896 buff.write(struct.pack('<I%sB'%length, length, *_x))
00897 else:
00898 buff.write(struct.pack('<I%ss'%length, length, _x))
00899 _x = self
00900 buff.write(_struct_3I.pack(_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs))
00901 _x = self.action_goal.goal.camera_info.header.frame_id
00902 length = len(_x)
00903 if python3 or type(_x) == unicode:
00904 _x = _x.encode('utf-8')
00905 length = len(_x)
00906 buff.write(struct.pack('<I%ss'%length, length, _x))
00907 _x = self
00908 buff.write(_struct_2I.pack(_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width))
00909 _x = self.action_goal.goal.camera_info.distortion_model
00910 length = len(_x)
00911 if python3 or type(_x) == unicode:
00912 _x = _x.encode('utf-8')
00913 length = len(_x)
00914 buff.write(struct.pack('<I%ss'%length, length, _x))
00915 length = len(self.action_goal.goal.camera_info.D)
00916 buff.write(_struct_I.pack(length))
00917 pattern = '<%sd'%length
00918 buff.write(self.action_goal.goal.camera_info.D.tostring())
00919 buff.write(self.action_goal.goal.camera_info.K.tostring())
00920 buff.write(self.action_goal.goal.camera_info.R.tostring())
00921 buff.write(self.action_goal.goal.camera_info.P.tostring())
00922 _x = self
00923 buff.write(_struct_6IB.pack(_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify))
00924 length = len(self.action_goal.goal.model_hypotheses)
00925 buff.write(_struct_I.pack(length))
00926 for val1 in self.action_goal.goal.model_hypotheses:
00927 length = len(val1.hypotheses)
00928 buff.write(_struct_I.pack(length))
00929 for val2 in val1.hypotheses:
00930 _v15 = val2.mesh
00931 length = len(_v15.triangles)
00932 buff.write(_struct_I.pack(length))
00933 for val4 in _v15.triangles:
00934 buff.write(val4.vertex_indices.tostring())
00935 length = len(_v15.vertices)
00936 buff.write(_struct_I.pack(length))
00937 for val4 in _v15.vertices:
00938 _x = val4
00939 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00940 _v16 = val2.pose
00941 _v17 = _v16.header
00942 buff.write(_struct_I.pack(_v17.seq))
00943 _v18 = _v17.stamp
00944 _x = _v18
00945 buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
00946 _x = _v17.frame_id
00947 length = len(_x)
00948 if python3 or type(_x) == unicode:
00949 _x = _x.encode('utf-8')
00950 length = len(_x)
00951 buff.write(struct.pack('<I%ss'%length, length, _x))
00952 _v19 = _v16.pose
00953 _v20 = _v19.position
00954 _x = _v20
00955 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
00956 _v21 = _v19.orientation
00957 _x = _v21
00958 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
00959 buff.write(_struct_B.pack(val1.accept))
00960 _x = self
00961 buff.write(_struct_3I.pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs))
00962 _x = self.action_result.header.frame_id
00963 length = len(_x)
00964 if python3 or type(_x) == unicode:
00965 _x = _x.encode('utf-8')
00966 length = len(_x)
00967 buff.write(struct.pack('<I%ss'%length, length, _x))
00968 _x = self
00969 buff.write(_struct_2I.pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs))
00970 _x = self.action_result.status.goal_id.id
00971 length = len(_x)
00972 if python3 or type(_x) == unicode:
00973 _x = _x.encode('utf-8')
00974 length = len(_x)
00975 buff.write(struct.pack('<I%ss'%length, length, _x))
00976 buff.write(_struct_B.pack(self.action_result.status.status))
00977 _x = self.action_result.status.text
00978 length = len(_x)
00979 if python3 or type(_x) == unicode:
00980 _x = _x.encode('utf-8')
00981 length = len(_x)
00982 buff.write(struct.pack('<I%ss'%length, length, _x))
00983 length = len(self.action_result.result.selected_hypothesis_indices)
00984 buff.write(_struct_I.pack(length))
00985 pattern = '<%si'%length
00986 buff.write(self.action_result.result.selected_hypothesis_indices.tostring())
00987 _x = self
00988 buff.write(_struct_3I.pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs))
00989 _x = self.action_feedback.header.frame_id
00990 length = len(_x)
00991 if python3 or type(_x) == unicode:
00992 _x = _x.encode('utf-8')
00993 length = len(_x)
00994 buff.write(struct.pack('<I%ss'%length, length, _x))
00995 _x = self
00996 buff.write(_struct_2I.pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs))
00997 _x = self.action_feedback.status.goal_id.id
00998 length = len(_x)
00999 if python3 or type(_x) == unicode:
01000 _x = _x.encode('utf-8')
01001 length = len(_x)
01002 buff.write(struct.pack('<I%ss'%length, length, _x))
01003 buff.write(_struct_B.pack(self.action_feedback.status.status))
01004 _x = self.action_feedback.status.text
01005 length = len(_x)
01006 if python3 or type(_x) == unicode:
01007 _x = _x.encode('utf-8')
01008 length = len(_x)
01009 buff.write(struct.pack('<I%ss'%length, length, _x))
01010 except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
01011 except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
01012
01013 def deserialize_numpy(self, str, numpy):
01014 """
01015 unpack serialized message in str into this message instance using numpy for array types
01016 :param str: byte array of serialized message, ``str``
01017 :param numpy: numpy python module
01018 """
01019 try:
01020 if self.action_goal is None:
01021 self.action_goal = interactive_perception_msgs.msg.ObjectRecognitionGuiActionGoal()
01022 if self.action_result is None:
01023 self.action_result = interactive_perception_msgs.msg.ObjectRecognitionGuiActionResult()
01024 if self.action_feedback is None:
01025 self.action_feedback = interactive_perception_msgs.msg.ObjectRecognitionGuiActionFeedback()
01026 end = 0
01027 _x = self
01028 start = end
01029 end += 12
01030 (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
01031 start = end
01032 end += 4
01033 (length,) = _struct_I.unpack(str[start:end])
01034 start = end
01035 end += length
01036 if python3:
01037 self.action_goal.header.frame_id = str[start:end].decode('utf-8')
01038 else:
01039 self.action_goal.header.frame_id = str[start:end]
01040 _x = self
01041 start = end
01042 end += 8
01043 (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
01044 start = end
01045 end += 4
01046 (length,) = _struct_I.unpack(str[start:end])
01047 start = end
01048 end += length
01049 if python3:
01050 self.action_goal.goal_id.id = str[start:end].decode('utf-8')
01051 else:
01052 self.action_goal.goal_id.id = str[start:end]
01053 _x = self
01054 start = end
01055 end += 12
01056 (_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
01057 start = end
01058 end += 4
01059 (length,) = _struct_I.unpack(str[start:end])
01060 start = end
01061 end += length
01062 if python3:
01063 self.action_goal.goal.image.header.frame_id = str[start:end].decode('utf-8')
01064 else:
01065 self.action_goal.goal.image.header.frame_id = str[start:end]
01066 _x = self
01067 start = end
01068 end += 8
01069 (_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _struct_2I.unpack(str[start:end])
01070 start = end
01071 end += 4
01072 (length,) = _struct_I.unpack(str[start:end])
01073 start = end
01074 end += length
01075 if python3:
01076 self.action_goal.goal.image.encoding = str[start:end].decode('utf-8')
01077 else:
01078 self.action_goal.goal.image.encoding = str[start:end]
01079 _x = self
01080 start = end
01081 end += 5
01082 (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _struct_BI.unpack(str[start:end])
01083 start = end
01084 end += 4
01085 (length,) = _struct_I.unpack(str[start:end])
01086 start = end
01087 end += length
01088 self.action_goal.goal.image.data = str[start:end]
01089 _x = self
01090 start = end
01091 end += 12
01092 (_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
01093 start = end
01094 end += 4
01095 (length,) = _struct_I.unpack(str[start:end])
01096 start = end
01097 end += length
01098 if python3:
01099 self.action_goal.goal.camera_info.header.frame_id = str[start:end].decode('utf-8')
01100 else:
01101 self.action_goal.goal.camera_info.header.frame_id = str[start:end]
01102 _x = self
01103 start = end
01104 end += 8
01105 (_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width,) = _struct_2I.unpack(str[start:end])
01106 start = end
01107 end += 4
01108 (length,) = _struct_I.unpack(str[start:end])
01109 start = end
01110 end += length
01111 if python3:
01112 self.action_goal.goal.camera_info.distortion_model = str[start:end].decode('utf-8')
01113 else:
01114 self.action_goal.goal.camera_info.distortion_model = str[start:end]
01115 start = end
01116 end += 4
01117 (length,) = _struct_I.unpack(str[start:end])
01118 pattern = '<%sd'%length
01119 start = end
01120 end += struct.calcsize(pattern)
01121 self.action_goal.goal.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
01122 start = end
01123 end += 72
01124 self.action_goal.goal.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
01125 start = end
01126 end += 72
01127 self.action_goal.goal.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9)
01128 start = end
01129 end += 96
01130 self.action_goal.goal.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12)
01131 _x = self
01132 start = end
01133 end += 25
01134 (_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end])
01135 self.action_goal.goal.camera_info.roi.do_rectify = bool(self.action_goal.goal.camera_info.roi.do_rectify)
01136 start = end
01137 end += 4
01138 (length,) = _struct_I.unpack(str[start:end])
01139 self.action_goal.goal.model_hypotheses = []
01140 for i in range(0, length):
01141 val1 = interactive_perception_msgs.msg.ModelHypothesisList()
01142 start = end
01143 end += 4
01144 (length,) = _struct_I.unpack(str[start:end])
01145 val1.hypotheses = []
01146 for i in range(0, length):
01147 val2 = interactive_perception_msgs.msg.ModelHypothesis()
01148 _v22 = val2.mesh
01149 start = end
01150 end += 4
01151 (length,) = _struct_I.unpack(str[start:end])
01152 _v22.triangles = []
01153 for i in range(0, length):
01154 val4 = shape_msgs.msg.MeshTriangle()
01155 start = end
01156 end += 12
01157 val4.vertex_indices = numpy.frombuffer(str[start:end], dtype=numpy.uint32, count=3)
01158 _v22.triangles.append(val4)
01159 start = end
01160 end += 4
01161 (length,) = _struct_I.unpack(str[start:end])
01162 _v22.vertices = []
01163 for i in range(0, length):
01164 val4 = geometry_msgs.msg.Point()
01165 _x = val4
01166 start = end
01167 end += 24
01168 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
01169 _v22.vertices.append(val4)
01170 _v23 = val2.pose
01171 _v24 = _v23.header
01172 start = end
01173 end += 4
01174 (_v24.seq,) = _struct_I.unpack(str[start:end])
01175 _v25 = _v24.stamp
01176 _x = _v25
01177 start = end
01178 end += 8
01179 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
01180 start = end
01181 end += 4
01182 (length,) = _struct_I.unpack(str[start:end])
01183 start = end
01184 end += length
01185 if python3:
01186 _v24.frame_id = str[start:end].decode('utf-8')
01187 else:
01188 _v24.frame_id = str[start:end]
01189 _v26 = _v23.pose
01190 _v27 = _v26.position
01191 _x = _v27
01192 start = end
01193 end += 24
01194 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
01195 _v28 = _v26.orientation
01196 _x = _v28
01197 start = end
01198 end += 32
01199 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
01200 val1.hypotheses.append(val2)
01201 start = end
01202 end += 1
01203 (val1.accept,) = _struct_B.unpack(str[start:end])
01204 val1.accept = bool(val1.accept)
01205 self.action_goal.goal.model_hypotheses.append(val1)
01206 _x = self
01207 start = end
01208 end += 12
01209 (_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
01210 start = end
01211 end += 4
01212 (length,) = _struct_I.unpack(str[start:end])
01213 start = end
01214 end += length
01215 if python3:
01216 self.action_result.header.frame_id = str[start:end].decode('utf-8')
01217 else:
01218 self.action_result.header.frame_id = str[start:end]
01219 _x = self
01220 start = end
01221 end += 8
01222 (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
01223 start = end
01224 end += 4
01225 (length,) = _struct_I.unpack(str[start:end])
01226 start = end
01227 end += length
01228 if python3:
01229 self.action_result.status.goal_id.id = str[start:end].decode('utf-8')
01230 else:
01231 self.action_result.status.goal_id.id = str[start:end]
01232 start = end
01233 end += 1
01234 (self.action_result.status.status,) = _struct_B.unpack(str[start:end])
01235 start = end
01236 end += 4
01237 (length,) = _struct_I.unpack(str[start:end])
01238 start = end
01239 end += length
01240 if python3:
01241 self.action_result.status.text = str[start:end].decode('utf-8')
01242 else:
01243 self.action_result.status.text = str[start:end]
01244 start = end
01245 end += 4
01246 (length,) = _struct_I.unpack(str[start:end])
01247 pattern = '<%si'%length
01248 start = end
01249 end += struct.calcsize(pattern)
01250 self.action_result.result.selected_hypothesis_indices = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
01251 _x = self
01252 start = end
01253 end += 12
01254 (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
01255 start = end
01256 end += 4
01257 (length,) = _struct_I.unpack(str[start:end])
01258 start = end
01259 end += length
01260 if python3:
01261 self.action_feedback.header.frame_id = str[start:end].decode('utf-8')
01262 else:
01263 self.action_feedback.header.frame_id = str[start:end]
01264 _x = self
01265 start = end
01266 end += 8
01267 (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
01268 start = end
01269 end += 4
01270 (length,) = _struct_I.unpack(str[start:end])
01271 start = end
01272 end += length
01273 if python3:
01274 self.action_feedback.status.goal_id.id = str[start:end].decode('utf-8')
01275 else:
01276 self.action_feedback.status.goal_id.id = str[start:end]
01277 start = end
01278 end += 1
01279 (self.action_feedback.status.status,) = _struct_B.unpack(str[start:end])
01280 start = end
01281 end += 4
01282 (length,) = _struct_I.unpack(str[start:end])
01283 start = end
01284 end += length
01285 if python3:
01286 self.action_feedback.status.text = str[start:end].decode('utf-8')
01287 else:
01288 self.action_feedback.status.text = str[start:end]
01289 return self
01290 except struct.error as e:
01291 raise genpy.DeserializationError(e)
01292
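# Precompiled struct.Struct instances shared by the (de)serialization methods
# above; each format string describes the little-endian wire layout
# (e.g. "<3I" packs three consecutive uint32 values).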
01293 _struct_I = genpy.struct_I
01294 _struct_6IB = struct.Struct("<6IB")
01295 _struct_B = struct.Struct("<B")
01296 _struct_12d = struct.Struct("<12d")
01297 _struct_9d = struct.Struct("<9d")
01298 _struct_BI = struct.Struct("<BI")
01299 _struct_3I = struct.Struct("<3I")
01300 _struct_4d = struct.Struct("<4d")
01301 _struct_2I = struct.Struct("<2I")
01302 _struct_3d = struct.Struct("<3d")
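
# Minimal usage sketch (not part of the generated API): construct the action
# container with keyword arguments, as the constructor docstring recommends,
# then round-trip it through serialize()/deserialize(). This assumes the
# interactive_perception_msgs package has been built, so the ActionGoal/
# ActionResult/ActionFeedback classes imported above are available.
if __name__ == '__main__':
  from io import BytesIO

  msg = ObjectRecognitionGuiAction(
    action_goal=interactive_perception_msgs.msg.ObjectRecognitionGuiActionGoal())
  buff = BytesIO()
  msg.serialize(buff)                # pack into the little-endian ROS wire format
  copy = ObjectRecognitionGuiAction()
  copy.deserialize(buff.getvalue())  # rebuild an equivalent message from the bytes
  assert copy.action_goal.header.seq == msg.action_goal.header.seq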