00001 """autogenerated by genmsg_py from ObjectRecognitionGuiAction.msg. Do not edit.""" 00002 import roslib.message 00003 import struct 00004 00005 import arm_navigation_msgs.msg 00006 import roslib.rostime 00007 import actionlib_msgs.msg 00008 import geometry_msgs.msg 00009 import object_recognition_gui.msg 00010 import sensor_msgs.msg 00011 import std_msgs.msg 00012 00013 class ObjectRecognitionGuiAction(roslib.message.Message): 00014 _md5sum = "ceb5b2d8cb3006a85f5fabfab9056abe" 00015 _type = "object_recognition_gui/ObjectRecognitionGuiAction" 00016 _has_header = False #flag to mark the presence of a Header object 00017 _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ====== 00018 00019 ObjectRecognitionGuiActionGoal action_goal 00020 ObjectRecognitionGuiActionResult action_result 00021 ObjectRecognitionGuiActionFeedback action_feedback 00022 00023 ================================================================================ 00024 MSG: object_recognition_gui/ObjectRecognitionGuiActionGoal 00025 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ====== 00026 00027 Header header 00028 actionlib_msgs/GoalID goal_id 00029 ObjectRecognitionGuiGoal goal 00030 00031 ================================================================================ 00032 MSG: std_msgs/Header 00033 # Standard metadata for higher-level stamped data types. 00034 # This is generally used to communicate timestamped data 00035 # in a particular coordinate frame. 00036 # 00037 # sequence ID: consecutively increasing ID 00038 uint32 seq 00039 #Two-integer timestamp that is expressed as: 00040 # * stamp.secs: seconds (stamp_secs) since epoch 00041 # * stamp.nsecs: nanoseconds since stamp_secs 00042 # time-handling sugar is provided by the client library 00043 time stamp 00044 #Frame this data is associated with 00045 # 0: no frame 00046 # 1: global frame 00047 string frame_id 00048 00049 ================================================================================ 00050 MSG: actionlib_msgs/GoalID 00051 # The stamp should store the time at which this goal was requested. 00052 # It is used by an action server when it tries to preempt all 00053 # goals that were requested before a certain time 00054 time stamp 00055 00056 # The id provides a way to associate feedback and 00057 # result message with specific goal requests. The id 00058 # specified must be unique. 00059 string id 00060 00061 00062 ================================================================================ 00063 MSG: object_recognition_gui/ObjectRecognitionGuiGoal 00064 # ====== DO NOT MODIFY! 
AUTOGENERATED FROM AN ACTION DEFINITION ====== 00065 00066 #the original sensor data (depth/disparity image) 00067 sensor_msgs/Image image 00068 sensor_msgs/CameraInfo camera_info 00069 00070 #list of mesh/pose hypotheses for each recognized point cluster 00071 ModelHypothesisList[] model_hypotheses 00072 00073 ================================================================================ 00074 MSG: sensor_msgs/Image 00075 # This message contains an uncompressed image 00076 # (0, 0) is at top-left corner of image 00077 # 00078 00079 Header header # Header timestamp should be acquisition time of image 00080 # Header frame_id should be optical frame of camera 00081 # origin of frame should be optical center of cameara 00082 # +x should point to the right in the image 00083 # +y should point down in the image 00084 # +z should point into to plane of the image 00085 # If the frame_id here and the frame_id of the CameraInfo 00086 # message associated with the image conflict 00087 # the behavior is undefined 00088 00089 uint32 height # image height, that is, number of rows 00090 uint32 width # image width, that is, number of columns 00091 00092 # The legal values for encoding are in file src/image_encodings.cpp 00093 # If you want to standardize a new string format, join 00094 # ros-users@lists.sourceforge.net and send an email proposing a new encoding. 00095 00096 string encoding # Encoding of pixels -- channel meaning, ordering, size 00097 # taken from the list of strings in src/image_encodings.cpp 00098 00099 uint8 is_bigendian # is this data bigendian? 00100 uint32 step # Full row length in bytes 00101 uint8[] data # actual matrix data, size is (step * rows) 00102 00103 ================================================================================ 00104 MSG: sensor_msgs/CameraInfo 00105 # This message defines meta information for a camera. It should be in a 00106 # camera namespace on topic "camera_info" and accompanied by up to five 00107 # image topics named: 00108 # 00109 # image_raw - raw data from the camera driver, possibly Bayer encoded 00110 # image - monochrome, distorted 00111 # image_color - color, distorted 00112 # image_rect - monochrome, rectified 00113 # image_rect_color - color, rectified 00114 # 00115 # The image_pipeline contains packages (image_proc, stereo_image_proc) 00116 # for producing the four processed image topics from image_raw and 00117 # camera_info. The meaning of the camera parameters are described in 00118 # detail at http://www.ros.org/wiki/image_pipeline/CameraInfo. 00119 # 00120 # The image_geometry package provides a user-friendly interface to 00121 # common operations using this meta information. If you want to, e.g., 00122 # project a 3d point into image coordinates, we strongly recommend 00123 # using image_geometry. 00124 # 00125 # If the camera is uncalibrated, the matrices D, K, R, P should be left 00126 # zeroed out. In particular, clients may assume that K[0] == 0.0 00127 # indicates an uncalibrated camera. 
00128 00129 ####################################################################### 00130 # Image acquisition info # 00131 ####################################################################### 00132 00133 # Time of image acquisition, camera coordinate frame ID 00134 Header header # Header timestamp should be acquisition time of image 00135 # Header frame_id should be optical frame of camera 00136 # origin of frame should be optical center of camera 00137 # +x should point to the right in the image 00138 # +y should point down in the image 00139 # +z should point into the plane of the image 00140 00141 00142 ####################################################################### 00143 # Calibration Parameters # 00144 ####################################################################### 00145 # These are fixed during camera calibration. Their values will be the # 00146 # same in all messages until the camera is recalibrated. Note that # 00147 # self-calibrating systems may "recalibrate" frequently. # 00148 # # 00149 # The internal parameters can be used to warp a raw (distorted) image # 00150 # to: # 00151 # 1. An undistorted image (requires D and K) # 00152 # 2. A rectified image (requires D, K, R) # 00153 # The projection matrix P projects 3D points into the rectified image.# 00154 ####################################################################### 00155 00156 # The image dimensions with which the camera was calibrated. Normally 00157 # this will be the full camera resolution in pixels. 00158 uint32 height 00159 uint32 width 00160 00161 # The distortion model used. Supported models are listed in 00162 # sensor_msgs/distortion_models.h. For most cameras, "plumb_bob" - a 00163 # simple model of radial and tangential distortion - is sufficent. 00164 string distortion_model 00165 00166 # The distortion parameters, size depending on the distortion model. 00167 # For "plumb_bob", the 5 parameters are: (k1, k2, t1, t2, k3). 00168 float64[] D 00169 00170 # Intrinsic camera matrix for the raw (distorted) images. 00171 # [fx 0 cx] 00172 # K = [ 0 fy cy] 00173 # [ 0 0 1] 00174 # Projects 3D points in the camera coordinate frame to 2D pixel 00175 # coordinates using the focal lengths (fx, fy) and principal point 00176 # (cx, cy). 00177 float64[9] K # 3x3 row-major matrix 00178 00179 # Rectification matrix (stereo cameras only) 00180 # A rotation matrix aligning the camera coordinate system to the ideal 00181 # stereo image plane so that epipolar lines in both stereo images are 00182 # parallel. 00183 float64[9] R # 3x3 row-major matrix 00184 00185 # Projection/camera matrix 00186 # [fx' 0 cx' Tx] 00187 # P = [ 0 fy' cy' Ty] 00188 # [ 0 0 1 0] 00189 # By convention, this matrix specifies the intrinsic (camera) matrix 00190 # of the processed (rectified) image. That is, the left 3x3 portion 00191 # is the normal camera intrinsic matrix for the rectified image. 00192 # It projects 3D points in the camera coordinate frame to 2D pixel 00193 # coordinates using the focal lengths (fx', fy') and principal point 00194 # (cx', cy') - these may differ from the values in K. 00195 # For monocular cameras, Tx = Ty = 0. Normally, monocular cameras will 00196 # also have R = the identity and P[1:3,1:3] = K. 00197 # For a stereo pair, the fourth column [Tx Ty 0]' is related to the 00198 # position of the optical center of the second camera in the first 00199 # camera's frame. We assume Tz = 0 so both cameras are in the same 00200 # stereo image plane. The first camera always has Tx = Ty = 0. 
For 00201 # the right (second) camera of a horizontal stereo pair, Ty = 0 and 00202 # Tx = -fx' * B, where B is the baseline between the cameras. 00203 # Given a 3D point [X Y Z]', the projection (x, y) of the point onto 00204 # the rectified image is given by: 00205 # [u v w]' = P * [X Y Z 1]' 00206 # x = u / w 00207 # y = v / w 00208 # This holds for both images of a stereo pair. 00209 float64[12] P # 3x4 row-major matrix 00210 00211 00212 ####################################################################### 00213 # Operational Parameters # 00214 ####################################################################### 00215 # These define the image region actually captured by the camera # 00216 # driver. Although they affect the geometry of the output image, they # 00217 # may be changed freely without recalibrating the camera. # 00218 ####################################################################### 00219 00220 # Binning refers here to any camera setting which combines rectangular 00221 # neighborhoods of pixels into larger "super-pixels." It reduces the 00222 # resolution of the output image to 00223 # (width / binning_x) x (height / binning_y). 00224 # The default values binning_x = binning_y = 0 is considered the same 00225 # as binning_x = binning_y = 1 (no subsampling). 00226 uint32 binning_x 00227 uint32 binning_y 00228 00229 # Region of interest (subwindow of full camera resolution), given in 00230 # full resolution (unbinned) image coordinates. A particular ROI 00231 # always denotes the same window of pixels on the camera sensor, 00232 # regardless of binning settings. 00233 # The default setting of roi (all values 0) is considered the same as 00234 # full resolution (roi.width = width, roi.height = height). 00235 RegionOfInterest roi 00236 00237 ================================================================================ 00238 MSG: sensor_msgs/RegionOfInterest 00239 # This message is used to specify a region of interest within an image. 00240 # 00241 # When used to specify the ROI setting of the camera when the image was 00242 # taken, the height and width fields should either match the height and 00243 # width fields for the associated image; or height = width = 0 00244 # indicates that the full resolution image was captured. 00245 00246 uint32 x_offset # Leftmost pixel of the ROI 00247 # (0 if the ROI includes the left edge of the image) 00248 uint32 y_offset # Topmost pixel of the ROI 00249 # (0 if the ROI includes the top edge of the image) 00250 uint32 height # Height of ROI 00251 uint32 width # Width of ROI 00252 00253 # True if a distinct rectified ROI should be calculated from the "raw" 00254 # ROI in this message. Typically this should be False if the full image 00255 # is captured (ROI not used), and True if a subwindow is captured (ROI 00256 # used). 
00257 bool do_rectify 00258 00259 ================================================================================ 00260 MSG: object_recognition_gui/ModelHypothesisList 00261 ModelHypothesis[] hypotheses 00262 00263 #initial guess if this can be a correct recognition result at all 00264 bool accept 00265 ================================================================================ 00266 MSG: object_recognition_gui/ModelHypothesis 00267 #describes a hypothesis about a recognized object (mesh+pose) 00268 00269 arm_navigation_msgs/Shape mesh 00270 geometry_msgs/PoseStamped pose 00271 00272 ================================================================================ 00273 MSG: arm_navigation_msgs/Shape 00274 byte SPHERE=0 00275 byte BOX=1 00276 byte CYLINDER=2 00277 byte MESH=3 00278 00279 byte type 00280 00281 00282 #### define sphere, box, cylinder #### 00283 # the origin of each shape is considered at the shape's center 00284 00285 # for sphere 00286 # radius := dimensions[0] 00287 00288 # for cylinder 00289 # radius := dimensions[0] 00290 # length := dimensions[1] 00291 # the length is along the Z axis 00292 00293 # for box 00294 # size_x := dimensions[0] 00295 # size_y := dimensions[1] 00296 # size_z := dimensions[2] 00297 float64[] dimensions 00298 00299 00300 #### define mesh #### 00301 00302 # list of triangles; triangle k is defined by tre vertices located 00303 # at indices triangles[3k], triangles[3k+1], triangles[3k+2] 00304 int32[] triangles 00305 geometry_msgs/Point[] vertices 00306 00307 ================================================================================ 00308 MSG: geometry_msgs/Point 00309 # This contains the position of a point in free space 00310 float64 x 00311 float64 y 00312 float64 z 00313 00314 ================================================================================ 00315 MSG: geometry_msgs/PoseStamped 00316 # A Pose with reference coordinate frame and timestamp 00317 Header header 00318 Pose pose 00319 00320 ================================================================================ 00321 MSG: geometry_msgs/Pose 00322 # A representation of pose in free space, composed of postion and orientation. 00323 Point position 00324 Quaternion orientation 00325 00326 ================================================================================ 00327 MSG: geometry_msgs/Quaternion 00328 # This represents an orientation in free space in quaternion form. 00329 00330 float64 x 00331 float64 y 00332 float64 z 00333 float64 w 00334 00335 ================================================================================ 00336 MSG: object_recognition_gui/ObjectRecognitionGuiActionResult 00337 # ====== DO NOT MODIFY! 
AUTOGENERATED FROM AN ACTION DEFINITION ====== 00338 00339 Header header 00340 actionlib_msgs/GoalStatus status 00341 ObjectRecognitionGuiResult result 00342 00343 ================================================================================ 00344 MSG: actionlib_msgs/GoalStatus 00345 GoalID goal_id 00346 uint8 status 00347 uint8 PENDING = 0 # The goal has yet to be processed by the action server 00348 uint8 ACTIVE = 1 # The goal is currently being processed by the action server 00349 uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing 00350 # and has since completed its execution (Terminal State) 00351 uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State) 00352 uint8 ABORTED = 4 # The goal was aborted during execution by the action server due 00353 # to some failure (Terminal State) 00354 uint8 REJECTED = 5 # The goal was rejected by the action server without being processed, 00355 # because the goal was unattainable or invalid (Terminal State) 00356 uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing 00357 # and has not yet completed execution 00358 uint8 RECALLING = 7 # The goal received a cancel request before it started executing, 00359 # but the action server has not yet confirmed that the goal is canceled 00360 uint8 RECALLED = 8 # The goal received a cancel request before it started executing 00361 # and was successfully cancelled (Terminal State) 00362 uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be 00363 # sent over the wire by an action server 00364 00365 #Allow for the user to associate a string with GoalStatus for debugging 00366 string text 00367 00368 00369 ================================================================================ 00370 MSG: object_recognition_gui/ObjectRecognitionGuiResult 00371 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ====== 00372 00373 #the index of the model hypothesis that the user has selected for each cluster 00374 #values below 0 mean 'reject all hypotheses' 00375 int32[] selected_hypothesis_indices 00376 00377 ================================================================================ 00378 MSG: object_recognition_gui/ObjectRecognitionGuiActionFeedback 00379 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ====== 00380 00381 Header header 00382 actionlib_msgs/GoalStatus status 00383 ObjectRecognitionGuiFeedback feedback 00384 00385 ================================================================================ 00386 MSG: object_recognition_gui/ObjectRecognitionGuiFeedback 00387 # ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ====== 00388 00389 00390 """ 00391 __slots__ = ['action_goal','action_result','action_feedback'] 00392 _slot_types = ['object_recognition_gui/ObjectRecognitionGuiActionGoal','object_recognition_gui/ObjectRecognitionGuiActionResult','object_recognition_gui/ObjectRecognitionGuiActionFeedback'] 00393 00394 def __init__(self, *args, **kwds): 00395 """ 00396 Constructor. Any message fields that are implicitly/explicitly 00397 set to None will be assigned a default value. The recommend 00398 use is keyword arguments as this is more robust to future message 00399 changes. You cannot mix in-order arguments and keyword arguments. 
00400 00401 The available fields are: 00402 action_goal,action_result,action_feedback 00403 00404 @param args: complete set of field values, in .msg order 00405 @param kwds: use keyword arguments corresponding to message field names 00406 to set specific fields. 00407 """ 00408 if args or kwds: 00409 super(ObjectRecognitionGuiAction, self).__init__(*args, **kwds) 00410 #message fields cannot be None, assign default values for those that are 00411 if self.action_goal is None: 00412 self.action_goal = object_recognition_gui.msg.ObjectRecognitionGuiActionGoal() 00413 if self.action_result is None: 00414 self.action_result = object_recognition_gui.msg.ObjectRecognitionGuiActionResult() 00415 if self.action_feedback is None: 00416 self.action_feedback = object_recognition_gui.msg.ObjectRecognitionGuiActionFeedback() 00417 else: 00418 self.action_goal = object_recognition_gui.msg.ObjectRecognitionGuiActionGoal() 00419 self.action_result = object_recognition_gui.msg.ObjectRecognitionGuiActionResult() 00420 self.action_feedback = object_recognition_gui.msg.ObjectRecognitionGuiActionFeedback() 00421 00422 def _get_types(self): 00423 """ 00424 internal API method 00425 """ 00426 return self._slot_types 00427 00428 def serialize(self, buff): 00429 """ 00430 serialize message into buffer 00431 @param buff: buffer 00432 @type buff: StringIO 00433 """ 00434 try: 00435 _x = self 00436 buff.write(_struct_3I.pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs)) 00437 _x = self.action_goal.header.frame_id 00438 length = len(_x) 00439 buff.write(struct.pack('<I%ss'%length, length, _x)) 00440 _x = self 00441 buff.write(_struct_2I.pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs)) 00442 _x = self.action_goal.goal_id.id 00443 length = len(_x) 00444 buff.write(struct.pack('<I%ss'%length, length, _x)) 00445 _x = self 00446 buff.write(_struct_3I.pack(_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs)) 00447 _x = self.action_goal.goal.image.header.frame_id 00448 length = len(_x) 00449 buff.write(struct.pack('<I%ss'%length, length, _x)) 00450 _x = self 00451 buff.write(_struct_2I.pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width)) 00452 _x = self.action_goal.goal.image.encoding 00453 length = len(_x) 00454 buff.write(struct.pack('<I%ss'%length, length, _x)) 00455 _x = self 00456 buff.write(_struct_BI.pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step)) 00457 _x = self.action_goal.goal.image.data 00458 length = len(_x) 00459 # - if encoded as a list instead, serialize as bytes instead of string 00460 if type(_x) in [list, tuple]: 00461 buff.write(struct.pack('<I%sB'%length, length, *_x)) 00462 else: 00463 buff.write(struct.pack('<I%ss'%length, length, _x)) 00464 _x = self 00465 buff.write(_struct_3I.pack(_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs)) 00466 _x = self.action_goal.goal.camera_info.header.frame_id 00467 length = len(_x) 00468 buff.write(struct.pack('<I%ss'%length, length, _x)) 00469 _x = self 00470 buff.write(_struct_2I.pack(_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width)) 00471 _x = self.action_goal.goal.camera_info.distortion_model 00472 length = len(_x) 00473 buff.write(struct.pack('<I%ss'%length, length, _x)) 00474 length = len(self.action_goal.goal.camera_info.D) 00475 
buff.write(_struct_I.pack(length)) 00476 pattern = '<%sd'%length 00477 buff.write(struct.pack(pattern, *self.action_goal.goal.camera_info.D)) 00478 buff.write(_struct_9d.pack(*self.action_goal.goal.camera_info.K)) 00479 buff.write(_struct_9d.pack(*self.action_goal.goal.camera_info.R)) 00480 buff.write(_struct_12d.pack(*self.action_goal.goal.camera_info.P)) 00481 _x = self 00482 buff.write(_struct_6IB.pack(_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify)) 00483 length = len(self.action_goal.goal.model_hypotheses) 00484 buff.write(_struct_I.pack(length)) 00485 for val1 in self.action_goal.goal.model_hypotheses: 00486 length = len(val1.hypotheses) 00487 buff.write(_struct_I.pack(length)) 00488 for val2 in val1.hypotheses: 00489 _v1 = val2.mesh 00490 buff.write(_struct_b.pack(_v1.type)) 00491 length = len(_v1.dimensions) 00492 buff.write(_struct_I.pack(length)) 00493 pattern = '<%sd'%length 00494 buff.write(struct.pack(pattern, *_v1.dimensions)) 00495 length = len(_v1.triangles) 00496 buff.write(_struct_I.pack(length)) 00497 pattern = '<%si'%length 00498 buff.write(struct.pack(pattern, *_v1.triangles)) 00499 length = len(_v1.vertices) 00500 buff.write(_struct_I.pack(length)) 00501 for val4 in _v1.vertices: 00502 _x = val4 00503 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z)) 00504 _v2 = val2.pose 00505 _v3 = _v2.header 00506 buff.write(_struct_I.pack(_v3.seq)) 00507 _v4 = _v3.stamp 00508 _x = _v4 00509 buff.write(_struct_2I.pack(_x.secs, _x.nsecs)) 00510 _x = _v3.frame_id 00511 length = len(_x) 00512 buff.write(struct.pack('<I%ss'%length, length, _x)) 00513 _v5 = _v2.pose 00514 _v6 = _v5.position 00515 _x = _v6 00516 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z)) 00517 _v7 = _v5.orientation 00518 _x = _v7 00519 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w)) 00520 buff.write(_struct_B.pack(val1.accept)) 00521 _x = self 00522 buff.write(_struct_3I.pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs)) 00523 _x = self.action_result.header.frame_id 00524 length = len(_x) 00525 buff.write(struct.pack('<I%ss'%length, length, _x)) 00526 _x = self 00527 buff.write(_struct_2I.pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs)) 00528 _x = self.action_result.status.goal_id.id 00529 length = len(_x) 00530 buff.write(struct.pack('<I%ss'%length, length, _x)) 00531 buff.write(_struct_B.pack(self.action_result.status.status)) 00532 _x = self.action_result.status.text 00533 length = len(_x) 00534 buff.write(struct.pack('<I%ss'%length, length, _x)) 00535 length = len(self.action_result.result.selected_hypothesis_indices) 00536 buff.write(_struct_I.pack(length)) 00537 pattern = '<%si'%length 00538 buff.write(struct.pack(pattern, *self.action_result.result.selected_hypothesis_indices)) 00539 _x = self 00540 buff.write(_struct_3I.pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs)) 00541 _x = self.action_feedback.header.frame_id 00542 length = len(_x) 00543 buff.write(struct.pack('<I%ss'%length, length, _x)) 00544 _x = self 00545 buff.write(_struct_2I.pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs)) 00546 _x = self.action_feedback.status.goal_id.id 
00547 length = len(_x) 00548 buff.write(struct.pack('<I%ss'%length, length, _x)) 00549 buff.write(_struct_B.pack(self.action_feedback.status.status)) 00550 _x = self.action_feedback.status.text 00551 length = len(_x) 00552 buff.write(struct.pack('<I%ss'%length, length, _x)) 00553 except struct.error as se: self._check_types(se) 00554 except TypeError as te: self._check_types(te) 00555 00556 def deserialize(self, str): 00557 """ 00558 unpack serialized message in str into this message instance 00559 @param str: byte array of serialized message 00560 @type str: str 00561 """ 00562 try: 00563 if self.action_goal is None: 00564 self.action_goal = object_recognition_gui.msg.ObjectRecognitionGuiActionGoal() 00565 if self.action_result is None: 00566 self.action_result = object_recognition_gui.msg.ObjectRecognitionGuiActionResult() 00567 if self.action_feedback is None: 00568 self.action_feedback = object_recognition_gui.msg.ObjectRecognitionGuiActionFeedback() 00569 end = 0 00570 _x = self 00571 start = end 00572 end += 12 00573 (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00574 start = end 00575 end += 4 00576 (length,) = _struct_I.unpack(str[start:end]) 00577 start = end 00578 end += length 00579 self.action_goal.header.frame_id = str[start:end] 00580 _x = self 00581 start = end 00582 end += 8 00583 (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 00584 start = end 00585 end += 4 00586 (length,) = _struct_I.unpack(str[start:end]) 00587 start = end 00588 end += length 00589 self.action_goal.goal_id.id = str[start:end] 00590 _x = self 00591 start = end 00592 end += 12 00593 (_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00594 start = end 00595 end += 4 00596 (length,) = _struct_I.unpack(str[start:end]) 00597 start = end 00598 end += length 00599 self.action_goal.goal.image.header.frame_id = str[start:end] 00600 _x = self 00601 start = end 00602 end += 8 00603 (_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _struct_2I.unpack(str[start:end]) 00604 start = end 00605 end += 4 00606 (length,) = _struct_I.unpack(str[start:end]) 00607 start = end 00608 end += length 00609 self.action_goal.goal.image.encoding = str[start:end] 00610 _x = self 00611 start = end 00612 end += 5 00613 (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _struct_BI.unpack(str[start:end]) 00614 start = end 00615 end += 4 00616 (length,) = _struct_I.unpack(str[start:end]) 00617 start = end 00618 end += length 00619 self.action_goal.goal.image.data = str[start:end] 00620 _x = self 00621 start = end 00622 end += 12 00623 (_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00624 start = end 00625 end += 4 00626 (length,) = _struct_I.unpack(str[start:end]) 00627 start = end 00628 end += length 00629 self.action_goal.goal.camera_info.header.frame_id = str[start:end] 00630 _x = self 00631 start = end 00632 end += 8 00633 (_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width,) = _struct_2I.unpack(str[start:end]) 00634 start = end 00635 end += 4 00636 (length,) = _struct_I.unpack(str[start:end]) 00637 start = end 00638 end += length 00639 
self.action_goal.goal.camera_info.distortion_model = str[start:end] 00640 start = end 00641 end += 4 00642 (length,) = _struct_I.unpack(str[start:end]) 00643 pattern = '<%sd'%length 00644 start = end 00645 end += struct.calcsize(pattern) 00646 self.action_goal.goal.camera_info.D = struct.unpack(pattern, str[start:end]) 00647 start = end 00648 end += 72 00649 self.action_goal.goal.camera_info.K = _struct_9d.unpack(str[start:end]) 00650 start = end 00651 end += 72 00652 self.action_goal.goal.camera_info.R = _struct_9d.unpack(str[start:end]) 00653 start = end 00654 end += 96 00655 self.action_goal.goal.camera_info.P = _struct_12d.unpack(str[start:end]) 00656 _x = self 00657 start = end 00658 end += 25 00659 (_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end]) 00660 self.action_goal.goal.camera_info.roi.do_rectify = bool(self.action_goal.goal.camera_info.roi.do_rectify) 00661 start = end 00662 end += 4 00663 (length,) = _struct_I.unpack(str[start:end]) 00664 self.action_goal.goal.model_hypotheses = [] 00665 for i in range(0, length): 00666 val1 = object_recognition_gui.msg.ModelHypothesisList() 00667 start = end 00668 end += 4 00669 (length,) = _struct_I.unpack(str[start:end]) 00670 val1.hypotheses = [] 00671 for i in range(0, length): 00672 val2 = object_recognition_gui.msg.ModelHypothesis() 00673 _v8 = val2.mesh 00674 start = end 00675 end += 1 00676 (_v8.type,) = _struct_b.unpack(str[start:end]) 00677 start = end 00678 end += 4 00679 (length,) = _struct_I.unpack(str[start:end]) 00680 pattern = '<%sd'%length 00681 start = end 00682 end += struct.calcsize(pattern) 00683 _v8.dimensions = struct.unpack(pattern, str[start:end]) 00684 start = end 00685 end += 4 00686 (length,) = _struct_I.unpack(str[start:end]) 00687 pattern = '<%si'%length 00688 start = end 00689 end += struct.calcsize(pattern) 00690 _v8.triangles = struct.unpack(pattern, str[start:end]) 00691 start = end 00692 end += 4 00693 (length,) = _struct_I.unpack(str[start:end]) 00694 _v8.vertices = [] 00695 for i in range(0, length): 00696 val4 = geometry_msgs.msg.Point() 00697 _x = val4 00698 start = end 00699 end += 24 00700 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end]) 00701 _v8.vertices.append(val4) 00702 _v9 = val2.pose 00703 _v10 = _v9.header 00704 start = end 00705 end += 4 00706 (_v10.seq,) = _struct_I.unpack(str[start:end]) 00707 _v11 = _v10.stamp 00708 _x = _v11 00709 start = end 00710 end += 8 00711 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end]) 00712 start = end 00713 end += 4 00714 (length,) = _struct_I.unpack(str[start:end]) 00715 start = end 00716 end += length 00717 _v10.frame_id = str[start:end] 00718 _v12 = _v9.pose 00719 _v13 = _v12.position 00720 _x = _v13 00721 start = end 00722 end += 24 00723 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end]) 00724 _v14 = _v12.orientation 00725 _x = _v14 00726 start = end 00727 end += 32 00728 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end]) 00729 val1.hypotheses.append(val2) 00730 start = end 00731 end += 1 00732 (val1.accept,) = _struct_B.unpack(str[start:end]) 00733 val1.accept = bool(val1.accept) 00734 self.action_goal.goal.model_hypotheses.append(val1) 00735 _x = self 00736 start = end 00737 end += 12 00738 (_x.action_result.header.seq, 
_x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00739 start = end 00740 end += 4 00741 (length,) = _struct_I.unpack(str[start:end]) 00742 start = end 00743 end += length 00744 self.action_result.header.frame_id = str[start:end] 00745 _x = self 00746 start = end 00747 end += 8 00748 (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 00749 start = end 00750 end += 4 00751 (length,) = _struct_I.unpack(str[start:end]) 00752 start = end 00753 end += length 00754 self.action_result.status.goal_id.id = str[start:end] 00755 start = end 00756 end += 1 00757 (self.action_result.status.status,) = _struct_B.unpack(str[start:end]) 00758 start = end 00759 end += 4 00760 (length,) = _struct_I.unpack(str[start:end]) 00761 start = end 00762 end += length 00763 self.action_result.status.text = str[start:end] 00764 start = end 00765 end += 4 00766 (length,) = _struct_I.unpack(str[start:end]) 00767 pattern = '<%si'%length 00768 start = end 00769 end += struct.calcsize(pattern) 00770 self.action_result.result.selected_hypothesis_indices = struct.unpack(pattern, str[start:end]) 00771 _x = self 00772 start = end 00773 end += 12 00774 (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00775 start = end 00776 end += 4 00777 (length,) = _struct_I.unpack(str[start:end]) 00778 start = end 00779 end += length 00780 self.action_feedback.header.frame_id = str[start:end] 00781 _x = self 00782 start = end 00783 end += 8 00784 (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 00785 start = end 00786 end += 4 00787 (length,) = _struct_I.unpack(str[start:end]) 00788 start = end 00789 end += length 00790 self.action_feedback.status.goal_id.id = str[start:end] 00791 start = end 00792 end += 1 00793 (self.action_feedback.status.status,) = _struct_B.unpack(str[start:end]) 00794 start = end 00795 end += 4 00796 (length,) = _struct_I.unpack(str[start:end]) 00797 start = end 00798 end += length 00799 self.action_feedback.status.text = str[start:end] 00800 return self 00801 except struct.error as e: 00802 raise roslib.message.DeserializationError(e) #most likely buffer underfill 00803 00804 00805 def serialize_numpy(self, buff, numpy): 00806 """ 00807 serialize message with numpy array types into buffer 00808 @param buff: buffer 00809 @type buff: StringIO 00810 @param numpy: numpy python module 00811 @type numpy module 00812 """ 00813 try: 00814 _x = self 00815 buff.write(_struct_3I.pack(_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs)) 00816 _x = self.action_goal.header.frame_id 00817 length = len(_x) 00818 buff.write(struct.pack('<I%ss'%length, length, _x)) 00819 _x = self 00820 buff.write(_struct_2I.pack(_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs)) 00821 _x = self.action_goal.goal_id.id 00822 length = len(_x) 00823 buff.write(struct.pack('<I%ss'%length, length, _x)) 00824 _x = self 00825 buff.write(_struct_3I.pack(_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs)) 00826 _x = self.action_goal.goal.image.header.frame_id 00827 length = len(_x) 00828 buff.write(struct.pack('<I%ss'%length, length, _x)) 00829 _x = self 00830 
buff.write(_struct_2I.pack(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width)) 00831 _x = self.action_goal.goal.image.encoding 00832 length = len(_x) 00833 buff.write(struct.pack('<I%ss'%length, length, _x)) 00834 _x = self 00835 buff.write(_struct_BI.pack(_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step)) 00836 _x = self.action_goal.goal.image.data 00837 length = len(_x) 00838 # - if encoded as a list instead, serialize as bytes instead of string 00839 if type(_x) in [list, tuple]: 00840 buff.write(struct.pack('<I%sB'%length, length, *_x)) 00841 else: 00842 buff.write(struct.pack('<I%ss'%length, length, _x)) 00843 _x = self 00844 buff.write(_struct_3I.pack(_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs)) 00845 _x = self.action_goal.goal.camera_info.header.frame_id 00846 length = len(_x) 00847 buff.write(struct.pack('<I%ss'%length, length, _x)) 00848 _x = self 00849 buff.write(_struct_2I.pack(_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width)) 00850 _x = self.action_goal.goal.camera_info.distortion_model 00851 length = len(_x) 00852 buff.write(struct.pack('<I%ss'%length, length, _x)) 00853 length = len(self.action_goal.goal.camera_info.D) 00854 buff.write(_struct_I.pack(length)) 00855 pattern = '<%sd'%length 00856 buff.write(self.action_goal.goal.camera_info.D.tostring()) 00857 buff.write(self.action_goal.goal.camera_info.K.tostring()) 00858 buff.write(self.action_goal.goal.camera_info.R.tostring()) 00859 buff.write(self.action_goal.goal.camera_info.P.tostring()) 00860 _x = self 00861 buff.write(_struct_6IB.pack(_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify)) 00862 length = len(self.action_goal.goal.model_hypotheses) 00863 buff.write(_struct_I.pack(length)) 00864 for val1 in self.action_goal.goal.model_hypotheses: 00865 length = len(val1.hypotheses) 00866 buff.write(_struct_I.pack(length)) 00867 for val2 in val1.hypotheses: 00868 _v15 = val2.mesh 00869 buff.write(_struct_b.pack(_v15.type)) 00870 length = len(_v15.dimensions) 00871 buff.write(_struct_I.pack(length)) 00872 pattern = '<%sd'%length 00873 buff.write(_v15.dimensions.tostring()) 00874 length = len(_v15.triangles) 00875 buff.write(_struct_I.pack(length)) 00876 pattern = '<%si'%length 00877 buff.write(_v15.triangles.tostring()) 00878 length = len(_v15.vertices) 00879 buff.write(_struct_I.pack(length)) 00880 for val4 in _v15.vertices: 00881 _x = val4 00882 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z)) 00883 _v16 = val2.pose 00884 _v17 = _v16.header 00885 buff.write(_struct_I.pack(_v17.seq)) 00886 _v18 = _v17.stamp 00887 _x = _v18 00888 buff.write(_struct_2I.pack(_x.secs, _x.nsecs)) 00889 _x = _v17.frame_id 00890 length = len(_x) 00891 buff.write(struct.pack('<I%ss'%length, length, _x)) 00892 _v19 = _v16.pose 00893 _v20 = _v19.position 00894 _x = _v20 00895 buff.write(_struct_3d.pack(_x.x, _x.y, _x.z)) 00896 _v21 = _v19.orientation 00897 _x = _v21 00898 buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w)) 00899 buff.write(_struct_B.pack(val1.accept)) 00900 _x = self 00901 buff.write(_struct_3I.pack(_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs)) 00902 _x = 
self.action_result.header.frame_id 00903 length = len(_x) 00904 buff.write(struct.pack('<I%ss'%length, length, _x)) 00905 _x = self 00906 buff.write(_struct_2I.pack(_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs)) 00907 _x = self.action_result.status.goal_id.id 00908 length = len(_x) 00909 buff.write(struct.pack('<I%ss'%length, length, _x)) 00910 buff.write(_struct_B.pack(self.action_result.status.status)) 00911 _x = self.action_result.status.text 00912 length = len(_x) 00913 buff.write(struct.pack('<I%ss'%length, length, _x)) 00914 length = len(self.action_result.result.selected_hypothesis_indices) 00915 buff.write(_struct_I.pack(length)) 00916 pattern = '<%si'%length 00917 buff.write(self.action_result.result.selected_hypothesis_indices.tostring()) 00918 _x = self 00919 buff.write(_struct_3I.pack(_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs)) 00920 _x = self.action_feedback.header.frame_id 00921 length = len(_x) 00922 buff.write(struct.pack('<I%ss'%length, length, _x)) 00923 _x = self 00924 buff.write(_struct_2I.pack(_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs)) 00925 _x = self.action_feedback.status.goal_id.id 00926 length = len(_x) 00927 buff.write(struct.pack('<I%ss'%length, length, _x)) 00928 buff.write(_struct_B.pack(self.action_feedback.status.status)) 00929 _x = self.action_feedback.status.text 00930 length = len(_x) 00931 buff.write(struct.pack('<I%ss'%length, length, _x)) 00932 except struct.error as se: self._check_types(se) 00933 except TypeError as te: self._check_types(te) 00934 00935 def deserialize_numpy(self, str, numpy): 00936 """ 00937 unpack serialized message in str into this message instance using numpy for array types 00938 @param str: byte array of serialized message 00939 @type str: str 00940 @param numpy: numpy python module 00941 @type numpy: module 00942 """ 00943 try: 00944 if self.action_goal is None: 00945 self.action_goal = object_recognition_gui.msg.ObjectRecognitionGuiActionGoal() 00946 if self.action_result is None: 00947 self.action_result = object_recognition_gui.msg.ObjectRecognitionGuiActionResult() 00948 if self.action_feedback is None: 00949 self.action_feedback = object_recognition_gui.msg.ObjectRecognitionGuiActionFeedback() 00950 end = 0 00951 _x = self 00952 start = end 00953 end += 12 00954 (_x.action_goal.header.seq, _x.action_goal.header.stamp.secs, _x.action_goal.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00955 start = end 00956 end += 4 00957 (length,) = _struct_I.unpack(str[start:end]) 00958 start = end 00959 end += length 00960 self.action_goal.header.frame_id = str[start:end] 00961 _x = self 00962 start = end 00963 end += 8 00964 (_x.action_goal.goal_id.stamp.secs, _x.action_goal.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 00965 start = end 00966 end += 4 00967 (length,) = _struct_I.unpack(str[start:end]) 00968 start = end 00969 end += length 00970 self.action_goal.goal_id.id = str[start:end] 00971 _x = self 00972 start = end 00973 end += 12 00974 (_x.action_goal.goal.image.header.seq, _x.action_goal.goal.image.header.stamp.secs, _x.action_goal.goal.image.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 00975 start = end 00976 end += 4 00977 (length,) = _struct_I.unpack(str[start:end]) 00978 start = end 00979 end += length 00980 self.action_goal.goal.image.header.frame_id = str[start:end] 00981 _x = self 00982 start = end 00983 end += 8 00984 
(_x.action_goal.goal.image.height, _x.action_goal.goal.image.width,) = _struct_2I.unpack(str[start:end]) 00985 start = end 00986 end += 4 00987 (length,) = _struct_I.unpack(str[start:end]) 00988 start = end 00989 end += length 00990 self.action_goal.goal.image.encoding = str[start:end] 00991 _x = self 00992 start = end 00993 end += 5 00994 (_x.action_goal.goal.image.is_bigendian, _x.action_goal.goal.image.step,) = _struct_BI.unpack(str[start:end]) 00995 start = end 00996 end += 4 00997 (length,) = _struct_I.unpack(str[start:end]) 00998 start = end 00999 end += length 01000 self.action_goal.goal.image.data = str[start:end] 01001 _x = self 01002 start = end 01003 end += 12 01004 (_x.action_goal.goal.camera_info.header.seq, _x.action_goal.goal.camera_info.header.stamp.secs, _x.action_goal.goal.camera_info.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 01005 start = end 01006 end += 4 01007 (length,) = _struct_I.unpack(str[start:end]) 01008 start = end 01009 end += length 01010 self.action_goal.goal.camera_info.header.frame_id = str[start:end] 01011 _x = self 01012 start = end 01013 end += 8 01014 (_x.action_goal.goal.camera_info.height, _x.action_goal.goal.camera_info.width,) = _struct_2I.unpack(str[start:end]) 01015 start = end 01016 end += 4 01017 (length,) = _struct_I.unpack(str[start:end]) 01018 start = end 01019 end += length 01020 self.action_goal.goal.camera_info.distortion_model = str[start:end] 01021 start = end 01022 end += 4 01023 (length,) = _struct_I.unpack(str[start:end]) 01024 pattern = '<%sd'%length 01025 start = end 01026 end += struct.calcsize(pattern) 01027 self.action_goal.goal.camera_info.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) 01028 start = end 01029 end += 72 01030 self.action_goal.goal.camera_info.K = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9) 01031 start = end 01032 end += 72 01033 self.action_goal.goal.camera_info.R = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=9) 01034 start = end 01035 end += 96 01036 self.action_goal.goal.camera_info.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=12) 01037 _x = self 01038 start = end 01039 end += 25 01040 (_x.action_goal.goal.camera_info.binning_x, _x.action_goal.goal.camera_info.binning_y, _x.action_goal.goal.camera_info.roi.x_offset, _x.action_goal.goal.camera_info.roi.y_offset, _x.action_goal.goal.camera_info.roi.height, _x.action_goal.goal.camera_info.roi.width, _x.action_goal.goal.camera_info.roi.do_rectify,) = _struct_6IB.unpack(str[start:end]) 01041 self.action_goal.goal.camera_info.roi.do_rectify = bool(self.action_goal.goal.camera_info.roi.do_rectify) 01042 start = end 01043 end += 4 01044 (length,) = _struct_I.unpack(str[start:end]) 01045 self.action_goal.goal.model_hypotheses = [] 01046 for i in range(0, length): 01047 val1 = object_recognition_gui.msg.ModelHypothesisList() 01048 start = end 01049 end += 4 01050 (length,) = _struct_I.unpack(str[start:end]) 01051 val1.hypotheses = [] 01052 for i in range(0, length): 01053 val2 = object_recognition_gui.msg.ModelHypothesis() 01054 _v22 = val2.mesh 01055 start = end 01056 end += 1 01057 (_v22.type,) = _struct_b.unpack(str[start:end]) 01058 start = end 01059 end += 4 01060 (length,) = _struct_I.unpack(str[start:end]) 01061 pattern = '<%sd'%length 01062 start = end 01063 end += struct.calcsize(pattern) 01064 _v22.dimensions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length) 01065 start = end 01066 end += 4 01067 (length,) = _struct_I.unpack(str[start:end]) 01068 
pattern = '<%si'%length 01069 start = end 01070 end += struct.calcsize(pattern) 01071 _v22.triangles = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length) 01072 start = end 01073 end += 4 01074 (length,) = _struct_I.unpack(str[start:end]) 01075 _v22.vertices = [] 01076 for i in range(0, length): 01077 val4 = geometry_msgs.msg.Point() 01078 _x = val4 01079 start = end 01080 end += 24 01081 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end]) 01082 _v22.vertices.append(val4) 01083 _v23 = val2.pose 01084 _v24 = _v23.header 01085 start = end 01086 end += 4 01087 (_v24.seq,) = _struct_I.unpack(str[start:end]) 01088 _v25 = _v24.stamp 01089 _x = _v25 01090 start = end 01091 end += 8 01092 (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end]) 01093 start = end 01094 end += 4 01095 (length,) = _struct_I.unpack(str[start:end]) 01096 start = end 01097 end += length 01098 _v24.frame_id = str[start:end] 01099 _v26 = _v23.pose 01100 _v27 = _v26.position 01101 _x = _v27 01102 start = end 01103 end += 24 01104 (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end]) 01105 _v28 = _v26.orientation 01106 _x = _v28 01107 start = end 01108 end += 32 01109 (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end]) 01110 val1.hypotheses.append(val2) 01111 start = end 01112 end += 1 01113 (val1.accept,) = _struct_B.unpack(str[start:end]) 01114 val1.accept = bool(val1.accept) 01115 self.action_goal.goal.model_hypotheses.append(val1) 01116 _x = self 01117 start = end 01118 end += 12 01119 (_x.action_result.header.seq, _x.action_result.header.stamp.secs, _x.action_result.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 01120 start = end 01121 end += 4 01122 (length,) = _struct_I.unpack(str[start:end]) 01123 start = end 01124 end += length 01125 self.action_result.header.frame_id = str[start:end] 01126 _x = self 01127 start = end 01128 end += 8 01129 (_x.action_result.status.goal_id.stamp.secs, _x.action_result.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 01130 start = end 01131 end += 4 01132 (length,) = _struct_I.unpack(str[start:end]) 01133 start = end 01134 end += length 01135 self.action_result.status.goal_id.id = str[start:end] 01136 start = end 01137 end += 1 01138 (self.action_result.status.status,) = _struct_B.unpack(str[start:end]) 01139 start = end 01140 end += 4 01141 (length,) = _struct_I.unpack(str[start:end]) 01142 start = end 01143 end += length 01144 self.action_result.status.text = str[start:end] 01145 start = end 01146 end += 4 01147 (length,) = _struct_I.unpack(str[start:end]) 01148 pattern = '<%si'%length 01149 start = end 01150 end += struct.calcsize(pattern) 01151 self.action_result.result.selected_hypothesis_indices = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length) 01152 _x = self 01153 start = end 01154 end += 12 01155 (_x.action_feedback.header.seq, _x.action_feedback.header.stamp.secs, _x.action_feedback.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) 01156 start = end 01157 end += 4 01158 (length,) = _struct_I.unpack(str[start:end]) 01159 start = end 01160 end += length 01161 self.action_feedback.header.frame_id = str[start:end] 01162 _x = self 01163 start = end 01164 end += 8 01165 (_x.action_feedback.status.goal_id.stamp.secs, _x.action_feedback.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end]) 01166 start = end 01167 end += 4 01168 (length,) = _struct_I.unpack(str[start:end]) 01169 start = end 01170 end += length 01171 self.action_feedback.status.goal_id.id = str[start:end] 01172 start = end 
01173 end += 1 01174 (self.action_feedback.status.status,) = _struct_B.unpack(str[start:end]) 01175 start = end 01176 end += 4 01177 (length,) = _struct_I.unpack(str[start:end]) 01178 start = end 01179 end += length 01180 self.action_feedback.status.text = str[start:end] 01181 return self 01182 except struct.error as e: 01183 raise roslib.message.DeserializationError(e) #most likely buffer underfill 01184 01185 _struct_I = roslib.message.struct_I 01186 _struct_6IB = struct.Struct("<6IB") 01187 _struct_b = struct.Struct("<b") 01188 _struct_12d = struct.Struct("<12d") 01189 _struct_9d = struct.Struct("<9d") 01190 _struct_BI = struct.Struct("<BI") 01191 _struct_3I = struct.Struct("<3I") 01192 _struct_B = struct.Struct("<B") 01193 _struct_4d = struct.Struct("<4d") 01194 _struct_2I = struct.Struct("<2I") 01195 _struct_3d = struct.Struct("<3d")
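01196 # Example usage (illustrative sketch, not part of the generated API above):
01197 # a round-trip through serialize()/deserialize() for this message type.
01198 # It assumes a Python 2 ROS environment in which the object_recognition_gui
01199 # message package is built and importable; the field value set below is an
01200 # arbitrary placeholder. In normal operation this class is packed and
01201 # unpacked by actionlib/rospy rather than by hand.
01202 #
01203 #   from StringIO import StringIO          # buffer type expected by serialize()
01204 #   import object_recognition_gui.msg
01205 #
01206 #   msg = object_recognition_gui.msg.ObjectRecognitionGuiAction()
01207 #   msg.action_goal.goal.image.encoding = 'mono8'   # any encoding string
01208 #
01209 #   buff = StringIO()
01210 #   msg.serialize(buff)                     # pack all fields into the buffer
01211 #
01212 #   copy = object_recognition_gui.msg.ObjectRecognitionGuiAction()
01213 #   copy.deserialize(buff.getvalue())       # unpack the raw bytes again
01214 #   assert copy.action_goal.goal.image.encoding == 'mono8'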