deprojection_2D_to_3D.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, LaserScan
import message_filters
import tf2_ros
import tf2_geometry_msgs
from geometry_msgs.msg import Point, PointStamped, Pose


bridge = CvBridge()


class Deprojection(object):

    def __init__(self):

        self.rate = rospy.Rate(30)

        self.fx = 439.5975502749937
        self.fy = 439.5975502749937
        self.cx = 160.5
        self.cy = 120.5

        # Intrinsic matrix
        # fx = 439.5975502749937
        # fy = 439.5975502749937
        # cx = 160.5
        # cy = 120.5
        # self.K = np.array([[fx, 0, cx],
        #                    [0, fy, cy],
        #                    [0, 0, 1]])

        # Extrinsic matrix (transformation matrix T)
        # T = np.array([[R11, R12, R13, Tx],
        #               [R21, R22, R23, Ty],
        #               [R31, R32, R33, Tz],
        #               [0, 0, 0, 1]])
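        # For reference (a documentation note, not code used by the node): under the
        # standard pinhole model that these intrinsics describe, a 3D point (X, Y, Z)
        # expressed in the camera optical frame projects to the pixel
        #   u = fx * X / Z + cx,   v = fy * Y / Z + cy
        # so, given a measured depth Z for a pixel (u, v), the deprojection used below is
        #   X = (u - cx) * Z / fx,   Y = (v - cy) * Z / fy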
        self.T = self.get_transformation("nicla_camera", "relax_arm1_link1")

    def run(self):
        # Subscribe to the ROS topics and synchronize the image and range streams
        image_sub = message_filters.Subscriber(
            "/relax/nicla/camera/image_raw", Image
        )
        depth_sub = message_filters.Subscriber("/relax/nicla/tof", LaserScan)

        # ts = message_filters.TimeSynchronizer([image_sub, info_sub], 1)
        ts = message_filters.ApproximateTimeSynchronizer(
            [image_sub, depth_sub], 10, 0.1
        )  # , allow_headerless=True
        ts.registerCallback(self.callback)

        rospy.spin()

    def callback(self, msg_rgb, msg_depth):
        # Solve all of perception here...
        self.cv2_img = bridge.imgmsg_to_cv2(msg_rgb, "bgr8")

        self.img_shape = self.cv2_img.shape

        # To visualize the received image
        # cv2.imshow("Received img", self.cv2_img)
        # cv2.waitKey(1)

        # The ToF sensor is assumed to publish a single range reading
        self.depth = msg_depth.ranges[0]
        # print("Received depth: ", self.depth)

        self.central_pixel_to_3d(self.depth)

        print("INFO: Received frame and depth.")

    def central_pixel_to_3d(self, depth):
        """
        Convert the central pixel coordinates to 3D coordinates using camera intrinsics and extrinsics.
        Note: the central pixel is assumed to be the one for which the laser gives the depth info.

        Parameters:
        - depth: depth value of the pixel

        Returns:
        - point in 3D space (x, y, z), expressed in the target frame
        """
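        # Worked example (a sketch assuming a 320x240 image, so u = 160, v = 120, and
        # e.g. a measured depth of 1.0 m):
        #   x_norm = (160 - 160.5) / 439.5975502749937 ~= -0.0011
        #   y_norm = (120 - 120.5) / 439.5975502749937 ~= -0.0011
        # i.e. the central ray is almost parallel to the optical axis, so the resulting
        # 3D point lies roughly 1.0 m straight ahead of the camera before the extrinsic
        # transform is applied.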
        # u, v: pixel coordinates of the image centre (u along the width, v along the height)
        u = self.img_shape[1] // 2
        v = self.img_shape[0] // 2

        # Compute normalized coordinates in camera space
        cam_point = [0, 0, 1.0]
        cam_point[0] = (u - self.cx) / self.fx
        cam_point[1] = (v - self.cy) / self.fy

        print(cam_point)
        # Scale the normalized ray by the measured depth
        cam_point = [value * depth for value in cam_point]

        print(cam_point)
        # Re-order the axes: the optical depth axis (index 2) becomes x,
        # the horizontal axis (index 0) becomes y and the vertical axis (index 1) becomes z
        x = cam_point[2]
        y = cam_point[0]
        z = cam_point[1]

        # Transform camera coordinates to world coordinates
        point_wrt_source = Point(x, y, z)
        point_wrt_target = self.transform_point(self.T, point_wrt_source)

        print(point_wrt_target)

        return point_wrt_target

    def DEPRECATED_pixel_to_3d(self, u, v, depth, K, T):
        """
        Convert pixel coordinates to 3D coordinates using camera intrinsics and extrinsics.

        Parameters:
        - u, v: pixel coordinates in the image
        - depth: depth value of the pixel
        - K: intrinsic camera matrix (3x3)
        - T: extrinsic camera matrix (4x4)

        Returns:
        - point in 3D space (x, y, z)
        """
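        # Documentation note: for an intrinsic matrix K with zero skew, multiplying by
        # K_inv reproduces the same normalisation as in central_pixel_to_3d above, since
        #   K_inv @ [u, v, 1]^T = [(u - cx) / fx, (v - cy) / fy, 1]^T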

        # Convert pixel coordinates to homogeneous coordinates
        pixel_homogeneous = np.array([[u], [v], [1]])

        # Compute inverse of intrinsic matrix
        K_inv = np.linalg.inv(K)

        # Compute normalized coordinates in camera space
        cam_point = np.dot(K_inv, pixel_homogeneous)

        print(cam_point.flatten())
        cam_point *= depth

        print(cam_point.flatten())

        # Transform camera coordinates to world coordinates
        world_point = np.dot(T[:3, :3], cam_point) + T[:3, 3:]

        return world_point.flatten()

    def transform_point(self, transformation, point_wrt_source):
        # Apply the tf2 transform to the point and return plain (x, y, z) coordinates
        point_wrt_target = tf2_geometry_msgs.do_transform_point(
            PointStamped(point=point_wrt_source), transformation
        ).point
        return [point_wrt_target.x, point_wrt_target.y, point_wrt_target.z]

    def get_transformation(self, source_frame, target_frame, tf_cache_duration=2.0):
        tf_buffer = tf2_ros.Buffer(rospy.Duration(tf_cache_duration))
        tf2_ros.TransformListener(tf_buffer)

        # Get the tf at the first available time
        # (lookup_transform takes the target frame first, then the source frame)
        try:
            transformation = tf_buffer.lookup_transform(
                target_frame, source_frame, rospy.Time(0), rospy.Duration(1.0)
            )  # 0.1
        except (
            tf2_ros.LookupException,
            tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException,
        ):
            rospy.logerr(
                "Unable to find the transformation from %s to %s"
                % (source_frame, target_frame)
            )
            return None
        return transformation


def main(args=None):
    # Initialize ROS node
    rospy.init_node("deproject_2D_to_3D", anonymous=False)

    recorder = Deprojection()
    recorder.run()


if __name__ == "__main__":
    main()
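A minimal way to try the node, assuming the script is installed as an executable of the nicla_vision_ros package, a ROS master is running, and the /relax/nicla/camera/image_raw and /relax/nicla/tof topics are being published:

    rosrun nicla_vision_ros deprojection_2D_to_3D.py

For each synchronized image/range pair the node prints the normalized ray, the depth-scaled camera-frame point and the transformed point in the relax_arm1_link1 frame.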


nicla_vision_ros
Author(s): Davide Torielli , Damiano Gasperini , Edoardo Del Bianco , Federico Rollo
autogenerated on Sat Nov 16 2024 03:38:18