camera.py
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Willow Garage, Inc. nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
## @author Hai Nguyen/hai@gatech.edu
#import videre as vd
#import opencv as cv
#from opencv import highgui as hg
from pkg import *

import cv
import numpy as np
from sensor_msgs.msg import CameraInfo

import util as ut
import math as m
import time


###################################################################################################
# Cameras
#   The camera classes in this file are separated into two different types of
#   classes.  In the first group are hardware camera classes meant for
#   abstracting away hardware devices that provide a grid of pixels at every
#   time step.  The second group contains abstract mathematical camera classes
#   which satisfy the definition of a camera as a device that transforms 3D
#   world points into 2D coordinates in a plane.
#
#   This was done so that different hardware camera classes can be swapped in
#   and out without requiring that the mathematical abstract camera be changed.
#   For example, to work off of a video recorded on disk, the geometric camera
#   class can remain the same while the hardware camera class is swapped for
#   one that provides images from a file (as sketched below).
###################################################################################################

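# A minimal sketch of the split described above (illustrative only, not part of
# the original package): a hardware camera object simply yields image pairs, so
# the file-backed StereoFile class defined below can be exercised with this
# loop, and the live ROSStereoListener plays the same role for ROS topics since
# it exposes the same next() interface.  The window names and video file
# arguments are placeholders.
def example_show_stereo_from_disk(left_avi, right_avi):
    cv.NamedWindow('left', 1)
    cv.NamedWindow('right', 1)
    # StereoFile raises StopIteration when either video runs out of frames.
    for left_img, right_img in StereoFile(left_avi, right_avi):
        cv.ShowImage('left', left_img)
        cv.ShowImage('right', right_img)
        cv.WaitKey(33)   # roughly 30Hz display rate
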
###################################################################################################
# Geometric reasoning functions 
###################################################################################################
#Builds a 4x4 homogeneous transform from a 3x3 rotation R and a 3x1 translation t
def homo_transform3d(R, t):
    T = np.matrix(np.zeros((4,4)))
    T[0:3, 0:3] = R
    T[0:3, 3]   = t
    T[3,3]      = 1.0
    return T

#Rotates the coordinate frame counterclockwise about the x axis
def Rx(a):
    return np.matrix([[1,  0,         0       ],
                      [0,  m.cos(a),  m.sin(a)],
                      [0, -m.sin(a),  m.cos(a)]])

#Rotates the coordinate frame counterclockwise about the y axis
def Ry(a):
    return np.matrix([[m.cos(a), 0, -m.sin(a)],
                      [0,        1,        0 ],
                      [m.sin(a), 0,  m.cos(a)]])

#Rotates the coordinate frame counterclockwise about the z axis
def Rz(a):
    return np.matrix([[ m.cos(a), m.sin(a), 0],
                      [-m.sin(a), m.cos(a), 0],
                      [ 0,        0,        1]])

#Composes the frame rotations about the x, y, and z axes
def rotation(rx,ry,rz):
    return Rx(rx) * Ry(ry) * Rz(rz)

#Returns two homogeneous 2D points where the line ax + by + c = 0 crosses
#y = 0 and y = img_height (assumes the line is not horizontal, i.e. a != 0)
def points_on_line(homogeneous2d_line, img_height):
    a = homogeneous2d_line[0,0]
    b = homogeneous2d_line[1,0]
    c = homogeneous2d_line[2,0]
    if b == 0:
        x = -c/a
        return [np.matrix([x, 0, 1]).T, 
                np.matrix([x, img_height, 1]).T]
    else:
        return [np.matrix([-c/a, 0, 1]).T, 
                np.matrix([(-b/a) * img_height - (c/a), img_height, 1]).T]

def tuple_to_homo(p):
    return np.matrix([p[0], p[1], 1]).T

def cvpoint_to_homo(p):
    return np.matrix([p.x, p.y, 1]).T

def homo_to_cvpoint(p):
    p = p / p[-1,0]
    #return cv.Point(int(round(p[0,0])), int(round(p[1,0])))
    return (int(round(p[0,0])), int(round(p[1,0])))

#Converts a Rodrigues rotation vector into a 3x3 rotation matrix (note the returned transpose)
def rodrigues(vec):
    cvvec = ut.numpymat2cvmat(vec)
    dst   = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.Rodrigues2(cvvec, dst)
    return ut.cvmat2numpymat(dst).T

def normalize_homo_point(point):
    point = point / point[-1,0]
    return point

def homo_to_point(homo_point):
    p = normalize_homo_point(homo_point)
    return p[0:-1,:]

#Appends a row of ones, turning 3xN points into 4xN (or 2xN into 3xN) homogeneous points
def point_to_homo(point):
    return np.concatenate((point, 1.0+np.zeros((1,point.shape[1]))), axis=0)

#Ideal 3x4 projection matrix [I | 0]
def general_projection_matrix():
    P = np.matrix(np.zeros((3,4)))
    P[0:3, 0:3] = np.eye(3)
    return P

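# A small worked example of the helpers above (illustrative only, not part of
# the original package): build a rigid transform from frame rotations and a
# translation, apply it to a 3D point, and project the result through the
# ideal [I | 0] camera.  The angles, translation, and point are arbitrary.
def example_project_point():
    T = homo_transform3d(rotation(0.0, 0.0, m.pi/2.0),  # rotate the frame 90 degrees about z
                         np.matrix([0.0, 0.0, 4.0]).T)  # translation part of the transform
    X = np.matrix([1.0, 0.0, 2.0]).T                    # a 3D point
    X_cam   = T * point_to_homo(X)                      # 4x1 homogeneous point after the transform
    x_image = general_projection_matrix() * X_cam       # 3x1 homogeneous image point
    return homo_to_cvpoint(x_image)                     # integer (x, y) pixel tuple
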
###################################################################################################
# Hardware Cameras 
###################################################################################################
#Provides left/right OpenCV images from a pair of ROS image topics
class ROSStereoListener:
    def __init__(self, topics, rate=30.0, name='stereo_listener'):
        from sensor_msgs.msg import Image
        from cv_bridge import CvBridge, CvBridgeError
        import hrl_lib.rutils as ru
        self.listener = ru.GenericListener(name, [Image, Image], topics, rate)
        self.lbridge = CvBridge()
        self.rbridge = CvBridge()

    def next(self):
        #print 'ros_stereo_listener'
        lros, rros = self.listener.read(allow_duplication=False, willing_to_wait=True, warn=False, quiet=True)
        lcv = self.lbridge.imgmsg_to_cv(lros, 'bgr8')
        rcv = self.rbridge.imgmsg_to_cv(rros, 'bgr8')
        return lcv, rcv


#Provides left/right frames from a pair of video files on disk
class StereoFile:
    def __init__(self, left_file, right_file):
        self.left  = cv.CreateFileCapture(left_file)
        self.right = cv.CreateFileCapture(right_file)
        #self.total_frames = hg.cvGetCaptureProperty(self.left, hg.CV_CAP_PROP_FRAME_COUNT)

    def next(self):
        tup = (cv.QueryFrame(self.left), cv.QueryFrame(self.right))
        #frame_numb = hg.cvGetCaptureProperty(self.left, hg.CV_CAP_PROP_POS_FRAMES )

        #if tup[0] == None or tup[1] == None or frame_numb >= self.total_frames:
        if tup[0] is None or tup[1] is None:
            raise StopIteration()
        return tup

    def __iter__(self):
        return self

###################################################################################################
# Geometric Cameras
###################################################################################################
# StereoCamera(ROSCameraCalibration('/wide_stereo/left/image_rect_color'), ROSCameraCalibration('/wide_stereo/right/image_rect_color'))
class ROSStereoCalibration:
    def __init__(self, left_chan, right_chan):
        self.left  = ROSCameraCalibration(left_chan)
        self.right = ROSCameraCalibration(right_chan)
        #Wait until both cameras have published their calibration messages
        while not (self.left.has_msg and self.right.has_msg):
            time.sleep(.2)

        self.R = np.matrix(np.eye(3))                  #Using rectified images, no R needed.
        self.T = self.right.P[:, 3].copy()             #Copy so that we don't modify P by accident
        self.T[0,0] = -self.T[0,0] / self.right.P[0,0] #Adjust for scaling term in projection matrix

    ##
    # Triangulates a 3D point from a rectified stereo correspondence.
    # @param xleft  2x1 matrix, pixel coordinates in the left rectified image
    # @param xright 2x1 matrix, pixel coordinates in the right rectified image
    def triangulate_3d(self, xleft, xright):
        #Klp = np.linalg.inv(self.left.K)
        #Krp = np.linalg.inv(self.right.K)
        # Use the K embedded in P instead of K, as 
        # rectified images are the result of P
        Klp = np.linalg.inv(self.left.P[0:3, 0:3]) 
        Krp = np.linalg.inv(self.right.P[0:3, 0:3])
    
        wleft  = Klp * point_to_homo(xleft)
        wright = Krp * point_to_homo(xright)

        print 'ROSStereoCalibration: xleft', wleft.T
        print 'ROSStereoCalibration: xright', wright.T

        #Solve for the depths alpha, beta along the two viewing rays in the
        #least squares sense: alpha * wleft = beta * R * wright + T
        A     = np.concatenate((wleft, -self.R * wright), axis=1)
        b     = self.T
    
        x_hat          = np.linalg.inv(A.T * A) * A.T * b
        left_estimate  = x_hat[0,0] * wleft
        right_estimate = x_hat[1,0] * self.R * wright + self.T
        print 'ROSStereoCalibration: alpha, beta', x_hat
        print 'ROSStereoCalibration: left est', left_estimate.T
        print 'ROSStereoCalibration: right est', right_estimate.T
        print 'A * x_hat', (A * x_hat).T
        print 'T', b.T
        print 'A*x-b', np.linalg.norm((A*x_hat) - b)
    
        #Return the midpoint of the two closest ray points and their separation
        p = (left_estimate + right_estimate)/2.0
        print 'ROSStereoCalibration: p', p.T
        return {'point': p, 'error':np.linalg.norm(left_estimate- right_estimate)}


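# A hedged usage sketch for ROSStereoCalibration (illustrative only, not part
# of the original package): with a ROS node initialized and rectified stereo
# CameraInfo topics being published, triangulate a single hand-picked pixel
# correspondence.  The node name, topic names, and pixel coordinates are
# placeholders.
def example_triangulate():
    rospy.init_node('camera_triangulation_example', anonymous=True)
    calib  = ROSStereoCalibration('/wide_stereo/left/camera_info',
                                  '/wide_stereo/right/camera_info')
    xleft  = np.matrix([320.0, 240.0]).T   # 2x1 pixel in the left rectified image
    xright = np.matrix([300.0, 240.0]).T   # its match in the right rectified image
    result = calib.triangulate_3d(xleft, xright)
    # 'point' is the 3x1 midpoint of the closest points on the two viewing rays
    # (in the left camera frame); 'error' is the gap between those rays.
    return result['point'], result['error']
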
#Subscribes to a CameraInfo topic and stores the camera's calibration (D, K, R, P)
class ROSCameraCalibration:
    def __init__(self, channel):
        self.has_msg = False
        rospy.Subscriber(channel, CameraInfo, self.camera_info)

    def camera_info(self, msg):
        self.distortion = np.matrix(msg.D)
        self.K = np.reshape(np.matrix(msg.K), (3,3))
        self.R = np.reshape(np.matrix(msg.R), (3,3))
        self.P = np.reshape(np.matrix(msg.P), (3,4))
        self.w = msg.width
        self.h = msg.height
        self.has_msg = True

    #Projects a 4x1 homogeneous 3D point into 3x1 homogeneous image coordinates
    def project(self, p):
        return self.P * p


if __name__ == "__main__":
    KNOWN_CAMERAS = {}
    test = "test_gain"

    #Note: this test depends on the videre module (vd) and a VidereHDR camera
    #class, neither of which is provided by this file (the vd import is
    #commented out above), so it only runs where those are available.
    if 'test_hdr' == test:
        cv.NamedWindow("left_eye",  1)
        cv.NamedWindow("right_eye", 1)

        cv.NamedWindow("left_eye_low",  1)
        cv.NamedWindow("right_eye_low", 1)

        cam = VidereHDR(0)
        base_exposure = 100

        for ll, lr, l, r in cam:
            cv.ShowImage("left_eye_low", ll)
            cv.ShowImage("right_eye_low", lr)
            cv.ShowImage("left_eye", l)
            cv.ShowImage("right_eye", r)
            #cv.WaitKey returns an int key code; compare its low byte as a character
            key = cv.WaitKey(10)
            key = chr(key & 255) if key != -1 else None

            if   'R' == key: #up
                base_exposure = base_exposure + 1
                vd.set_exposure(cam.camera.capture, base_exposure)
                print base_exposure
            elif 'T' == key: #down
                base_exposure = base_exposure - 1
                vd.set_exposure(cam.camera.capture, base_exposure)
                print base_exposure


