texture_features.py
#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#  \author Hai Nguyen (Healthcare Robotics Lab, Georgia Tech.)

import util as ut #Usage: cv2np, np2cv
import numpy as np
import opencv as cv
import opencv.highgui as hg
##
# Calculates the eigenvalues of the covariance matrix of Sobel filter responses
# accumulated over each image block (via cvCornerEigenValsAndVecs).
#
# @param cv_image opencv image to calculate texture over
# @param blocksize size of the block over which statistics are accumulated (in pixels)
# @param filtersize aperture size of the Sobel filter (in pixels)
# @return numpy array of shape (height, width, 2) where [:,:,0] holds the first eigenvalues and [:,:,1] the second
def eigen_texture(cv_image, blocksize=8, filtersize=3):
    gray_image = cv.cvCreateImage(cv.cvSize(cv_image.width, cv_image.height), cv.IPL_DEPTH_8U, 1)
    # cvCornerEigenValsAndVecs packs (l1, l2, x1, y1, x2, y2) per pixel, hence the width*6 buffer.
    eig_tex = cv.cvCreateImage(cv.cvSize(cv_image.width*6, cv_image.height), cv.IPL_DEPTH_32F, 1)

    cv.cvCvtColor(cv_image, gray_image, cv.CV_BGR2GRAY)
    cv.cvCornerEigenValsAndVecs(gray_image, eig_tex, blocksize, filtersize)
    eig_tex_np = ut.cv2np(eig_tex)

    # Regroup the 6 packed floats per pixel and keep only the two eigenvalues.
    eig_tex_np = np.reshape(eig_tex_np, [cv_image.height, cv_image.width, 6])
    return eig_tex_np[:,:,0:2]
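
# The helper below is NOT part of the original module: it is a minimal sketch of the
# same computation using the modern cv2 API, assuming the cv2 bindings and a BGR
# uint8 numpy image are available. The name eigen_texture_cv2 is illustrative only.
def eigen_texture_cv2(bgr_image, blocksize=8, filtersize=3):
    import cv2
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    # cornerEigenValsAndVecs returns an (h, w, 6) float32 array packed as
    # (l1, l2, x1, y1, x2, y2); keep only the two eigenvalues, as above.
    eig = cv2.cornerEigenValsAndVecs(gray, blocksize, filtersize)
    return eig[:, :, 0:2]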

def visualize(eigens):
    # Show both eigenvalue maps, rescaled to [0, 255] and inverted; space advances, 'x' exits.
    l1 = eigens[:,:,0]
    l2 = eigens[:,:,1]
    m1 = np.min(l1)
    m2 = np.min(l2)
    r1 = np.max(l1) - m1
    r2 = np.max(l2) - m2
    # Guard against division by zero when a map is constant.
    if r1 == 0:
        r1 = 1
    if r2 == 0:
        r2 = 1
    l1cv = ut.np2cv(np.array( (1 - ((l1-m1) / r1)) * 255, dtype='uint8'))
    l2cv = ut.np2cv(np.array( (1 - ((l2-m2) / r2)) * 255, dtype='uint8'))
    hg.cvNamedWindow('eigen value 1', 1)
    hg.cvNamedWindow('eigen value 2', 1)
    hg.cvShowImage('eigen value 1', l1cv)
    hg.cvShowImage('eigen value 2', l2cv)
    while True:
        k = hg.cvWaitKey(33)
        if k == ' ':
            return
        if k == 'x':
            exit()
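
# Not part of the original module: a minimal sketch of the same visualization with the
# cv2 API, assuming cv2 is available; min-max normalization replaces the manual scaling
# above and the maps are shown without inversion. visualize_cv2 is an illustrative name.
def visualize_cv2(eigens):
    import cv2
    for idx in (0, 1):
        # Rescale each eigenvalue map to the displayable 0..255 range.
        img8 = cv2.normalize(eigens[:, :, idx], None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
        cv2.imshow('eigen value %d' % (idx + 1), img8)
    while True:
        # Space closes, 'x' quits, mirroring the loop above.
        k = cv2.waitKey(33) & 0xff
        if k == ord(' '):
            cv2.destroyAllWindows()
            return
        if k == ord('x'):
            exit()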


if __name__ == '__main__':
    #hg.cvNamedWindow('win', 1)
    im = hg.cvLoadImage('/home/haidai/svn/robot1/src/projects/08_03_dog_commands/dragonfly_color_calibration/untitled folder/camera_image.png')
    #hg.cvShowImage('win', im)
    # Sweep block sizes 2, 4, ..., 80 and display the resulting eigenvalue maps.
    for i in range(40):
        s = (i+1) * 2
        print s
        eig_tex_np = eigen_texture(im, blocksize=s, filtersize=3)
        visualize(eig_tex_np)

#    pdb.set_trace()
#    def texture_features(self, block_size=5, filter_size=3):
#        """
#        Calculates the texture features associated with the image.
#        block_size gives the size of the texture neighborhood to be processed
#        filter_size gives the size of the Sobel operator used to find gradient information
#        """
#        #block_size = cv.cvSize(block_size, block_size)
#
#        #convert to grayscale float
#        channels = 1
#        self.gray_image = cv.cvCreateImage(cv.cvSize(self.im_width, self.im_height),
#                                           cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U, #cv.IPL_DEPTH_32F,
#                                           channels)
#
#
#        #cv.CV_32FC1, #cv.IPL_DEPTH_32F, #cv.IPL_DEPTH_8U, #cv.IPL_DEPTH_16U,
#        channels = 1
#        eig_tex = cv.cvCreateImage(cv.cvSize(self.im_width*6, self.im_height),
#                                    cv.IPL_DEPTH_32F,
#                                    channels)
#
#
#        cv.cvCvtColor(self.image, self.gray_image, cv.CV_BGR2GRAY);
#
#        #cv.cvAdd(const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL );
#
#        #hg.cvConvertImage(self.image, self.gray_image)
#
#        cv.cvCornerEigenValsAndVecs(self.gray_image, eig_tex,#CvArr* eigenvv,
#                                    block_size, filter_size)
#
#        eig_tex = ut.cv2np(eig_tex)
#        eig_tex = np.reshape(eig_tex, [self.im_height, self.im_width, 6])
#        #print eig_tex.shape ## [480,640,3]
#        ## (l1, l2, x1, y1, x2, y2), where
#        ## l1, l2 - eigenvalues of M; not sorted
#        ## (x1, y1) - eigenvector corresponding to l1
#        ## (x2, y2) - eigenvector corresponding to l2
#        tex_feat = np.zeros([3, self.im_height * self.im_width], dtype=np.float32)
#        tmp = np.reshape(eig_tex, [self.im_height * self.im_width, 6]).T
#        s = tmp[0] > tmp[1]
#        tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
#        tex_feat[0, s] = tmp[1, s]
#        tex_feat[1:3, -s] = tmp[1, -s] * tmp[4:6, -s]
#        tex_feat[0, -s] = tmp[0, -s]
#
#        self.tex_feat = tex_feat.T
#        self.tex_image = np.reshape(self.tex_feat, [self.im_height, self.im_width, 3])
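
# Not part of the original module: a numpy-only sketch of the per-pixel feature selection
# performed by the commented-out texture_features above, assuming eig_tex is the
# (height, width, 6) array of (l1, l2, x1, y1, x2, y2) values returned by
# cvCornerEigenValsAndVecs. Note that modern numpy requires ~s, not -s, to negate a
# boolean mask. dominant_eigen_features is an illustrative name.
def dominant_eigen_features(eig_tex):
    h, w = eig_tex.shape[0], eig_tex.shape[1]
    tmp = np.reshape(eig_tex, [h * w, 6]).T
    tex_feat = np.zeros([3, h * w], dtype=np.float32)
    s = tmp[0] > tmp[1]  # pixels where l1 is the dominant eigenvalue
    # Feature = (smaller eigenvalue, dominant eigenvalue * its eigenvector).
    tex_feat[1:3, s] = tmp[0, s] * tmp[2:4, s]
    tex_feat[0, s] = tmp[1, s]
    tex_feat[1:3, ~s] = tmp[1, ~s] * tmp[4:6, ~s]
    tex_feat[0, ~s] = tmp[0, ~s]
    return np.reshape(tex_feat.T, [h, w, 3])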


clutter_segmentation
Author(s): Jason Okerman, Martin Schuster, Advisors: Prof. Charlie Kemp and Jim Rehg, Lab: Healthcare Robotics Lab at Georgia Tech
autogenerated on Wed Nov 27 2013 12:07:15