learn_models.py
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Willow Garage, Inc. nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
## @author Hai Nguyen/hai@gatech.edu
import os
import sys
import camera as cam
import re
import time
import random_forest as rf
#import opencv as cv
#import opencv.highgui as hg
import cv
import numpy as np  # np.matrix / np.linalg are used by run() below
import util as ut
from laser_detector import *

def count_down(times=3):
    for i in range(times):
        print 'in ', times - i
        time.sleep(1)

def evaluate_classifier():
    rf.evaluate_classifier(rf.RFBreiman, load_pickle('PatchClassifier.dataset.pickle'), 1, .7)

def show_processed(image, masks, detection, blobs, detector):
    masker            = Mask(image)
    splitter          = SplitColors(image)
    r, g, b           = splitter.split(image)
    thresholded_image = masker.mask(masks[0], r, g, b)
    draw_detection(thresholded_image, detection)
    cv.ShowImage('thresholded', thresholded_image)

    draw_detection(image, detection)
    draw_blobs(image, blobs)

    make_visible_binary_image(masks[0])
    draw_detection(masks[0], detection)
    make_visible_binary_image(masks[1])
    make_visible_binary_image(masks[2])

    cv.ShowImage("video",       image)
    cv.ShowImage('motion',      masks[1])
    cv.ShowImage('intensity',   masks[2])

    key = cv.WaitKey(10)
    if detector is not None:
        # cv.WaitKey returns an int key code (-1 on timeout), so compare
        # against ord(...) rather than a one-character string
        if key == ord('T'): #down
            detector.intensity_filter.thres_high = detector.intensity_filter.thres_high - 5
            print 'detector.intensity_filter.thres =', detector.intensity_filter.thres_high
        if key == ord('R'):
            detector.intensity_filter.thres_high = detector.intensity_filter.thres_high + 5
            print 'detector.intensity_filter.thres =', detector.intensity_filter.thres_high
        if key == ord(' '):
            cv.WaitKey()

def confirmation_prompt(confirm_phrase):
    print confirm_phrase
    print 'y(es)/n(o)'
    k = cv.WaitKey()
    return k == ord('y')

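# Note: confirmation_prompt polls the keyboard through HighGUI, so some
# cv.NamedWindow must already exist and have focus for the key press to
# register; typing into the terminal does nothing. A minimal usage sketch
# (the window name and prompt string here are illustrative, not from this
# module):
#   cv.NamedWindow('video', 1)
#   if confirmation_prompt('overwrite dataset?'):
#       print 'confirmed'
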
def learn_run(exposure = LaserPointerDetector.SUN_EXPOSURE, num_examples_to_collect=200, display_during_run = True):
    cv.NamedWindow("video",       1)
    cv.NamedWindow("thresholded", 1)
    cv.NamedWindow('motion',      1)
    cv.NamedWindow('intensity',   1)

    cv.MoveWindow("video",       0,   0)
    cv.MoveWindow("thresholded", 800, 0)

    cv.MoveWindow("intensity",   0,   600)
    cv.MoveWindow("motion",      800, 600)

    # image source; a cam.StereoFile pair can be swapped in here instead,
    # as run() below does
    video     = cam.VidereStereo(0, gain=96, exposure=exposure)
    frames    = video.next()
    detector  = LaserPointerDetector(frames[0], exposure=exposure, 
                                    dataset=PatchClassifier.DEFAULT_DATASET_FILE,
                                    use_color=False, use_learning=False)
    detector2 = LaserPointerDetector(frames[1], exposure=exposure, 
                                    dataset=PatchClassifier.DEFAULT_DATASET_FILE,
                                    use_color=False, use_learning=False)

    def append_examples_to_file(dataset, filename = PatchClassifier.DEFAULT_DATASET_FILE):
        try:
            loaded_set = load_pickle(filename)
            dataset.append(loaded_set)
        except IOError:
            pass
        dump_pickle(dataset, filename)
        print 'Saved examples!'

    #Gather positive examples from laser detector
    if confirmation_prompt('gather positive examples?'):
        print 'Let\'s gather some positive examples... we need', num_examples_to_collect, 'examples'
        positive_examples_for_classifier = []
        count_down(0)
        for i in xrange(10):
            frames = video.next()
            detector.detect(frames[0])
            detector2.detect(frames[1])

        for img in video:
            image                 = None
            combined              = None
            motion                = None
            intensity             = None
            laser_blob            = None
            intensity_motion_blob = None

            for raw_image, detect in zip(img, [detector, detector2]):
                before = time.time()
                image, combined, laser_blob, intensity_motion_blob = detect.detect(raw_image)
                diff = time.time() - before
                #print 'took %.2f seconds to run or %.2f fps' % (diff, 1.0/diff)
                if laser_blob is not None:
                    instance = blob_to_input_instance(image, laser_blob)
                    if instance is not None:
                        positive_examples_for_classifier.append(instance)
                        print 'got', len(positive_examples_for_classifier), 'instances'
                motion, intensity = detect.get_motion_intensity_images()

            show_processed(image, [combined, motion, intensity], laser_blob, intensity_motion_blob, detector2)
            if len(positive_examples_for_classifier) > num_examples_to_collect:
                break
        positive_instances_dataset = matrix_to_dataset(ut.list_mat_to_mat(positive_examples_for_classifier, axis=1))
        append_examples_to_file(positive_instances_dataset)

    if confirmation_prompt('gather negative examples?'):
        #Gather negative examples from laser detector
        print 'Let\'s gather some negative examples... we need', num_examples_to_collect, 'examples'
        negative_examples_for_classifier = []
        count_down(10)
        for i in xrange(10):
            frames = video.next()
            detector.detect(frames[0])
            detector2.detect(frames[1])

        for img in video:
            image                 = None
            combined              = None
            motion                = None
            intensity             = None
            laser_blob            = None
            intensity_motion_blob = None
            for raw_image, detect in zip(img, [detector, detector2]):
                image, combined, laser_blob, intensity_motion_blob = detect.detect(raw_image)
                if laser_blob is not None:
                    instance = blob_to_input_instance(image, laser_blob)
                    if instance is not None:
                        negative_examples_for_classifier.append(instance)
                        print 'got', len(negative_examples_for_classifier), 'instances'
                motion, intensity = detect.get_motion_intensity_images()

            show_processed(image, [combined, motion, intensity], laser_blob, intensity_motion_blob, detector2)
            if len(negative_examples_for_classifier) > (num_examples_to_collect*2):
                break
        negative_instances_dataset = matrix_to_dataset(ut.list_mat_to_mat(negative_examples_for_classifier, axis=1))
        append_examples_to_file(negative_instances_dataset)

    if confirmation_prompt('run classifier?'):
        run(exposure, video = video, display=display_during_run)

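# run() below drives the full stereo pipeline: each camera's
# LaserPointerDetector finds a candidate blob independently, and whenever both
# cameras report a detection the two centroids are triangulated through
# stereo_cam.triangulate_3d into a 3D point plus a reprojection error estimate.
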
def run(exposure, video=None, display=False, debug=False):
    if display:
        cv.NamedWindow("video", 1)
        cv.MoveWindow("video",   0,   0)

    if debug:
        cv.NamedWindow('right',       1)
        cv.MoveWindow("right", 800,   0)
        cv.NamedWindow("thresholded", 1)
        cv.NamedWindow('motion',      1)
        cv.NamedWindow('intensity',   1)

        cv.MoveWindow("thresholded", 800, 0)
        cv.MoveWindow("intensity",   0,   600)
        cv.MoveWindow("motion",      800, 600)

    if video is None:
        #video    = cam.VidereStereo(0, gain=96, exposure=exposure)
        video    = cam.StereoFile('measuring_tape_red_left.avi','measuring_tape_red_right.avi')

    frames = video.next()
    detector       = LaserPointerDetector(frames[0], LaserPointerDetector.SUN_EXPOSURE, 
                                            use_color=False, use_learning=True)
    detector_right = LaserPointerDetector(frames[1], LaserPointerDetector.SUN_EXPOSURE, 
                                            use_color=False, use_learning=True, classifier=detector.classifier)
    stereo_cam     = cam.KNOWN_CAMERAS['videre_stereo2']

    # warm up the detectors' motion/intensity filters on a few frames
    for i in xrange(10):
        frames = video.next()
        detector.detect(frames[0])
        detector_right.detect(frames[1])

    lt = cv.CreateImage((640,480), 8, 3)
    rt = cv.CreateImage((640,480), 8, 3)
    for l, r in video:
        start_time = time.time()
        #l = stereo_cam.camera_left.undistort_img(l)
        #r = stereo_cam.camera_right.undistort_img(r)
        cv.Copy(l, lt)
        cv.Copy(r, rt)
        l = lt
        r = rt
        undistort_time = time.time()

        _, _, right_cam_detection, stats = detector_right.detect(r)
        if debug:
            draw_blobs(r, stats)
            draw_detection(r, right_cam_detection)
            cv.ShowImage('right', r)

        image, combined, left_cam_detection, stats = detector.detect(l)
        detect_time = time.time()

        if debug: 
            motion, intensity = detector.get_motion_intensity_images()
            show_processed(l, [combined, motion, intensity], left_cam_detection, stats, detector)
        elif display:
            #draw_blobs(l, stats)
            draw_detection(l, left_cam_detection)
            cv.ShowImage('video', l)
            cv.WaitKey(10)

        if right_cam_detection is not None and left_cam_detection is not None:
            x  = np.matrix(left_cam_detection['centroid']).T
            xp = np.matrix(right_cam_detection['centroid']).T
            result = stereo_cam.triangulate_3d(x, xp)
            print '3D point located at', result['point'].T, 
            print 'distance %.2f error %.3f' % (np.linalg.norm(result['point']),  result['error'])
        triangulation_time = time.time()

        diff = time.time() - start_time
        print 'Main: Running at %.2f fps, took %.4f s' % (1.0 / diff, diff)
        #print '   undistort     took %.4f s' % (undistort_time - start_time)
        #print '   detection     took %.4f s' % (detect_time - undistort_time)
        #print '   triangulation took %.4f s' % (triangulation_time - detect_time)

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print 'usage: python learn_models.py [run|learn] [sun|nosun]'
        sys.exit(1)

    exposure = 0
    if sys.argv[2] == 'sun':
        exposure = LaserPointerDetector.SUN_EXPOSURE
        print 'sun exposure!'
    else:
        exposure = LaserPointerDetector.NO_SUN_EXPOSURE

    if sys.argv[1] == 'run':
        print 'Running only...'
        run(exposure = exposure, display = True)

    if sys.argv[1] == 'learn':
        print 'Learning, then running...'
        learn_run(exposure = exposure)

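# Example invocations (assuming the camera drivers and the pickled dataset are
# in place; any second argument other than 'sun' selects NO_SUN_EXPOSURE):
#   python learn_models.py learn sun
#   python learn_models.py run nosun
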
##=================================================================================================
##=================================================================================================

#def display_single(detection_source, mask_window='masked', detection_window='cam'):
#    for frame, mask, loc, blobs in detection_source:
#        make_visible_binary_image(mask)
#        hg.cvShowImage(mask_window, mask)
#        draw_detection(frame, loc)
#        hg.cvShowImage(detection_window, frame)
#        hg.cvWaitKey(10)
#        yield frame, mask, loc, blobs

#def save_detection(prefix, frame_number, detection_bundle):
#    #frame_number = 1
#    #for frame, mask, loc, blobs in detection_source:
#        frame, mask, loc, blobs = detection_bundle
#        frame_name = prefix + 'frame' + str(frame_number) + '.png'
#        hg.cvSaveImage(frame_name, frame)
#        hg.cvSaveImage(prefix + 'frame' + str(frame_number) + '_mask.png', mask)
#
#        pickle_file = open(prefix + 'frame' + str(frame_number) + '.pickle', 'w')
#        pk.dump(loc, pickle_file)
#        pickle_file.close()
#
#        pickle_file = open(prefix + 'frame' + str(frame_number) + '_blobs.pickle', 'w')
#        pk.dump(blobs, pickle_file)
#        pickle_file.close()
#
#        #frame_number = frame_number + 1
#        #yield frame, mask, loc, blobs
#        return frame, mask, loc, blobs

##=================================================================================================
##=================================================================================================

#def record_videos(stereo, prefix, folder):
#    videos = []
#    MPEG4 = 0x58564944
#    count = 0
#    while True:
#        key = hg.cvWaitKey()
#        while key != ' ':
#            key = hg.cvWaitKey()
#        left_file  = folder + '/' + prefix+str(count) + '_left.avi'
#        right_file = folder + '/' + prefix+str(count) + '_right.avi'
#        lwrite = hg.cvCreateVideoWriter(left_file,  MPEG4, 30, cv.cvSize(640,480))
#        rwrite = hg.cvCreateVideoWriter(right_file, MPEG4, 30, cv.cvSize(640,480))
#        for left, right in stereo:
#            hg.cvWriteFrame(lwrite, left)
#            hg.cvWriteFrame(rwrite, right)
#            hg.cvShowImage('cam', left)
#            key = hg.cvWaitKey(10)
#            if key == ' ':
#                break
#        count = count + 1
#        videos.append(left_file)
#        videos.append(right_file)
#
#        print 'Do you want to record another clip?'
#        key = hg.cvWaitKey()
#        if key == 'y':
#            continue
#        if key == 'n':
#            break
#    return videos

#def record_learn(session_name):
#    '''
#        NOTE: THIS SET OF FUNCTIONS NEEDS TO BE TESTED
#    '''
#    os.mkdir(session_name)
#    hg.cvNamedWindow('cam', 1)
#    sample_image = cv.cvCreateImage(cv.cvSize(640,480), 8, 3)
#    hg.cvShowImage('cam', sample_image)
#    video = cam.VidereStereo(0, gain=96, exposure=LaserPointerDetector.SUN_EXPOSURE)
#
#    print 'Let\'s gather positive examples, press space bar then point the laser pointer',
#    print ' at various objects. Remember to not move the camera! Press spacebar again to stop.'
#    positive_files = record_videos(video, 'positive', session_name)
#
#    print 'Let\'s gather negative examples, press space bar then point the laser pointer',
#    print ' at various objects. Remember to not move the camera! Press spacebar again to stop.'
#    negative_files = record_videos(video, 'negative', session_name)
#
#    print 'Performing detections, writing out images...'
#    files_sets = [('positive/', positive_files), ('negative/', negative_files)]
#    for label, files in files_sets:
#        start_dir = session_name + '/' + label
#        os.mkdir(start_dir)
#        idx = 0
#        for pair in files:
#            for file in pair:
#                video        = cam.video_file(file)
#                detector     = LaserPointerDetector(cv.cvCreateImage(cv.cvSize(640,480), 8, 3), use_color=False, use_learning=False)
#                frame_number = 0
#                for img in video:
#                    image, mask, laser_blob, intensity_motion_blob = detector.detect(img)
#                    save_detection(start_dir + str(idx), frame_number, (image, mask, laser_blob, intensity_motion_blob))
#                    frame_number = frame_number + 1
#                idx = idx + 1
#
#    print 'running color learner'
#    run_color_learner(session_name + '/positive')
#
#    print 'running forest learner...'
#    run_forest_learner(session_name)

##=================================================================================================
##=================================================================================================

#def as_iter_block(func, input):
#    for p in input:
#        yield func(p)

#def get_frames_from_folder(folder):
#
#    def files_in(folder):
#        root, dirs, files = os.walk(folder).next()
#        return root, files
#
#    def dir_in(folder):
#        root, dirs, files = os.walk(folder).next()
#        return root, dirs
#
#    def select_frames(reg, name):
#        return reg.match(name) is not None
#
#    def append_pref(pref, file):
#        return pref + '/' + file
#
#    reg = re.compile('\dframe\d*\.png')
#    root, files = files_in(folder)
#    selected_files = map(ft.partial(append_pref, root), filter(ft.partial(select_frames, reg), files))
#    return selected_files

#def run_color_learner(folder):
#    cf = ColorFilter((640,480))
#    def bw_img_loader(filename):
#        return hg.cvLoadImage(filename, hg.CV_LOAD_IMAGE_GRAYSCALE)
#    selected_files = get_frames_from_folder(folder)
#    cf.learn_sequence(it.izip(cam.file_sequence(selected_files, hg.cvLoadImage), 
#                              cam.file_sequence(selected_files, bw_img_loader, '_mask.png'),
#                              cam.file_sequence(selected_files, load_pickle, '.pickle')))

#def run_forest_learner(folder):
#    pfiles = get_frames_from_folder(folder + '/positive')
#    nfiles = get_frames_from_folder(folder + '/negative')
#    pc = PatchClassifier()
#    pc.learn(positive_examples = it.izip(cam.file_sequence(pfiles, hg.cvLoadImage),
#                                         cam.file_sequence(pfiles, load_pickle, '_blobs.pickle')),
#             negative_examples = it.izip(cam.file_sequence(nfiles, hg.cvLoadImage),
#                                         cam.file_sequence(nfiles, load_pickle, '_blobs.pickle')))

##=================================================================================================
##=================================================================================================

#run(exposure = LaserPointerDetector.SUN_EXPOSURE)
#import sys
#session_name = sys.argv[1]
#record_learn(session_name)

##=================================================================================================
##=================================================================================================
##                                       SAND BOX
##=================================================================================================
##=================================================================================================
## ===> Sand!
#def triangulate_cressel(camera, left_coord, right_coord, return_dict = True ):
#    """ left_coord, right_coord -> cvPoint
#        returns -> (3X1) vector in stereohead coordinate frame, error.
#                   stereohead coord frame is as defined in transforms.py
#        code copied and modified slightly from self.get_3d_coord
#    """
#
#    right_point = cv.cvCreateMat(3,1,cv.CV_32F)
#    left_point = cv.cvCreateMat(3,1,cv.CV_32F)
#
#    if left_coord.__class__ == cv.cvPoint(0,0).__class__:
#        left_point[0] = float(left_coord.x)                      # u
#        left_point[1] = float(left_coord.y)                      # v
#        left_point[2] = 1.0
#    else:
#        left_point[0] = float(left_coord[0])                     # u
#        left_point[1] = float(left_coord[1])                     # v
#        left_point[2] = 1.0
#
#    if right_coord.__class__ == cv.cvPoint(0,0).__class__:
#        right_point[0] = float(right_coord.x)
#        right_point[1] = float(right_coord.y)
#        right_point[2] = 1.0
#    else:
#        right_point[0] = float(right_coord[0])
#        right_point[1] = float(right_coord[1])
#        right_point[2] = 1.0
#        # in the normalized image plane _point is the vector denoted as [ u; v; 1 ]
#
#    right_vector  = cv.cvCreateMat(3,1,cv.CV_32F)
#    left_vector   = cv.cvCreateMat(3,1,cv.CV_32F)
#    output_vector = cv.cvCreateMat(3,1,cv.CV_32F)
#
#    left_inv_intrinsic  = camera.camera_left.inv_intrinsic_mat_cv
#    right_inv_intrinsic = camera.camera_right.inv_intrinsic_mat_cv
#    cv.cvGEMM(left_inv_intrinsic , left_point,  1, None, 1, left_vector,  0)
#    cv.cvGEMM(right_inv_intrinsic, right_point, 1, None, 1, right_vector, 0)
#
#    lv = np.matrix([left_vector[0], left_vector[1], left_vector[2]]).T
#    rv = np.matrix([right_vector[0], right_vector[1], right_vector[2]]).T
#
#    te = camera.R * lv
#
#    a = cv.cvCreateMat( 3, 2, cv.CV_32F )
#    a[0,0], a[1,0], a[2,0] = te[0,0], te[1,0], te[2,0]
#    a[0,1], a[1,1], a[2,1] = rv[0,0], rv[1,0], rv[2,0]
#
#    params = cv.cvCreateMat( 2, 1, cv.CV_32F )
#
#    #linear least squares [X1 -X2]*[al;bet] - trans ~ 0
#    #                     [left right]*[params] - translation ~ 0
#    translation = cv.cvCreateMat(3,1,cv.CV_32F)
#    translation[0] = -camera.T[0,0]
#    translation[1] = -camera.T[1,0]
#    translation[2] = -camera.T[2,0]
#
#    cv.cvSolve( a, translation, params , cv.CV_SVD )
#    alpha = params[0]
#    beta = -params[1]
#
#    vec_l = alpha * te + camera.T
#    vec_r = beta * rv
#
#    #compute reprojection error
#    detection_error = np.linalg.norm(vec_r-vec_l)
#
#    p = (vec_l + vec_r)/2.
#
#    p = np.row_stack((p, np.matrix([1.])))
#    # my xyz and cressel's are aligned and so I can use the
#    # same transformation matrix -- advait
#    #p = tr._stereoT['right'] * p
#    p = p[0:3]/p[3,0]
#
#    if return_dict:
#        return {'output_vector': p, 'error':detection_error}
#    else:
#        return p, detection_error
##=================================================================================================
##=================================================================================================

