trf_behavior.py
#! /usr/bin/python

#
# Copyright (c) 2010, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Georgia Tech Research Corporation nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#


import roslib; roslib.load_manifest('trf_learn')
import rospy

import scipy.spatial as sp
import os.path as pt
import numpy as np
import time
import pdb
import os

import cv
import tf
import tf.transformations as tr
import laser_interface.laser_client as lc
import hrl_camera.ros_camera as rc
import hrl_lib.tf_utils as tfu
import hrl_lib.util as ut
import hrl_lib.viz as viz
import hrl_pr2_lib.devices as hd

import trf_learn.recognize_3d as r3d
import trf_learn.application_behaviors as ab
import trf_learn.locations_manager as lcm

class TaskRelevantLearningBehaviors:

    def __init__(self, app_behaviors, tf_listener, optical_frame):
        self.mode_click = 'init'
        #Import a few functions from app_behaviors to keep the interface clean
        self.look_at = app_behaviors.look_at
        self.manipulation_posture = app_behaviors.manipulation_posture
        self.driving_posture = app_behaviors.driving_posture
        self.get_behavior_by_task = app_behaviors.get_behavior_by_task
        self.move_base_planner = app_behaviors.move_base_planner
        self.location_approach_driving = app_behaviors.location_approach_driving
        self.optical_frame = optical_frame

        #Init ROS things
        self.tf_listener = tf_listener
        self.robot = app_behaviors.robot

        self.prosilica = app_behaviors.prosilica
        self.prosilica_cal = rc.ROSCameraCalibration('/prosilica/camera_info')
        self.rec_params = r3d.Recognize3DParam()

        self.laser_listener = lc.LaserPointerClient(tf_listener=self.tf_listener)
        self.laser_listener.add_double_click_cb(self.click_cb)

        self.feature_ex = r3d.NarrowTextureFeatureExtractor(self.prosilica,
                hd.PointCloudReceiver('narrow_stereo_textured/points'),
                self.prosilica_cal,
                self.robot.projector,
                self.tf_listener, self.rec_params)
        self.locations_man = lcm.LocationsManager('locations_narrow_v11.pkl', rec_params=self.rec_params) #TODO

    def click_cb(self, point_bl):
        if point_bl != None:
            print 'Got point', point_bl.T

            if self.mode_click == 'init':
                self.init_task(point_bl)

            if self.mode_click == 'update_point':
                self.update_point(point_bl)

    def run(self, mode, save, user_study):
        r = rospy.Rate(10)
        rospy.loginfo('Ready.')

        if mode == 'update_point':
            self.mode_click = 'update_point'
            rospy.loginfo('UPDATING POINT')

        #self.init_task(np.matrix([[ 0.94070742, -0.23445448,  1.14915097]]).T)
        while not rospy.is_shutdown():
            r.sleep()
            if mode == 'practice':
                self.practice_task()

            if mode == 'execute':
                t = time.time()
                self.execute_task(save, user_study)
                t2 = time.time()
                print '>> That took', t2 - t, 'seconds'
                exit()

            if mode == 'novision':
                for i in range(10):
                    rospy.loginfo('NOVISION: iteration %d.' % i)
                    self.execute_task(save, user_study, True)

            if mode == 'update_base':
                self.update_base()

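    # A minimal usage sketch (an assumption, not code from this package): the
    # class is constructed with an application_behaviors-style object `app`
    # supplying the robot, the prosilica camera, and the behavior callbacks,
    # plus a tf listener and the camera's optical frame name.
    #
    #   rospy.init_node('trf_behavior')
    #   tf_listener = tf.TransformListener()
    #   trf = TaskRelevantLearningBehaviors(app, tf_listener,
    #                                       'high_def_optical_frame')
    #   trf.run(mode='execute', save=True, user_study=False)
    #
    # The frame name 'high_def_optical_frame' is hypothetical; use whatever
    # frame the prosilica publishes on your robot.
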
    #######################################################################################
    # Utility functions
    #######################################################################################
    def update_point(self, point_bl):
        tasks = self.locations_man.data.keys()
        for i, k in enumerate(tasks):
            print i, k
        selected_idx = int(raw_input("Select an action to update point location\n"))
        task_id = tasks[selected_idx]
        map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        point_map = tfu.transform_points(map_T_base_link, point_bl)
        self.locations_man.set_center(task_id, point_map)
        self.locations_man.save_database()

    def update_base(self):
        tasks = self.locations_man.data.keys()
        for i, k in enumerate(tasks):
            print i, k
        selected_idx = int(raw_input("Select an action to update base location\n"))

        t_current_map, r_current_map = self.robot.base.get_pose()
        task_id = tasks[selected_idx]
        self.locations_man.update_base_pose(task_id, [t_current_map, r_current_map])
        self.locations_man.save_database()

    def unreliable_locs(self):
        tasks = self.locations_man.data.keys()
        utid = []
        for tid in tasks:
            if not self.locations_man.is_reliable(tid):
                utid.append(tid)
        return utid

    def balance_positives_and_negatives(self, dataset):
        #pdb.set_trace() #bug here
        poslist = np.where(dataset.outputs == r3d.POSITIVE)[1].A1.tolist()
        neglist = np.where(dataset.outputs == r3d.NEGATIVE)[1].A1.tolist()
        npoint = min(len(poslist), len(neglist))
        assert(npoint > 0)
        sindices = poslist[:npoint]+neglist[:npoint]
        dataset.pt3d    = dataset.pt3d[:, sindices]
        dataset.pt2d    = dataset.pt2d[:, sindices]
        dataset.outputs = dataset.outputs[:, sindices]
        dataset.inputs  = dataset.inputs[:, sindices]

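    # Toy example of the balancing above (sketch): with outputs
    # [POSITIVE, POSITIVE, POSITIVE, NEGATIVE], npoint = min(3, 1) = 1, so the
    # dataset is trimmed to one positive and one negative column, and pt3d,
    # pt2d, outputs, and inputs stay aligned on the same column indices.
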
    def profile_me(self, task_id, point_bl):
        for i in range(2):
            params = r3d.Recognize3DParam()
            params.uncertainty_x = 1.
            params.uncertainty_y = .04
            params.uncertainty_z = .04
            params.n_samples = 5000
            params.uni_mix = 0.1
            print 'PROFILE_ME ITERATION', i
            fea_dict, image_name = self.read_features_save(task_id, point_bl, params)

    def draw_dots_nstuff(self, img, points2d, labels, picked_loc):
        pidx = np.where(labels == r3d.POSITIVE)[1].A1.tolist()
        nidx = np.where(labels == r3d.NEGATIVE)[1].A1.tolist()
        uidx = np.where(labels == r3d.UNLABELED)[1].A1.tolist()

        #Highlight the picked location
        if picked_loc != None:
            r3d.draw_points(img, picked_loc, [255, 0, 0], 4, -1)

        #scale = 1
        #Draw unlabeled points
        if len(uidx) > 0:
            upoints = points2d[:, uidx]
            r3d.draw_points(img, upoints, [255,255,255], 2, -1)

        #Draw negative points
        if len(nidx) > 0:
            npoints = points2d[:, nidx]
            r3d.draw_points(img, npoints, [0,0,255], 2, -1)

        #Draw positive points
        if len(pidx) > 0:
            ppoints = points2d[:, pidx]
            r3d.draw_points(img, ppoints, [0,255,0], 2, -1)

    #######################################################################################
    # Perception Behaviors
    #######################################################################################

    def record_perceptual_data(self, point3d_bl, image_frame, rdict=None, folder_name=None):
        rospy.loginfo('Saving dataset...')
        #self.feature_ex.read(point3d_bl)
        if rdict == None:
            rospy.loginfo('Getting a kinect reading')
            rdict = self.feature_ex.read()['rdict']
            #rdict = self.kinect_listener.read()
        kimage = rdict['image']
        rospy.loginfo('Waiting for calibration.')
        while not self.feature_ex.cal.has_msg:
            time.sleep(.1)

        #which frames?
        rospy.loginfo('Getting transforms.')
        #k_T_bl = tfu.transform('openni_rgb_optical_frame', '/base_link', self.tf_listener)
        k_T_bl = tfu.transform(image_frame, '/base_link', self.tf_listener)

        tstring = time.strftime('%A_%m_%d_%Y_%I_%M%p')
        kimage_name = '%s_highres.png' % tstring
        pickle_fname = '%s_interest_point_dataset.pkl' % tstring
        if folder_name != None:
            try:
                os.mkdir(folder_name)
            except OSError, e:
                print e
            kimage_name = pt.join(folder_name, kimage_name)
            pickle_fname = pt.join(folder_name, pickle_fname)

        rospy.loginfo('Saving images (basename %s)' % tstring)
        cv.SaveImage(kimage_name, kimage)
        rospy.loginfo('Saving pickles')

        data_pkl = {'touch_point': point3d_bl,
                    'points3d': rdict['points3d'],
                    'image': kimage_name,
                    'cal': self.feature_ex.cal,
                    'k_T_bl': k_T_bl}
                    #'point_touched': point3d_bl}

        ut.save_pickle(data_pkl, pickle_fname)
        print 'Recorded to', pickle_fname
        return pickle_fname, kimage_name

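    # Sketch of reloading the pickle saved above for offline analysis
    # (assumes hrl_lib.util provides load_pickle to match the save_pickle
    # used throughout this file):
    #
    #   data = ut.load_pickle(pickle_fname)
    #   points3d = data['points3d']   # 3xN cloud in the camera frame
    #   k_T_bl   = data['k_T_bl']     # transform taking base_link points
    #                                 # into the camera frame
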
    def read_features_save(self, task_id, point3d_bl, params=None):
        self.robot.projector.set(True)
        rospy.sleep(2.)
        f = self.feature_ex.read(point3d_bl, params=params)
        file_name, kimage_name = self.record_perceptual_data(point3d_bl, self.optical_frame, rdict=f['rdict'], folder_name=task_id)
        self.robot.projector.set(False)

        #image_T_bl = tfu.transform(self.optical_frame, 'base_link', self.tf_listener)
        #point3d_img = tfu.transform_points(image_T_bl, point3d_bl)
        #point2d_img = self.feature_ex.cal.project(point3d_img)

        #img = cv.CloneMat(f['image'])
        #self.draw_dots_nstuff(img, f['points2d'],
        #        np.matrix([r3d.UNLABELED]* f['points2d'].shape[1]),
        #        point2d_img)
        #self.img_pub.publish(img)

        return f, kimage_name
        #self.save_dataset(point, name, f['rdict'])

    #######################################################################################
    # Learning Manipulation Behaviors
    #######################################################################################
    # The behavior can make service calls to a GUI asking users to label
    def practice(self, task_id, ctask_id, point3d_bl, stop_fun=None, params=None,
                 negative_cut_off=.5, resolution=.01, max_samples=5):
        if params == None:
            params = r3d.Recognize3DParam()
            params.uncertainty_x = 1.
            #param.n_samples = 2000
            params.uncertainty_z = .04
            params.uni_mix = .1
        pstart = time.time()

        kdict, image_name = self.read_features_save(task_id, point3d_bl, params)
        #learner = self.locations_man.learners[task_id]
        #pdb.set_trace()
        behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
        head_pose = self.robot.head.pose()

        kdict['image_T_bl'] = tfu.transform(self.optical_frame, 'base_link', self.tf_listener)
        point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
        point2d_img = self.feature_ex.cal.project(point3d_img)

        labels = []
        points3d_tried = []
        points2d_tried = []
        converged = False
        indices_added = []
        reset_times = []
        task_name = task_id.replace('_', ' ')

        #while not converged or (stop_fun != None and not stop_fun(np.matrix(labels))):
        while True:
            if stop_fun != None and stop_fun(np.matrix(labels)):
                rospy.loginfo('Stop function satisfied, exiting loop!')
                break

            if stop_fun == None and len(indices_added) > params.max_points_per_site:
                rospy.loginfo('practice: added enough points from this scan. Limit is %d points.' % params.max_points_per_site)
                break

            #==================================================
            # Pick
            #==================================================
            #Find remaining instances
            iter_start = time.time()
            print 'input to inverse_indices'
            print '>>', indices_added, kdict['instances'].shape[1]
            remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
            remaining_instances = kdict['instances'][:, remaining_pt_indices]

            #Ask learner to pick an instance
            ridx, selected_dist, converged = self.locations_man.learners[task_id].select_next_instances_no_terminate(remaining_instances)
            #if stop_fun == None and converged:
            #    rospy.loginfo('practice: Converged! Exiting loop.')
            #    break

            selected_idx = remaining_pt_indices[ridx[0]]
            #pdb.set_trace()
            indices_added.append(selected_idx)

            #==================================================
            # DRAW
            #==================================================
            img = cv.CloneMat(kdict['image'])
            #Draw the center
            r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)

            #Draw possible points
            r3d.draw_points(img, kdict['points2d']+np.matrix([1,1.]).T, [255, 255, 255], 4, -1)

            if len(points2d_tried) > 0:
                _, pos_exp, neg_exp = r3d.separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
                r3d.draw_points(img, pos_exp, [50, 255, 0], 8, 1)
                r3d.draw_points(img, neg_exp, [50, 0, 255], 8, 1)

            predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))
            _, pos_pred, neg_pred = r3d.separate_by_labels(kdict['points2d'], predictions)
            r3d.draw_points(img, pos_pred, [255, 204, 51], 3, -1)
            r3d.draw_points(img, neg_pred, [51, 204, 255], 3, -1)

            #Draw what we're selecting
            r3d.draw_points(img, kdict['points2d'][:, selected_idx], [255, 51, 204], 8, -1)
            self.locations_man.publish_image(task_id, img, postfix='_practice_pick')

            #==================================================
            # Execute!!
            #==================================================
            self.robot.head.set_pose(head_pose, 1)
            #self.robot.projector.set(False)
            if np.linalg.norm(kdict['points3d'][:, selected_idx] - point3d_bl) > negative_cut_off:
                rospy.loginfo('#########################################')
                rospy.loginfo('Point outside of negative cut off!! Eliminating %s' % (str(kdict['points3d'][:, selected_idx].T)))
                rospy.loginfo('#########################################')
                success = False
            else:
                if len(points3d_tried) > 0:
                    existing_pts_tree = sp.KDTree(np.array(np.column_stack(points3d_tried).T))
                    close_by_indices = existing_pts_tree.query_ball_point(np.array(kdict['points3d'][:, selected_idx]).T, resolution)[0]
                    if len(close_by_indices) > 0:
                        #labelsm = np.matrix(labels)[0, close_by_indices]
                        #ntotal = labelsm.shape[1]
                        rospy.loginfo('#########################################')
                        rospy.loginfo('Point within resolution of existing point.') #Labeling %s' % (str(kdict['points3d'][:, selected_idx])))
                        rospy.loginfo('#########################################')
                        continue
                        #This can cause the automated mechanism to propagate false labels.
                        #if np.sum(labelsm) > (ntotal/2.0):
                        #    success = True
                        #else:
                        #    success = False
                        #rospy.loginfo('as %s' % str(success))
                    else:
                        self.robot.sound.say('executing ' + task_name)
                        pstart_0 = self.robot.left.pose_cartesian_tf()
                        success, _, undo_point_bl = behavior(kdict['points3d'][:, selected_idx])
                        #self.did_end_effector_move(pstart)
                else:
                    self.robot.sound.say('executing ' + task_name)
                    pstart_0 = self.robot.left.pose_cartesian_tf()
                    success, _, undo_point_bl = behavior(kdict['points3d'][:, selected_idx])
                    #self.did_end_effector_move(pstart)
            #self.robot.projector.set(True)

            if success:
                color = [0,255,0]
                label = r3d.POSITIVE
                rospy.loginfo('=============================================')
                rospy.loginfo('>> %s successful' % task_id)
                rospy.loginfo('=============================================')
                self.robot.sound.say('action succeeded')
            else:
                label = r3d.NEGATIVE
                color = [0,0,255]
                rospy.loginfo('=============================================')
                rospy.loginfo('>> %s NOT successful' % task_id)
                rospy.loginfo('=============================================')
                self.robot.sound.say('action failed')

            #==================================================
            # Book keeping
            #==================================================
            labels.append(label)
            points3d_tried.append(kdict['points3d'][:, selected_idx])
            points2d_tried.append(kdict['points2d'][:, selected_idx])

            datapoint = {'instances': kdict['instances'][:, selected_idx],
                         'points2d':  kdict['points2d'][:, selected_idx],
                         'points3d':  kdict['points3d'][:, selected_idx],
                         'sizes':     kdict['sizes'],
                         'labels':    np.matrix([label])
                         }
            self.locations_man.add_perceptual_data(task_id, datapoint)
            self.locations_man.save_database()
            self.locations_man.train(task_id, kdict)
            #pdb.set_trace()

            #==================================================
            # Reset Environment
            #==================================================
            reset_start = time.time()
            if success and ctask_id != None:
                def any_pos_sf(labels_mat):
                    if np.any(r3d.POSITIVE == labels_mat):
                        return True
                    return False
                #self.practice(ctask_id, None, point3d_bl, stop_fun=any_pos_sf)
                undo_point_map = self.locations_man.data[ctask_id]['center']
                undo_point_bl0 = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), undo_point_map)
                #self.practice(ctask_id, None, undo_point_bl, stop_fun=any_pos_sf)
                #pdb.set_trace()
                num_points_added = self.practice(ctask_id, None, undo_point_bl0, stop_fun=any_pos_sf)
                #if num_points_added > params.max_points_per_site:
                #    #pdb.set_trace()
                #    dataset = self.locations_man.get_perceptual_data(ctask_id)
                #    npoints = dataset.inputs.shape[1]
                #    pts_added_idx = range(npoints - num_points_added, npoints)
                #    pts_remove_idx = pts_added_idx[:(num_points_added - params.max_points_per_site)]
                #    pts_remove_idx.reverse()
                #    rospy.loginfo('Got too many data points for %s throwing out %d points' % (ctask_id, len(pts_remove_idx)))
                #    for idx in pts_remove_idx:
                #        self.locations_man.remove_perceptual_data(ctask_id, idx)

            reset_end = time.time()

            #Classify
            predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))

            #==================================================
            # DRAW
            #==================================================
            img = cv.CloneMat(kdict['image'])

            #Draw the center
            r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)

            #Draw 'shadows'
            r3d.draw_points(img, kdict['points2d']+np.matrix([1,1.]).T, [255, 255, 255], 4, -1)

            #Draw points tried
            _, pos_exp, neg_exp = r3d.separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
            r3d.draw_points(img, pos_exp, [50, 255, 0], 9, 1)
            r3d.draw_points(img, neg_exp, [50, 0, 255], 9, 1)

            _, pos_pred, neg_pred = r3d.separate_by_labels(kdict['points2d'], predictions)
            r3d.draw_points(img, pos_pred, [255, 204, 51], 3, -1)
            r3d.draw_points(img, neg_pred, [51, 204, 255], 3, -1)

            #Draw what we're selecting
            r3d.draw_points(img, points2d_tried[-1], color, 8, -1)
            self.locations_man.publish_image(task_id, img, postfix='_practice_result')

            pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.pkl')
            #cv.SaveImage(ffull, img)
            ut.save_pickle({'image': image_name,
                            'pos': pos_exp,
                            'neg': neg_exp,
                            'pos_pred': pos_pred,
                            'neg_pred': neg_pred,
                            'tried': (points2d_tried[-1], label),
                            'center': point2d_img}, pkname)

            #ffull = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.png')
            #cv.SaveImage(ffull, img)
            #self.img_pub.publish(img)
            reset_time = reset_end - reset_start
            loop_time = (time.time() - iter_start) - reset_time
            reset_times.append(reset_time)
            print '**********************************************************'
            print 'Loop took %.3f seconds' % loop_time
            print '**********************************************************'
            self.locations_man.record_time(task_id, 'practice_loop_time', loop_time)

        if np.any(r3d.POSITIVE == np.matrix(labels)):
            self.locations_man.update_execution_record(task_id, 1)

        print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
        print 'returning from', task_id
        print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
        pend = time.time()
        practice_time = pend - pstart - np.sum(reset_times)
        self.locations_man.record_time(task_id, 'practice_func_time', practice_time)
        return len(indices_added)

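    # Example stop function for practice() (a sketch mirroring the ones
    # defined in init_task below): stop once both a positive and a negative
    # label have been observed.
    #
    #   def has_pos_and_neg(labels_mat):
    #       return (np.any(labels_mat == r3d.POSITIVE) and
    #               np.any(labels_mat == r3d.NEGATIVE))
    #   self.practice(task_id, ctask_id, point3d_bl, stop_fun=has_pos_and_neg)
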
    ##
    def execute(self, task_id, point3d_bl, save, max_retries=15, closeness_tolerance=.01, user_study=False):
        if user_study:
            rospy.loginfo('=====================================================')
            rospy.loginfo('user_study is True.  Trying to fail on first attempt!')
            rospy.loginfo('=====================================================')
        #Extract features
        params = r3d.Recognize3DParam()
        params.uncertainty_x = 1.
        params.uncertainty_z = .03
        params.uni_mix = .1
        params.n_samples = 1500

        kdict, image_name = self.read_features_save(task_id, point3d_bl, params)
        kdict['image_T_bl'] = tfu.transform(self.optical_frame, 'base_link', self.tf_listener)
        point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
        point2d_img = self.feature_ex.cal.project(point3d_img)
        head_pose = self.robot.head.pose()

        #Classify
        #pdb.set_trace()
        predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))
        pos_indices = np.where(r3d.POSITIVE == predictions)[1].A1
        loc2d_max = None
        try_num = 0
        updated_execution_record = False
        first_time = user_study

        #If we find some positives:
        if len(pos_indices) > 0:
            indices_added = []
            remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
            remaining_instances = kdict['instances'][:, remaining_pt_indices]
            behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])

            while len(pos_indices) > 0:
                #Select from the positive predictions the one with the most spatial support
                print 'Num positive points found:', len(pos_indices)

                #figure out max point
                locs2d = None
                if len(pos_indices) > 1:
                    locs2d = kdict['points2d'][:, pos_indices]
                    if np.any(np.isnan(locs2d)) or np.any(np.isinf(locs2d)):
                        pdb.set_trace()
                    locs2d_indices = np.where(False == np.sum(np.isnan(locs2d), 0))[1].A1
                    print locs2d[:, locs2d_indices]
                    loc2d_max, density_image = r3d.find_max_in_density(locs2d[:, locs2d_indices])
                    cv.SaveImage("execute.png",
                            cv.fromarray(255 * (np.rot90(density_image)/np.max(density_image))))
                    dists = ut.norm(kdict['points2d'] - loc2d_max)
                    selected_idx = np.argmin(dists)
                    if not first_time:
                        indices_added.append(selected_idx)
                else:
                    selected_idx = pos_indices[0]
                    loc2d_max = kdict['points2d'][:, selected_idx]

                selected_3d = kdict['points3d'][:, selected_idx]

                #Draw
                img = cv.CloneMat(kdict['image'])
                r3d.draw_points(img, point2d_img,       [255, 0, 0],    10, -1)
                r3d.draw_points(img, kdict['points2d'], [51, 204, 255], 3, -1)
                if locs2d != None:
                    r3d.draw_points(img, locs2d,        [255, 204, 51], 3, -1)
                r3d.draw_points(img, loc2d_max,         [255, 204, 51], 10, -1)
                self.locations_man.publish_image(task_id, img, postfix='_execute_pick')

                #Execute
                self.robot.head.set_pose(head_pose, 1)
                print '============================================================'
                print '============================================================'
                print 'Try number', try_num
                print '============================================================'
                print '============================================================'
                if first_time:
                    print 'original point selected is', selected_3d.T
                    selected_3d = selected_3d + np.matrix([0, 0, 0.2]).T
                print 'point selected is', selected_3d.T
                success, _, point_undo_bl = behavior(selected_3d)

                #Save pickle for viz
                pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '_execute.pkl')
                _, pos_pred, neg_pred = r3d.separate_by_labels(kdict['points2d'], predictions)
                if success:
                    label = r3d.POSITIVE
                else:
                    label = r3d.NEGATIVE
                ut.save_pickle({'image': image_name,
                                'pos_pred': pos_pred,
                                'neg_pred': neg_pred,
                                'tried': [kdict['points2d'][:, selected_idx], label],
                                'id': task_id,
                                'center': point2d_img}, pkname)

                try_num = try_num + 1
                if not first_time:
                    if success:
                        #if save:
                        if not updated_execution_record:
                            self.locations_man.update_execution_record(task_id, 1.)
                            updated_execution_record = True
                        self.robot.sound.say('action succeeded')
                        label = r3d.POSITIVE

                        dataset = self.locations_man.data[task_id]['dataset']
                        nneg = np.sum(dataset.outputs == r3d.NEGATIVE)
                        npos = np.sum(dataset.outputs == r3d.POSITIVE)
                        if nneg > npos and save:
                            datapoint = {'instances': kdict['instances'][:, selected_idx],
                                         'points2d':  kdict['points2d'][:, selected_idx],
                                         'points3d':  kdict['points3d'][:, selected_idx],
                                         'sizes':     kdict['sizes'],
                                         'labels':    np.matrix([label])}
                            self.locations_man.add_perceptual_data(task_id, datapoint)
                            if save:
                                self.locations_man.save_database()
                            self.locations_man.train(task_id, kdict)
                        break
                    else:
                        self.robot.sound.say('action failed')
                        label = r3d.NEGATIVE
                        if not updated_execution_record:
                            self.locations_man.update_execution_record(task_id, 0.)
                            updated_execution_record = True

                        #We were wrong so we add this to our dataset and retrain
                        datapoint = {'instances': kdict['instances'][:, selected_idx],
                                     'points2d':  kdict['points2d'][:, selected_idx],
                                     'points3d':  kdict['points3d'][:, selected_idx],
                                     'sizes':     kdict['sizes'],
                                     'labels':    np.matrix([label])}
                        self.locations_man.add_perceptual_data(task_id, datapoint)
                        if save:
                            self.locations_man.save_database()
                        self.locations_man.train(task_id, kdict)

                        if try_num > max_retries:
                            break

                        #Remove point and predict again!
                        remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
                        remaining_instances = kdict['instances'][:, remaining_pt_indices]
                        predictions = np.matrix(self.locations_man.learners[task_id].classify(remaining_instances))
                        remaining_pos_indices = np.where(r3d.POSITIVE == predictions)[1].A1
                        pos_indices = remaining_pt_indices[remaining_pos_indices]
                first_time = False

        #If no positives found:
        else:
            img = cv.CloneMat(kdict['image'])
            r3d.draw_points(img, point2d_img,       [255, 0, 0],    10, -1)
            r3d.draw_points(img, kdict['points2d'], [51, 204, 255], 3, -1)
            self.locations_man.publish_image(task_id, img, postfix='_execute_fail')

            if not updated_execution_record:
                self.locations_man.update_execution_record(task_id, 0.)
                updated_execution_record = True

            self.robot.sound.say("Perception failure.  Exploring around expected region.")
            #FAIL. Update the location's statistic with this failure.
            if save:
                self.locations_man.update_execution_record(task_id, 0.)
            def any_pos_sf(labels_mat):
                if np.any(r3d.POSITIVE == labels_mat):
                    return True
                return False
            ctask_id = self.locations_man.data[task_id]['complementary_task_id']
            self.random_explore_init(task_id, ctask_id,
                    point3d_bl, max_retries=max_retries, stop_fun=any_pos_sf,
                    closeness_tolerance=closeness_tolerance, should_reset=False)

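    # Sketch of how execute() is typically driven (an assumption, following
    # the map -> base_link pattern used in practice_task below):
    #
    #   point_map = self.locations_man.data[task_id]['center']
    #   bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
    #   point_bl = tfu.transform_points(bl_T_map, point_map)
    #   self.execute(task_id, point_bl, save, user_study=user_study)
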
    ###
    # TODO: WE MIGHT NOT NEED THIS AFTER ALL, MIGHT BE ABLE TO JUST INITIALIZE RANDOMLY!
    ###
    def random_explore_init(self, task_id, ctask_id, point_bl, stop_fun,
            max_retries=30, closeness_tolerance=.01, positive_escape=.08, should_reset=False):
        #rospy.loginfo('random_explore_init: %s' % task_id)
        ##case label
        #if self.locations_man.data[task_id]['dataset'] != None:
        #    case_labels = self.locations_man.data[task_id]['dataset'].outputs.A1.tolist()
        #else:
        #    case_labels = []
        #if stop_fun(case_labels):
        #    rospy.loginfo('random_explore_init: stop function satisfied with label set. not executing.')
        #    return

        self.look_at(point_bl, True)
        rospy.sleep(2.)
        params = r3d.Recognize3DParam()
        params.uncertainty_x = 1.
        params.uncertainty_y = .04
        params.uncertainty_z = .04
        params.n_samples = 800
        params.uni_mix = 0.1

        #profile read_features_save
        #cProfile.runctx('self.profile_me(task_id, point_bl)', globals(), locals(), filename='read_features_save.prof')
        #pdb.set_trace()

        #Scan
        fea_dict, image_name = self.read_features_save(task_id, point_bl, params)

        params2 = r3d.Recognize3DParam()
        params2.n_samples = 5000
        fea_dict2, image_name2 = self.read_features_save(task_id, point_bl, params2)

        behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
        image_T_bl = tfu.transform(self.optical_frame, 'base_link', self.tf_listener)
        fea_dict['image_T_bl'] = image_T_bl

        #Rearrange sampled points by distance
        dists = ut.norm(fea_dict['points3d'] - point_bl)
        ordering = np.argsort(dists).A1
        points3d_sampled = fea_dict['points3d'][:, ordering]
        points2d_sampled = fea_dict['points2d'][:, ordering]
        instances_sampled = fea_dict['instances'][:, ordering]
        start_pose = self.robot.head.pose()

        #Figure out where to draw given point for visualization
        point3d_img = tfu.transform_points(fea_dict['image_T_bl'], point_bl)
        point2d_img = self.feature_ex.cal.project(point3d_img)

        #Book keeping for loop
        undo_ret = None
        points3d_tried = []
        points2d_tried = []
        labels = []
        sampled_idx = 0
        iter_count = 0
        need_reset = False
        behavior_end_state = False
        undo_point_bl = point_bl

        #pdb.set_trace()
        while iter_count < max_retries and not stop_fun(np.matrix(labels)) and sampled_idx < points3d_sampled.shape[1]:
            #==================================================
            # Pick
            #==================================================
            #Skip points too close to ones already tried
            if len(points3d_tried) > 0 and \
               np.any(ut.norm(np.column_stack(points3d_tried) - points3d_sampled[:, sampled_idx]) < closeness_tolerance):
                   sampled_idx = sampled_idx + 1
                   continue

            #If everything so far has been positive, escape to points further away
            if len(labels) > 0 and np.sum(labels) == len(labels) and\
               np.any(ut.norm(np.column_stack(points3d_tried) - points3d_sampled[:, sampled_idx]) < positive_escape):
                   sampled_idx = sampled_idx + 1
                   continue

            #==================================================
            # Execute!!
            #==================================================
            self.robot.head.set_pose(start_pose, 1)
            #self.robot.projector.set(False)
            success, reason, undo_point_bl = behavior(points3d_sampled[:, sampled_idx])
            rospy.loginfo('======================================')
            rospy.loginfo('%s was %s' % (task_id, str(success)))
            rospy.loginfo('======================================')
            behavior_end_state = success
            #self.robot.projector.set(True)

            #==================================================
            # Book keeping
            #==================================================
            if success:
                label = r3d.POSITIVE
                need_reset = True
            else:
                label = r3d.NEGATIVE
                need_reset = False

            points3d_tried.append(points3d_sampled[:, sampled_idx])
            points2d_tried.append(points2d_sampled[:, sampled_idx])
            labels.append(label)
            datapoint = {'instances': instances_sampled[:, sampled_idx],
                         'points3d':  points3d_sampled[:, sampled_idx],
                         'points2d':  points2d_sampled[:, sampled_idx],
                         'sizes':     fea_dict['sizes'],
                         'labels':    np.matrix([label])}
            self.locations_man.add_perceptual_data(task_id, datapoint)
            self.locations_man.save_database()

            iter_count = iter_count + 1
            sampled_idx = sampled_idx + 1

            #==================================================
            # DRAW
            #==================================================
            img = cv.CloneMat(fea_dict['image'])
            r3d.draw_points(img, points2d_sampled+np.matrix([1,1.]).T, [0, 0, 0], 3, -1)
            r3d.draw_points(img, points2d_sampled, [255, 255, 255], 3, -1)
            _, pos_points, neg_points = r3d.separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
            r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)
            r3d.draw_points(img, pos_points, [0, 255, 0], 4, -1)
            r3d.draw_points(img, neg_points, [0, 0, 255], 4, -1)
            r3d.draw_points(img, points2d_tried[-1], [0, 184, 245], 6, -1)
            self.locations_man.publish_image(task_id, img)

            pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '_seed.pkl')
            ut.save_pickle({'image': image_name,
                            'pos': pos_points,
                            'neg': neg_points,
                            #'pos_pred': pos_pred,
                            #'neg_pred': neg_pred,
                            'tried': [points2d_tried[-1], label],
                            'center': point2d_img}, pkname)

            #ffull = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.png')
            #cv.SaveImage(ffull, img)
            #self.img_pub.publish(img)

            #==================================================
            # Reset Environment
            #==================================================
            if need_reset and should_reset:
                self.robot.head.set_pose(start_pose, 1)
                if ctask_id != None:
                    #If we were successful, call blind exploration with the undo behavior
                    def any_pos_sf(labels_mat):
                        if np.any(r3d.POSITIVE == labels_mat):
                            return True
                        return False

                    #ctask_point = points3d_tried[-1] #points3d_sampled[:, sampled_idx]
                    ctask_point = undo_point_bl
                    undo_ret = self.random_explore_init(ctask_id, None, ctask_point, stop_fun=any_pos_sf,
                                        max_retries=max_retries, closeness_tolerance=closeness_tolerance,
                                        should_reset=False)
                    need_reset = False
                    behavior_end_state = not undo_ret['end_state']

        rospy.loginfo('Tried %d times' % iter_count)
        fea_dict_undo = None
        if undo_ret != None:
            fea_dict_undo = undo_ret['features']

        return {'end_state': behavior_end_state, 'features': fea_dict2,
                'features_undo': fea_dict_undo, 'undo_point': undo_point_bl}

    #######################################################################################
    # Learning Manipulation Behaviors
    #######################################################################################
    # Initialization phase
    #
    # @param point_bl 3x1 in base_link
    def init_task(self, point_bl):
        #If that location is new:
        #pdb.set_trace()
        self.look_at(point_bl, False)
        map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        point_map = tfu.transform_points(map_T_base_link, point_bl)

        #Initialize new location
        rospy.loginfo('Select task type:')
        for i, ttype in enumerate(self.locations_man.task_types):
            print i, ttype
        task_type = self.locations_man.task_types[int(raw_input())]
        rospy.loginfo('Selected task %s' % task_type)
        self.manipulation_posture(task_type)
        point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
        self.look_at(point_bl, False)

        def has_pos_and_neg(labels):
            if np.sum(labels == r3d.POSITIVE) > 0 and np.sum(labels == r3d.NEGATIVE) > 0:
                return True
            else:
                return False

        def any_pos_sf(labels_mat):
            if np.any(r3d.POSITIVE == labels_mat):
                return True
            return False

        #Create new tasks
        location_name = raw_input('Enter a name for this location:\n')
        ctask_type = self.locations_man.get_complementary_task(task_type)
        t_current_map, r_current_map = self.robot.base.get_pose()
        task_id = self.locations_man.create_new_location(task_type,
                np.matrix([0,0,0.]).T, [t_current_map, r_current_map], name=location_name)
        ctask_id = self.locations_man.create_new_location(ctask_type,
                np.matrix([0,0,0.]).T, [t_current_map, r_current_map], name=location_name)

        #Stop when we have at least 1 pos and 1 neg
        point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
        self.look_at(point_bl, True)
        ret_dict_action = self.random_explore_init(task_id, ctask_id, point_bl, stop_fun=has_pos_and_neg, should_reset=True)
        dset_action = ret_dict_action['features']
        dset_undo = ret_dict_action['features_undo']
        undo_point_bl = ret_dict_action['undo_point']

        #Lights should be on at this stage!
        #If we don't have enough data for the reverse action
        if (self.locations_man.data[ctask_id]['dataset'] == None) or \
                not has_pos_and_neg(self.locations_man.data[ctask_id]['dataset'].outputs.A1):
            rospy.loginfo('====================================================')
            rospy.loginfo('Don\'t have enough data for reverse action')
            #Turn off the lights
            point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
            self.look_at(point_bl, True)
            rospy.loginfo('====================================================')
            rospy.loginfo('Running random_explore_init on set %s.' % task_id)
            ret_dict_action = self.random_explore_init(task_id, None, point_bl, stop_fun=any_pos_sf, should_reset=False)
            undo_point_bl = ret_dict_action['undo_point']

            #Practice until stats are met
            self.look_at(point_bl, True)
            #ret_dict_undo = self.random_explore_init(ctask_id, task_id, point_bl, stop_fun=has_pos_and_neg)
            rospy.loginfo('====================================================')
            rospy.loginfo('Running random_explore_init on set %s.' % ctask_id)
            ret_dict_undo = self.random_explore_init(ctask_id, task_id, undo_point_bl, stop_fun=has_pos_and_neg)
            if dset_undo == None:
                dset_undo = ret_dict_undo['features']

        #Figure out behavior centers in map frame
        tdataset  = self.locations_man.data[task_id]['dataset']
        tpoint_bl = tdataset.pt3d[:, np.where(tdataset.outputs == r3d.POSITIVE)[1].A1[0]]
        self.balance_positives_and_negatives(tdataset)

        cdataset  = self.locations_man.data[ctask_id]['dataset']
        cpoint_bl = cdataset.pt3d[:, np.where(cdataset.outputs == r3d.POSITIVE)[1].A1[0]]
        self.balance_positives_and_negatives(cdataset)

        map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        point_success_map = tfu.transform_points(map_T_base_link, tpoint_bl)
        cpoint_success_map = tfu.transform_points(map_T_base_link, cpoint_bl)

        #Set newly created task with learned information
        self.locations_man.set_center(task_id, point_success_map)
        self.locations_man.set_center(ctask_id, cpoint_success_map)
        self.locations_man.data[task_id]['complementary_task_id'] = ctask_id
        self.locations_man.data[ctask_id]['complementary_task_id'] = task_id

        self.locations_man.update_execution_record(task_id, 1.)
        self.locations_man.update_execution_record(ctask_id, 1.)

        self.locations_man.train(task_id, dset_action, save_pca_images=True)
        self.locations_man.train(ctask_id, dset_undo, save_pca_images=True)
        self.locations_man.save_database()
        rospy.loginfo('Done initializing new location!')
        self.driving_posture(task_type)

    def did_end_effector_move(self, p_t0):
        #pdb.set_trace()
        p0 = p_t0[0]
        p1 = self.robot.left.pose_cartesian_tf()[0]
        d = np.linalg.norm(p1 - p0)
        if d < .05:
            self.robot.sound.say('failed to move')
            pdb.set_trace()
            return False
        else:
            return True

    ##
    # Practice phase
    def practice_task(self):
        rospy.loginfo('===================================================')
        rospy.loginfo('= Practice Mode!                                  =')
        rospy.loginfo('===================================================')
        #pdb.set_trace()

        ulocs = self.unreliable_locs()
        rospy.loginfo('%d out of %d locs in database are unreliable' \
                % (len(ulocs), len(self.locations_man.data.keys())))

        #Ask user to select a location
        tasks = self.locations_man.data.keys()
        for i, k in enumerate(tasks):
            print i, k
        selected_idx = int(raw_input("Select a location to practice at\n"))

        #tid = ulocs[selected_idx]
        tid = tasks[selected_idx]
        rospy.loginfo('selected %s' % tid)

        #Ask user for practice poses
        if not self.locations_man.data[tid].has_key('practice_locations'):
            #Get robot poses
            map_points = []
            for i in range(4):
                raw_input('%d move robot to desired position\n' % i)
                rospy.sleep(1.)
                #pdb.set_trace()
                t_current_map, r_current_map = self.robot.base.get_pose()
                map_points.append([t_current_map, r_current_map])
            self.locations_man.data[tid]['practice_locations'] = map_points
            self.locations_man.data[tid]['practice_locations_history'] = np.zeros((1, len(map_points)))
            self.locations_man.data[tid]['practice_locations_convergence'] = np.zeros((1, len(map_points)))
            self.locations_man.save_database()

        #Ask user for canonical pose if it does not exist
        if not self.locations_man.data[tid].has_key('base_pose'):
            raw_input('move robot to desired end position\n')
            trans, rot_quat = self.robot.base.get_pose()
            self.locations_man.data[tid]['base_pose'] = [trans, rot_quat]
            self.locations_man.save_database()

        point_map = self.locations_man.data[tid]['center']
        task_type = self.locations_man.data[tid]['task']
        points_added_history = []
        #pdb.set_trace()

        unexplored_locs  = np.where(self.locations_man.data[tid]['practice_locations_history'] == 0)[1]
        unconverged_locs = np.where(self.locations_man.data[tid]['practice_locations_convergence'] == 0)[1]
        rospy.loginfo("Location history: %s" % str(self.locations_man.data[tid]['practice_locations_history']))
        run_loop = True

        #If this is a fresh run, start with the current location
        if unexplored_locs.shape[0] == len(self.locations_man.data[tid]['practice_locations']):
            pidx = 3

        #If this is not a fresh run we continue with a location we've never been to before
        elif unexplored_locs.shape[0] > 0:
            pidx = unexplored_locs[0]
            rospy.loginfo("Resuming training from last unexplored location")

        #Set the index to an unconverged location
        elif unconverged_locs.shape[0] > 0:
            pidx = unconverged_locs[0]
            rospy.loginfo("Resuming training from unconverged location")

        #If there are no unconverged locations, try to run anyway
        else:
            rospy.loginfo("WARNING: no unexplored or unconverged location")
            pidx = 3
            self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] = 0

        #pdb.set_trace()

01054         #Commence practice!
01055         while True: #%not converged:

            if self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] == 0:
                #Drive to location
                self.driving_posture(task_type)

                #Move to setup location
                self.robot.sound.say('Driving to practice location')
                rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['practice_locations'][pidx])
                rospy.loginfo('move ret value %s dist %f' % (str(rvalue), dist))

                #Move to location where we were first initialized
                self.robot.sound.say('Driving to mechanism location')
                rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['base_pose'])

                #Reorient base
                bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
                point_bl = tfu.transform_points(bl_T_map, point_map)
                ret = self.location_approach_driving(task_type, point_bl)
                self.robot.base.set_pose(self.robot.base.get_pose()[0], self.locations_man.data[tid]['base_pose'][1], 'map')
                if not ret[0]:
                    rospy.loginfo('Driving failed!! Resetting.')
                    continue

                self.manipulation_posture(task_type)
                bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
                point_bl = tfu.transform_points(bl_T_map, point_map)
                self.look_at(point_bl, False)

                #Practice
                self.robot.sound.say('practicing')
                ctid = self.locations_man.data[tid]['complementary_task_id']

                points_before_t = self.locations_man.data[tid]['dataset'].inputs.shape[1]
                points_before_ct = self.locations_man.data[ctid]['dataset'].inputs.shape[1]

                points_added = self.practice(tid, ctid, point_bl)

                points_added_history.append(points_added)
                points_after_t = self.locations_man.data[tid]['dataset'].inputs.shape[1]
                points_after_ct = self.locations_man.data[ctid]['dataset'].inputs.shape[1]
                self.locations_man.record_time(tid,  'num_points_added_' + tid, points_after_t - points_before_t)
                self.locations_man.record_time(ctid, 'num_points_added_' + tid, points_after_ct - points_before_ct)

                self.locations_man.data[tid]['practice_locations_history'][0, pidx] += 1
                #Declare this location converged if practice added no points
                if points_added == 0:
                    self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] = 1
                    rospy.loginfo('===================================================')
                    rospy.loginfo('= LOCATION CONVERGED ')
                    rospy.loginfo('Converged locs: %s' % str(self.locations_man.data[tid]['practice_locations_convergence']))
                    rospy.loginfo('points added at this location %d' % (points_after_t - points_before_t))
                    rospy.loginfo('history %s' % str(points_added_history))
                    rospy.loginfo('total points added %s' % str(np.sum(points_added_history)))
                    rospy.loginfo('number of datapoints %s' % str(self.locations_man.data[tid]['dataset'].outputs.shape))
                    rospy.loginfo('===================================================')
                    #Stop once every practice location has converged
                    if np.where(self.locations_man.data[tid]['practice_locations_convergence'] == 0)[1].shape[0] <= 0:
                        break
                else:
                    rospy.loginfo('===================================================')
                    rospy.loginfo('= Location not converged yet')
                    rospy.loginfo('Converged locs: %s' % str(self.locations_man.data[tid]['practice_locations_convergence']))
                    rospy.loginfo('points added at this location %d' % (points_after_t - points_before_t))
                    rospy.loginfo('history %s' % str(points_added_history))
                    rospy.loginfo('total points added so far %s' % str(np.sum(points_added_history)))
                    rospy.loginfo('number of datapoints %s' % str(self.locations_man.data[tid]['dataset'].outputs.shape))
                    rospy.loginfo('===================================================')

            pidx = (pidx + 1) % len(self.locations_man.data[tid]['practice_locations'])
            self.locations_man.save_database()

        self.driving_posture(task_type)

    ##
    # Execution phase
    def execute_task(self, save, user_study, novision=False):
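        '''
        Drive to a user-selected task's recorded base pose and run its
        behavior.  Parameter meanings, as inferred from their use below:

        save       -- forwarded to execute(), which records execution data
        user_study -- when True, do not save the location database here
        novision   -- when True, skip the manipulation behavior and only
                      capture a high resolution image
        '''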
        rospy.loginfo('===================================================')
        rospy.loginfo('= User selection mode!                            =')
        rospy.loginfo('===================================================')
        tasks = self.locations_man.data.keys()
        for i, k in enumerate(tasks):
            print i, k

        tid = tasks[int(raw_input("Select a task to execute\n"))]
        task_type = self.locations_man.data[tid]['task']
        rospy.loginfo('User selected %s' % tid)

        #Record current robot position
        if not self.locations_man.data[tid].has_key('execute_locations'):
            self.locations_man.data[tid]['execute_locations'] = []
        t_current_map, r_current_map = self.robot.base.get_pose()
        self.locations_man.data[tid]['execute_locations'].append([t_current_map, r_current_map])
        if not user_study and not novision:
            self.locations_man.save_database()

        point_map = self.locations_man.data[tid]['center']
        self.robot.sound.say('Driving')
        self.driving_posture(task_type)
        rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['base_pose'])
        if not rvalue:
            self.robot.sound.say('unable to plan to location')
            return False

        bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
        point_bl = tfu.transform_points(bl_T_map, point_map)
        ret = self.location_approach_driving(task_type, point_bl)
        self.robot.base.set_pose(self.robot.base.get_pose()[0],
                                 self.locations_man.data[tid]['base_pose'][1], 'map')
        if not ret[0]:
            self.robot.sound.say('unable to get to location!')
            return False

        if not novision:
            self.manipulation_posture(task_type)
        bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
        point_bl = tfu.transform_points(bl_T_map, point_map)
        self.look_at(point_bl, False)

        self.robot.sound.say('Executing behavior')
        if not novision:
            self.execute(tid, point_bl, save, user_study=user_study)
        else:
            #Without vision, only capture a high resolution image
            tstring = time.strftime('%A_%m_%d_%Y_%I_%M%p')
            kimage_name = '%s_highres.png' % tstring
            rospy.sleep(2)
            #Grab several frames so stale buffered images are flushed out
            for i in range(10):
                kimage = self.prosilica.get_frame()
            cv.SaveImage('/u/haidai/' + kimage_name, kimage)
            rospy.loginfo('Saved image to /u/haidai/%s' % kimage_name)

        if not novision:
            self.driving_posture(task_type)

class TestLearner:
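    '''Offline evaluation of SVM training parameters (class weight ratio,
    RBF kernel bandwidth, and cost) on a task's stored dataset.'''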

    def __init__(self):
        self.rec_params = r3d.Recognize3DParam()
        self.locations_man = lcm.LocationsManager('locations_narrow_v11.pkl', rec_params=self.rec_params)

    def test_training_set(self):
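        '''
        Grid search over SVM training parameters for the task the user picks
        at the prompt.  Over 40 random stratified 70/30 train/test splits,
        sweep the negative-to-positive weight ratio, the RBF bandwidth (-g),
        and the cost (-c), score each setting by the sum of the confusion
        matrix diagonal, and report the mean score per setting at the end.
        '''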
        #Pick a task
        tasks = self.locations_man.data.keys()
        for i, k in enumerate(tasks):
            print i, k
        task_id = tasks[int(raw_input("Select a task\n"))]

        #Fetch its dataset and PCA basis
        dataset = self.locations_man.data[task_id]['dataset']
        pca = self.locations_man.data[task_id]['pca']

        nneg = np.sum(dataset.outputs == r3d.NEGATIVE)
        npos = np.sum(dataset.outputs == r3d.POSITIVE)
        print '================= Training ================='
        print 'NEG examples', nneg
        print 'POS examples', npos
        print 'TOTAL', dataset.outputs.shape[1]
        neg_to_pos_ratio = float(nneg)/float(npos)
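        # For example, with 120 negative and 40 positive examples (made-up
        # counts), neg_to_pos_ratio is 3.0 and train() below hands libsvm
        # '-w1 3.00', so errors on the rarer positive class cost three times
        # as much as errors on the negative class.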

        #Each trial: separate into positive and negative examples, hold out
        #30% of each for testing, and train on the remainder
        all_costs_scores = []
        all_ratio_scores = []
        all_bandwidth_scores = []
        for trial in range(40):
            percent = .3
            negidx = np.where(dataset.outputs == r3d.NEGATIVE)[1].A1
            posidx = np.where(dataset.outputs == r3d.POSITIVE)[1].A1
            #Slice bounds must be ints; np.ceil returns a float
            nneg_test = int(np.ceil(len(negidx) * percent))
            npos_test = int(np.ceil(len(posidx) * percent))
            negperm = np.random.permutation(len(negidx))
            posperm = np.random.permutation(len(posidx))

            #Map the permuted positions back to dataset column indices
            test_neg_idx = negidx[negperm[0:nneg_test]]
            test_pos_idx = posidx[posperm[0:npos_test]]
            test_idx = np.concatenate((test_pos_idx, test_neg_idx))

            train_neg_idx = negidx[negperm[nneg_test:]]
            train_pos_idx = posidx[posperm[npos_test:]]
            train_idx = np.concatenate((train_pos_idx, train_neg_idx))

            test_set = dataset.subset(test_idx)
            train_set = dataset.subset(train_idx)

            #Sweep the negative-to-positive weight ratio
            ratio_score = []
            ratios = [neg_to_pos_ratio * (float(i+1)/10.) for i in range(10)]
            for r in ratios:
                print '######################################################'
                print 'ratio is', r
                svm_params = '-s 0 -t 2 -g .4 -c 1 '
                learner = self.train(train_set, pca, r, svm_params)
                predictions = np.matrix(learner.classify(test_set.inputs))
                conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
                combined = conf_mat[0,0] + conf_mat[1,1]
                print conf_mat
                print 'diagonal sum', combined
                ratio_score.append(combined)

            #Sweep the RBF kernel bandwidth
            bandwidth_score = []
            bandwidths = []
            for i in range(40):
                print '######################################################'
                g = i * .1
                bandwidths.append(g)
                print 'g is', g
                svm_params = '-s 0 -t 2 -g %.2f -c 1 ' % g
                learner = self.train(train_set, pca, neg_to_pos_ratio, svm_params)
                predictions = np.matrix(learner.classify(test_set.inputs))
                conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
                combined = conf_mat[0,0] + conf_mat[1,1]
                print conf_mat
                print 'diagonal sum', combined
                bandwidth_score.append(combined)

            #Sweep the cost parameter
            cost_score = []
            costs = []
            for i in range(40):
                print '######################################################'
                cost = i + 1
                costs.append(cost)
                print 'cost is', cost
                svm_params = '-s 0 -t 2 -g .4 -c %.2f ' % cost
                learner = self.train(train_set, pca, neg_to_pos_ratio, svm_params)
                predictions = np.matrix(learner.classify(test_set.inputs))
                conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
                combined = conf_mat[0,0] + conf_mat[1,1]
                print conf_mat
                print 'diagonal sum', combined
                cost_score.append(combined)

            all_costs_scores.append(cost_score)
            all_ratio_scores.append(ratio_score)
            all_bandwidth_scores.append(bandwidth_score)

        #Report the mean score for each parameter setting over all trials
        print 'Cost score'
        print costs
        costs_mean = np.sum(np.matrix(all_costs_scores), 0) / float(len(all_costs_scores))
        print costs_mean
        print 'best mean score', np.max(costs_mean)
        print 'best cost', costs[np.argmax(costs_mean)]

        print 'Ratio score'
        print ratios
        ratios_mean = np.sum(np.matrix(all_ratio_scores), 0) / float(len(all_ratio_scores))
        print ratios_mean
        print 'best mean score', np.max(ratios_mean)
        print 'best ratio', ratios[np.argmax(ratios_mean)]

        print 'Bandwidth score'
        print bandwidths
        bandwidths_mean = np.sum(np.matrix(all_bandwidth_scores), 0) / float(len(all_bandwidth_scores))
        print bandwidths_mean
        print 'best mean score', np.max(bandwidths_mean)
        print 'best bandwidth', bandwidths[np.argmax(bandwidths_mean)]

    def train(self, dataset, pca, neg_to_pos_ratio, svm_params):
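        '''
        Train an SVMPCA_ActiveLearner with the given libsvm parameter string,
        weighting the positive class by neg_to_pos_ratio to compensate for
        class imbalance.
        '''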
        #Weight the positive class by the negative-to-positive ratio
        weight_balance = ' -w0 1 -w1 %.2f' % neg_to_pos_ratio
        learner = r3d.SVMPCA_ActiveLearner(use_pca=True,
                        reconstruction_std_lim=self.rec_params.reconstruction_std_lim,
                        reconstruction_err_toler=self.rec_params.reconstruction_err_toler,
                        old_learner=None, pca=pca)

        rec_params = self.rec_params
        inputs_for_pca = dataset.inputs
        learner.train(dataset,
                      inputs_for_pca,
                      svm_params + weight_balance,
                      rec_params.variance_keep)
        return learner

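#A minimal, self-contained sketch of the stratified split used in
#test_training_set, assuming only numpy and that outputs is a 1xN np.matrix
#of labels as dataset.outputs is above (0/1 stand in for r3d.NEGATIVE and
#r3d.POSITIVE).  Illustrative only; not called anywhere in this file.
def example_stratified_split(outputs, percent=.3):
    negidx = np.where(outputs == 0)[1].A1
    posidx = np.where(outputs == 1)[1].A1
    #Hold out the requested fraction of each class for testing
    nneg_test = int(np.ceil(len(negidx) * percent))
    npos_test = int(np.ceil(len(posidx) * percent))
    negperm = np.random.permutation(len(negidx))
    posperm = np.random.permutation(len(posidx))
    test_idx = np.concatenate((posidx[posperm[:npos_test]],
                               negidx[negperm[:nneg_test]]))
    train_idx = np.concatenate((posidx[posperm[npos_test:]],
                                negidx[negperm[nneg_test:]]))
    return train_idx, test_idx
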
def test_display():
    rec_params = r3d.Recognize3DParam()
    locations_man = lcm.LocationsManager('locations_narrow_v11.pkl',
            rec_params=rec_params)
    location_display = lcm.LocationDisplay(locations_man)
    location_display.run()

def launch():
    import optparse

    p = optparse.OptionParser()
    p.add_option("-d", "--display", action="store_true", default=False)
    p.add_option("-p", "--practice", action="store_true", default=False)
    p.add_option("-v", "--novision", action="store_true", default=False)
    p.add_option("-e", "--execute", action="store_true", default=False)
    p.add_option("-b", "--base", action="store_true", default=False)
    p.add_option("-q", "--update_point", action="store_true", default=False)
    p.add_option("-s", "--static", action="store_true", default=False)
    p.add_option("-i", "--init", action="store_true", default=False)
    p.add_option("-t", "--test", action="store_true", default=False)
    p.add_option("-u", "--user_study", action="store_true", default=False)
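
    # Example invocations (a usage sketch; assumes a ROS master and the PR2
    # bringup stack are running and this package is built):
    #   ./trf_behavior.py --practice   # resume practicing a saved task
    #   ./trf_behavior.py --execute    # drive to a task and run its behavior
    #   ./trf_behavior.py --test       # offline SVM parameter search
    #   ./trf_behavior.py --display    # browse the location database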

    opt, args = p.parse_args()
    if opt.display:
        test_display()
        return

    if opt.practice or opt.execute or opt.init or opt.base or opt.update_point or opt.novision:
        rospy.init_node('trf_learn', anonymous=True)
        optical_frame = 'high_def_optical_frame'
        tf_listener = tf.TransformListener()

        app_behaviors = ab.ApplicationBehaviorsDB(optical_frame, tf_listener)
        learn_behaviors = TaskRelevantLearningBehaviors(app_behaviors, tf_listener, optical_frame)

        #If several mode flags are given, the last assignment below wins
        mode = None
        if opt.novision:
            mode = 'novision'
        if opt.practice:
            mode = 'practice'
        if opt.execute:
            mode = 'execute'
        if opt.init:
            mode = 'init'
        if opt.base:
            mode = 'update_base'
        if opt.update_point:
            mode = 'update_point'

        rospy.loginfo('Using mode %s' % mode)
        learn_behaviors.run(mode, not opt.static, opt.user_study)

    if opt.test:
        tl = TestLearner()
        tl.test_training_set()

if __name__ == '__main__':
    launch()

# trf_learn
# Author(s): Hai Nguyen (hai@gatech.edu)  Advisor: Prof. Charlie Kemp
# Lab: Healthcare Robotics Lab at Georgia Tech