linear_move.py
#! /usr/bin/python
import roslib; roslib.load_manifest('hai_sandbox')
import rospy
#import hrl_pr2_lib.pr2_kinematics as pk

#import pr2_msgs.msg as pm
#import pr2_gripper_reactive_approach.reactive_grasp as rgr
#import pr2_gripper_reactive_approach.controller_manager as con
#from pr2_gripper_sensor_msgs.msg import PR2GripperEventDetectorGoal
#import object_manipulator.convert_functions as cf
#from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import PointStamped, Point
import tf.transformations as tr
from std_msgs.msg import String
import visualization_msgs.msg as vm
#import sensor_msgs.msg as smsg
#import message_filters
import tf
import cv
import functools as ft

#import copy
import numpy as np
import math
import time
import subprocess as sb
import scipy.spatial as sp
import pdb
import os
import os.path as pt
import shutil
#import math

import hrl_camera.ros_camera as rc
import hrl_pr2_lib.pr2 as pr2
import hrl_pr2_lib.devices as hd
import hrl_lib.rutils as ru
import hrl_lib.tf_utils as tfu
import hrl_lib.util as ut
#import hrl_lib.prob as pr
import hrl_pr2_lib.linear_move as lm
#import hrl_pr2_lib.devices as de

#import hrl_pr2_lib.collision_monitor as cmon
import hai_sandbox.recognize_3d as r3d
import hrl_lib.image3d as i3d
import hrl_lib.viz as viz
import cProfile
import threading
import dynamic_reconfigure.client as dr
#from hai_sandbox.recognize_3d import InterestPointDataset
#import psyco
#psyco.full()

#import hai_sandbox.interest_point_actions as ipa
#import hai_sandbox.kinect_listener as kl

#from sound_play.msg import SoundRequest

class TaskError(Exception):
    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return repr(self.parameter)

class ActionType:
    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs

class ParamType:
    def __init__(self, name, ptype, options=None):
        self.name = name
        self.ptype = ptype
        self.options = options

class Action:

    def __init__(self, name, params):
        self.name = name
        self.params = params

class BehaviorDescriptor:

    def __init__(self):
        self.descriptors = {
                            'twist':       ActionType([ParamType('angle', 'radian')], [ParamType('success', 'bool')]),
                            'linear_move': ActionType([ParamType('start_loc', 'se3'),
                                                       ParamType('movement', 'r3'),
                                                       ParamType('stop', 'discrete', ['pressure', 'pressure_accel'])],
                                                      [ParamType('success', 'bool')]),
                            }

        start_location = (np.matrix([0.3, 0.15, 0.9]).T, np.matrix([0., 0., 0., 0.1]))
        movement       = np.matrix([.4, 0, 0.]).T
        self.seed = [Action('linear_move', [start_location, movement, 'pressure']),
                     Action('linear_move', [Action('current_location', [])])]
        self.run(self.seed)

    def run(self, seed):
        pass

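# Illustrative sketch, not part of the original file: how a seed Action's
# parameters line up with the ParamTypes declared in the descriptor table
# above.  Never called at import time; shown only to document the layout.
def _example_describe_seed():
    bd = BehaviorDescriptor()
    action = bd.seed[0]                      # Action('linear_move', ...)
    atype = bd.descriptors[action.name]      # its matching ActionType
    for ptype, value in zip(atype.inputs, action.params):
        print ptype.name, ptype.ptype        # start_loc se3, movement r3, stop discrete
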
#TODO move this
class LaserPointerClient:
    def __init__(self, target_frame='/base_link', tf_listener=None, robot=None):
        self.dclick_cbs = []
        #self.dclick_cbs_raw = []
        self.point_cbs = []
        self.target_frame = target_frame
        self.laser_point_base = None
        self.robot = robot
        self.base_sound_path = (sb.Popen(["rospack", "find", "hai_sandbox"], stdout=sb.PIPE).communicate()[0]).strip()

        if tf_listener is None:
            self.tf_listener = tf.TransformListener()
        else:
            self.tf_listener = tf_listener

        rospy.Subscriber('cursor3d', PointStamped, self.laser_point_handler)
        self.double_click = rospy.Subscriber('mouse_left_double_click', String, self.double_click_cb)
        self.robot.sound.waveSound(self.base_sound_path + '/sounds/beep.wav').play()

    def transform_point(self, point_stamped):
        point_head = point_stamped.point
        #Transform into base link
        base_T_head = tfu.transform(self.target_frame, point_stamped.header.frame_id, self.tf_listener)
        point_mat_head = tfu.translation_matrix([point_head.x, point_head.y, point_head.z])
        point_mat_base = base_T_head * point_mat_head
        t_base, _ = tfu.matrix_as_tf(point_mat_base)
        return np.matrix(t_base).T

    def laser_point_handler(self, point_stamped):
        self.robot.sound.waveSound(self.base_sound_path + '/sounds/blow.wav').play()
        self.laser_point_base = self.transform_point(point_stamped)
        for f in self.point_cbs:
            f(self.laser_point_base)

    def double_click_cb(self, a_str):
        rospy.loginfo('Double CLICKED')
        self.robot.sound.waveSound(self.base_sound_path + '/sounds/beep.wav').play()
        #if self.laser_point_base != None:
        for f in self.dclick_cbs:
            f(self.laser_point_base)
        self.laser_point_base = None
        #for f in self.dclick_cb_raw(

    def add_double_click_cb(self, func):
        self.dclick_cbs.append(func)

    #def add_double_click_cb_raw(self, func):
    #    self.dclick_cbs_raw.append(func)

    def add_point_cb(self, func):
        self.point_cbs.append(func)


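# Illustrative sketch, not part of the original file: wiring callbacks into a
# LaserPointerClient, plus the homogeneous-transform math transform_point
# performs, written with tf.transformations directly.  'robot' is assumed to
# be a pr2.PR2 instance as constructed in ApplicationBehaviors below.
def _example_laser_client(robot, tf_listener):
    client = LaserPointerClient(tf_listener=tf_listener, robot=robot)
    client.add_point_cb(lambda pt_bl: rospy.loginfo('cursor at %s' % str(pt_bl.T)))
    client.add_double_click_cb(lambda pt_bl: rospy.loginfo('double click at %s' % str(pt_bl)))
    return client

def _example_transform_point(base_T_head, point_xyz):
    # Composing a 4x4 frame transform with a pure-translation matrix and
    # reading the translation back out is equivalent to transforming the point.
    point_mat_head = tr.translation_matrix(point_xyz)
    return tr.translation_from_matrix(np.dot(base_T_head, point_mat_head))
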
def image_diff_val2(before_frame, after_frame):
    br = np.asarray(before_frame)
    ar = np.asarray(after_frame)
    max_sum = br.shape[0] * br.shape[1] * br.shape[2] * 255.
    sdiff = np.abs((np.sum(br) / max_sum) - (np.sum(ar) / max_sum))
    #sdiff = np.sum(np.abs(ar - br)) / max_sum
    return sdiff

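# Illustrative sketch, not part of the original file: image_diff_val2 compares
# mean image intensities normalized to [0, 1], so a global brightness change
# registers while equal-brightness rearrangements of pixels do not.
def _example_image_diff():
    before = np.zeros((4, 4, 3), dtype=np.uint8)
    after = np.ones((4, 4, 3), dtype=np.uint8) * 51   # 51/255 = 0.2
    return image_diff_val2(before, after)             # ~0.2
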

class ManipulationBehaviors:

    def __init__(self, arm, pr2_obj, tf_listener=None):
        try:
            rospy.init_node('linear_move', anonymous=True)
        except Exception, e:
            rospy.loginfo('call to init_node failed %s' % str(e))
        self.movement = lm.LinearReactiveMovement(arm, pr2_obj, tf_listener)

    ##
    # reach direction
            #move_back_distance, \
    def reach(self, point, pressure_thres,\
            reach_direction=np.matrix([0,0,0]).T, orientation=None):
        MOVEMENT_TOLERANCE = .1
        #REACH_TOLERANCE = .1

        #self.movement.set_movement_mode_cart()
        #pdb.set_trace()
        self.movement.set_pressure_threshold(pressure_thres)
        loc_bl = self.movement.arm_obj.pose_cartesian_tf()[0]
        front_loc = point.copy()
        front_loc[0,0] = max(loc_bl[0,0], .4)
        #front_loc[0,0] = loc_bl[0,0]
        #pdb.set_trace()

        if orientation is None:
            start_loc = self.movement.arm_obj.pose_cartesian_tf()
            orientation = start_loc[1]
        self.movement.pressure_listener.rezero()
        #pdb.set_trace()
        #for i in range(2):
        r1, residual_error = self.movement.move_absolute((front_loc, orientation), stop='pressure', pressure=pressure_thres)

        #if residual_error > MOVEMENT_TOLERANCE or r1 != None: #if this step fails, we move back then return
        #    #self.move_absolute(start_loc, stop='accel')
        #    pdb.set_trace()
        #    return False, r1, None

        #We expect impact here
        pos_error = None
        try:
            #pdb.set_trace()
            #loc_bl = self.movement.current_location()[0]
            #reach_direction = loc_bl - point 
            #reach_direction = reach_direction / np.linalg.norm(reach_direction)
            point_reach = point + reach_direction
            r2, pos_error = self.movement.move_absolute((point_reach, \
                    self.movement.arm_obj.pose_cartesian_tf()[1]), stop='pressure_accel', pressure=pressure_thres)

        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))
            r2 = None

        touch_loc_bl = self.movement.arm_obj.pose_cartesian_tf()
        #if r2 == None or r2 == 'pressure' or r2 == 'accel' or (pos_error < (MOVEMENT_TOLERANCE + np.linalg.norm(reach_direction))):
        if r2 == 'pressure' or r2 == 'accel' or (pos_error is not None and (pos_error < (MOVEMENT_TOLERANCE + np.linalg.norm(reach_direction)))):
            self.movement.pressure_listener.rezero()

            # b/c of stiction & low gains, we can't move precisely for small
            # distances, so move back then move forward again
            #reach_dir = reach_direction / np.linalg.norm(reach_direction)
            cur_pose, cur_ang = self.movement.arm_obj.pose_cartesian_tf()

            #p1 = cur_pose - (reach_dir * .05)
            #p2 = cur_pose + move_back_distance
            #rospy.loginfo('moving back')
            #_, pos_error = self.movement.move_absolute((p1, cur_ang), stop='None', pressure=pressure_thres)
            #self.movement.pressure_listener.rezero()

            #rospy.loginfo('moving forward again')
            #_, pos_error = self.movement.move_absolute((p2, cur_ang), stop='None', pressure=pressure_thres)

            #self.movement.move_relative_gripper(4.*move_back_distance, stop='none', pressure=pressure_thres)
            #self.movement.move_relative_gripper(-3.*move_back_distance, stop='none', pressure=pressure_thres)
            #self.movement.pressure_listener.rezero()
            return True, r2, touch_loc_bl
        else:
            #pdb.set_trace()
            #shouldn't get here
            return False, r2, None

    def press(self, direction, press_pressure, contact_pressure):
        #make contact first
        self.movement.set_movement_mode_cart()
        #pdb.set_trace()
        r1, diff_1 = self.movement.move_relative_gripper(direction, stop='pressure', pressure=contact_pressure)
        #now perform press
        if r1 == 'pressure' or r1 == 'accel':
            self.movement.set_movement_mode_cart()
            r2, diff_2 = self.movement.move_relative_gripper(direction, stop='pressure_accel', pressure=press_pressure)
            if r2 == 'pressure' or r2 == 'accel' or r2 is None:
                return True, r2
            else:
                return False, r2
        else:
            return False, r1


    #def twist(self, angle, stop):
    #    pos, rot = self.movement.cman.return_cartesian_pose() # in base_link
    #    ax, ay, az = tr.euler_from_quaternion(rot) 
    #    nrot = tr.quaternion_from_euler(ax + angle, ay, az)
    #    stop_funcs = self.movement._process_stop_option(stop)
    #    #return self._move_cartesian(np.matrix(pos).T, np.matrix(nrot).T, 
    #    #                            stop_funcs, timeout=self.timeout, settling_time=5.0)
    #    return self.movement._move_cartesian(np.matrix(pos).T, np.matrix(nrot).T, \
    #            stop_funcs, timeout=self.timeout, settling_time=5.0)

###
# One large dictionary indexed by unique ids contains location records.
# These ids are associated with spatial locations, which we use to look up
# records.  Operations supported
#
#   0)   loading the locations database
#   0.1) saving the locations database
#   1)   adding locations
#   2)   querying given a point
#   3)   listing all locations
#             (name, ids)
#   4)   listing locations closest to a given point
#             (name, ids)
###
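# Illustrative sketch, not part of the original file: the shape of one
# location record, mirroring what LocationManager.create_new_location
# builds below.
#
#   data[taskid] = {'task':        'light_switch_down',  # one of the task_types
#                   'center':      point_map,            # 3x1 np.matrix in the map frame
#                   'base_pose':   base_pose,
#                   'points':      point_map,            # grows as the location is re-observed
#                   'dataset':     None,                 # r3d.InterestPointDataset
#                   'dataset_raw': None,
#                   'gather_data': True,
#                   'complementary_task_id': None,
#                   'pca':         None,
#                   'execution_record': []}              # per-attempt 0/1 success history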
def separate_by_labels(points, labels):
    pidx = np.where(labels == r3d.POSITIVE)[1].A1.tolist()
    nidx = np.where(labels == r3d.NEGATIVE)[1].A1.tolist()
    uidx = np.where(labels == r3d.UNLABELED)[1].A1.tolist()
    return points[:, uidx], points[:, pidx], points[:, nidx]

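# Illustrative sketch, not part of the original file: separate_by_labels on a
# toy 2x4 point matrix, assuming r3d.POSITIVE/NEGATIVE/UNLABELED are the
# integer label codes used throughout recognize_3d.
def _example_separate_by_labels():
    points = np.matrix(np.arange(8.)).reshape((2, 4))   # one column per point
    labels = np.matrix([r3d.UNLABELED, r3d.POSITIVE, r3d.NEGATIVE, r3d.POSITIVE])
    unlabeled, pos, neg = separate_by_labels(points, labels)
    return unlabeled.shape[1], pos.shape[1], neg.shape[1]   # (1, 2, 1)
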

class LocationDisplay(threading.Thread):

    def __init__(self, loc_man): 
        threading.Thread.__init__(self)
        try:
            rospy.init_node('location_display')
        except Exception, e:
            print e

        self.location_label_pub  = rospy.Publisher('location_label', vm.Marker)
        self.location_marker_pub = rospy.Publisher('location_marker', vm.Marker)
        self.loc_man = loc_man

    def run(self):
        text_scale = .1
        text_color = np.matrix([1,0,0,1.]).T

        circle_radii = .9
        circle_scale = .2
        circle_z = .03
        circle_color = np.matrix([1,0,0,1.]).T

        circle_msgs = []
        data = self.loc_man.data
        #pdb.set_trace() #leftover debug trap; disabled, it would block this thread
        for task_id in data.keys():
            point_map = data[task_id]['center']
            circle_msgs.append(viz.circle_marker(point_map, circle_radii, circle_scale, circle_color, 'map', circle_z))

        text_msgs = []
        for task_id in data.keys():
            point_map = data[task_id]['center']
            text_msgs.append(viz.text_marker(task_id, point_map, text_color, text_scale, 'map'))

        r = rospy.Rate(2)
        while not rospy.is_shutdown():
            for c in circle_msgs:
                self.location_marker_pub.publish(c)
            for txt in text_msgs:
                self.location_label_pub.publish(txt)
            r.sleep()


class LocationManager:

    def __init__(self, name, rec_params):
        self.RELIABILITY_RECORD_LIM = 20
        self.RELIABILITY_THRES = .9

        self.rec_params = rec_params
        self.saved_locations_fname = name
        self.LOCATION_ADD_RADIUS = .5
        self.tree = None   #spatial indexing tree
        self.centers = None #3xn array of centers

        self.ids = [] #ids that correspond to centers; used to index into data
        self.data = {} 

        self.learners = {}
        self._load_database()
        #pdb.set_trace()
        self.image_pubs = {}
        #self.revert()
        #pdb.set_trace()

        #self.save_database()
        #exit()
        for k in self.data.keys():
            self.train(k)
            self.image_pubs[k] = r3d.ImagePublisher(k.replace(':', '_'))
        #self.task_types = ['light_switch', 'light_rocker', 'drawer']
        self.task_types = ['light_switch_down', 'light_switch_up', 
                            'light_rocker_down', 'light_rocker_up', 
                            'push_drawer', 'pull_drawer']
        self.task_pairs = [['light_switch_down', 'light_switch_up'], 
                           ['light_rocker_down', 'light_rocker_up'],
                           ['pull_drawer', 'push_drawer']]
        self.driving_param = {'light_switch_up':   {'coarse': .9, 'fine': .6, 'voi': .4},
                              'light_switch_down': {'coarse': .9, 'fine': .6, 'voi': .4},

                              'light_rocker_down': {'coarse': .9, 'fine': .6, 'voi': .4},
                              'light_rocker_up':   {'coarse': .9, 'fine': .6, 'voi': .4},

                              'pull_drawer':       {'coarse': .9, 'fine': .5, 'voi': .4},
                              'push_drawer':       {'coarse': .9, 'fine': .5, 'voi': .4}}

    def revert(self):
        self.centers = self.centers[:, 0:4]
        self.ids = self.ids[0:4]
        self.data.pop('office_push_drawer')
        self.data.pop('office_pull_drawer')
        #pdb.set_trace()

    def _load_database(self):
        #                           tree         dict
        #indexes of locations in tree => ids list => location data
        if not os.path.isfile(self.saved_locations_fname):
            return
        d = ut.load_pickle(self.saved_locations_fname)
        self.ids = d['ids']
        self.centers = d['centers']
        self.data = d['data']
        self.tree = sp.KDTree(np.array(self.centers).T)

    def save_database(self):
        print 'Saving pickle. DO NOT INTERRUPT!!!'
        d = {'centers': self.centers,
            'ids': self.ids,
            'data': self.data}
        try:
            shutil.copyfile(self.saved_locations_fname, 
                    time.strftime('%m_%d_%Y_%I_%M%p') + '_locations.pkl')
        except Exception, e:
            print e

        ut.save_pickle(d, self.saved_locations_fname)
        print 'SAFE!!!'
        #rospy.loginfo('LocationManager: save_database saved db!')

    def get_complementary_task(self, tasktype):
        for ta, tb in self.task_pairs:
            if ta == tasktype:
                return tb
            if tb == tasktype:
                return ta
        return None

    def update_base_pose(self, taskid, base_pose):
        print 'updating base pose for task', taskid
        self.data[taskid]['base_pose'] = base_pose

    def create_new_location(self, task_type, point_map, base_pose, gather_data=True, name=None):
        if name is None:
            taskid = time.strftime('%A_%m_%d_%Y_%I:%M%p') + ('_%s' % task_type)
        else:
            taskid = name + ('_%s' % task_type)

        try:
            os.mkdir(taskid)
        except OSError, e:
            print e

        if self.centers is None:
            self.centers = point_map
        else:
            self.centers = np.column_stack((self.centers, point_map))
        self.tree = sp.KDTree(np.array(self.centers).T)

        self.ids.append(taskid)
        self.data[taskid] = {'task': task_type,
                             'center': point_map,
                             'base_pose': base_pose,
                             'points': point_map,
                             'dataset': None,
                             'dataset_raw': None,
                             'gather_data': gather_data,
                             'complementary_task_id': None,
                             'pca': None,
                             'execution_record': []}
        self.image_pubs[taskid] = r3d.ImagePublisher(taskid.replace(':', '_'))
        self.save_database()
        return taskid

    def record_time(self, task_id, record_name, value):
        if not self.data[task_id].has_key('times'):
            self.data[task_id]['times'] = {}

        if not self.data[task_id]['times'].has_key(record_name):
            self.data[task_id]['times'][record_name] = []

        self.data[task_id]['times'][record_name].append(value)

    def update_execution_record(self, taskid, value):
        self.data[taskid]['execution_record'].append(value)

    def is_reliable(self, taskid):
        record = self.data[taskid]['execution_record']
        if len(record) < self.RELIABILITY_RECORD_LIM:
            return False

        if np.sum(record) < (self.RELIABILITY_RECORD_LIM * self.RELIABILITY_THRES):
            return False

        return True

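    # Illustrative note, not part of the original file: with
    # RELIABILITY_RECORD_LIM = 20 and RELIABILITY_THRES = .9, a location
    # counts as reliable once at least 20 attempts are on record and at
    # least 18 of them (20 * .9) succeeded, each execution_record entry
    # being 1 for success and 0 for failure.
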
    def _id_to_center_idx(self, task_id):
        for i, tid in enumerate(self.ids):
            if tid == task_id:
                return i
        return None

    def add_perceptual_data(self, task_id, fea_dict):
        rospy.loginfo('LocationManager: add_perceptual_data - %s adding %d instance(s)' \
                % (task_id, fea_dict['labels'].shape[1]))
        current_raw_dataset = self.data[task_id]['dataset_raw']
        current_dataset = self.data[task_id]['dataset']

        self.data[task_id]['dataset_raw'] = \
                r3d.InterestPointDataset.add_to_dataset(
                        current_raw_dataset, fea_dict['instances'], 
                        fea_dict['labels'], fea_dict['points2d'], 
                        fea_dict['points3d'], None, None, 
                        sizes=fea_dict['sizes'])

        self.data[task_id]['dataset'] = \
                r3d.InterestPointDataset.add_to_dataset(
                        current_dataset, fea_dict['instances'], 
                        fea_dict['labels'], fea_dict['points2d'], 
                        fea_dict['points3d'], None, None, 
                        sizes=fea_dict['sizes'])

    def get_perceptual_data(self, task_id):
        return self.data[task_id]['dataset']

    def remove_perceptual_data(self, task_id, instance_idx):
        self.data[task_id]['dataset'].remove(instance_idx)
        self.data[task_id]['dataset_raw'].remove(instance_idx)

    def active_learn_add_data(self, task_id, fea_dict):
        #TODO: do something smarter here
        self.add_perceptual_data(task_id, fea_dict)

    def update(self, task_id, point_map):
        #If close by locations found then add to points list and update center
        ldata = self.data[task_id]
        ldata['points'] = np.column_stack((point_map, ldata['points']))
        ldata['center'] = ldata['points'].mean(1)

        center_idx = self._id_to_center_idx(task_id)
        self.centers[:, center_idx] = ldata['center']
        self.tree = sp.KDTree(np.array(self.centers).T)

    def set_center(self, task_id, point_map):
        ldata = self.data[task_id]
        ldata['points'] = point_map
        ldata['center'] = point_map
        center_idx = self._id_to_center_idx(task_id)
        self.centers[:, center_idx] = ldata['center']
        self.tree = sp.KDTree(np.array(self.centers).T)

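    # Illustrative sketch, not part of the original file: the running-center
    # update performed by update() above, in isolation.  New observations are
    # stacked as columns, the center is their column-wise mean, and the
    # KD-tree over all centers is then rebuilt, since scipy's KDTree does not
    # support incremental insertion.
    @staticmethod
    def _example_center_update(points, new_point_map):
        points = np.column_stack((new_point_map, points))   # 3xN observations
        center = points.mean(1)                             # 3x1 column-wise mean
        return points, center
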
    def publish_image(self, task_id, image, postfix=''):
        self.image_pubs[task_id].publish(image)
        ffull = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + postfix + '.jpg')
        cv.SaveImage(ffull, image)

    def list_all(self):
        rlist = []
        for k in self.data.keys():
            rlist.append([k, self.data[k]['task']])
        return rlist

    def list_close_by(self, point_map, task=None):
        if self.tree is not None:
            indices = self.tree.query_ball_point(np.array(point_map.T), self.LOCATION_ADD_RADIUS)[0]
            print 'list_close_by: indices close by', indices
            #pdb.set_trace()
            ids_selected = []
            for i in indices:
                sid = self.ids[i]
                stask = self.data[sid]['task']
                if task is None:
                    ids_selected.append([sid, stask])
                else:
                    if task == stask:
                        ids_selected.append([sid, stask])
            return ids_selected
        else:
            return []

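    # Illustrative sketch, not part of the original file: the spatial query
    # that backs list_close_by.  For a 1x3 query row, query_ball_point
    # returns one list of indices of stored centers within the radius.
    @staticmethod
    def _example_close_by(tree, point_map, radius=.5):
        return tree.query_ball_point(np.array(point_map.T), radius)[0]
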
    def train_all_classifiers(self):
        for k in self.data.keys():
            self.train(k)

    def train(self, task_id, dset_for_pca=None, save_pca_images=True):
        dataset = self.data[task_id]['dataset']
        rec_params = self.rec_params
        #pdb.set_trace()
        if dataset is None:
            return

        #rec_params = self.feature_ex.rec_params
        nneg = np.sum(dataset.outputs == r3d.NEGATIVE) #TODO: this was copied and pasted from r3d
        npos = np.sum(dataset.outputs == r3d.POSITIVE)
        print '================= Training ================='
        print 'NEG examples', nneg
        print 'POS examples', npos
        print 'TOTAL', dataset.outputs.shape[1]
        neg_to_pos_ratio = float(nneg)/float(npos)
        weight_balance = ' -w0 1 -w1 %.2f' % neg_to_pos_ratio
        print 'training'
        #learner = r3d.SVMPCA_ActiveLearner(use_pca=True)
        previous_learner = None
        if self.learners.has_key(task_id):
            previous_learner = self.learners[task_id]
        #pdb.set_trace()
        learner = r3d.SVMPCA_ActiveLearner(use_pca=True, 
                        reconstruction_std_lim=self.rec_params.reconstruction_std_lim, 
                        reconstruction_err_toler=self.rec_params.reconstruction_err_toler,
                        old_learner=previous_learner, pca=self.data[task_id]['pca'])

        #TODO: figure out something for scaling the inputs field!
        if dset_for_pca is not None:
            inputs_for_pca = dset_for_pca['instances']
        else:
            #inputs_for_pca = self.data[task_id]['pca'].pca_data
            inputs_for_pca = dataset.inputs

        learner.train(dataset, 
                      inputs_for_pca,
                      rec_params.svm_params + weight_balance,
                      rec_params.variance_keep)

        self.data[task_id]['pca'] = learner.pca
        self.learners[task_id] = learner
        if save_pca_images:
            #pdb.set_trace()
            basis = learner.pca.projection_basis
            cv.SaveImage('%s_pca.png' % task_id, r3d.instances_to_image(self.rec_params.win_size, basis, np.min(basis), np.max(basis)))

        #self.learners[task_id] = {'learner': learner, 'dataset': dataset}

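    # Illustrative note, not part of the original file: the class-weight
    # string built in train() above.  With 30 NEGATIVE and 10 POSITIVE
    # examples, neg_to_pos_ratio is 3.0 and weight_balance becomes
    # ' -w0 1 -w1 3.00', i.e. (assuming libsvm-style options) mistakes on
    # the rarer positive class are penalized three times as heavily.
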


class ApplicationBehaviors:

    def __init__(self):

        rospy.init_node('linear_move', anonymous=True)
        self.tf_listener = tf.TransformListener()
        self.robot = pr2.PR2(self.tf_listener, base=True)
        self.behaviors = ManipulationBehaviors('l', self.robot, tf_listener=self.tf_listener)
        self.laser_scan = hd.LaserScanner('point_cloud_srv')

        self.prosilica = rc.Prosilica('prosilica', 'polled')
        self.prosilica_cal = rc.ROSCameraCalibration('/prosilica/camera_info')

        self.wide_angle_camera_left = rc.ROSCamera('/wide_stereo/left/image_rect_color')
        self.wide_angle_configure = dr.Client('wide_stereo_both')

        self.laser_listener = LaserPointerClient(tf_listener=self.tf_listener, robot=self.robot)
        self.laser_listener.add_double_click_cb(self.click_cb)
        self.rec_params = r3d.Recognize3DParam()

        self.OPTICAL_FRAME = 'high_def_optical_frame'
        self.feature_ex = r3d.NarrowTextureFeatureExtractor(self.prosilica, 
                hd.PointCloudReceiver('narrow_stereo_textured/points'),
                self.prosilica_cal, 
                self.robot.projector,
                self.tf_listener, self.rec_params)
        #self.feature_ex = r3d.LaserScannerFeatureExtractor(self.prosilica, self.laser_scan, 
        #        self.prosilica_cal, self.tf_listener, self.rec_params)
        #self.feature_ex = r3d.KinectFeatureExtractor(self.tf_listener, rec_params=self.rec_params)

        self.critical_error = False
        #TODO: define start location in frame attached to torso instead of base_link
        self.start_location_light_switch = (np.matrix([0.35, 0.30, 1.1]).T, np.matrix([0., 0., 0., 0.1]))
        self.start_location_drawer       = (np.matrix([0.20, 0.40, .8]).T,  
                                            np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0)))
        self.folded_pose = np.matrix([ 0.10134791, -0.29295995,  0.41193769]).T
        self.create_arm_poses()
        self.learners = {}

        #self.load_classifier('light_switch', 'friday_730_light_switch2.pkl')
        #self.img_pub = r3d.ImagePublisher('active_learn')
        #self.location_display = LocationDisplay(self.locations_man)
        #self.location_display.start()
        #self.robot.projector.set_prosilica_inhibit(True)
        #pdb.set_trace()
        self.driving_posture('light_switch_down')
        self.robot.projector.set(False)
        self.locations_man = LocationManager('locations_narrow_v11.pkl', rec_params=self.rec_params)

    def draw_dots_nstuff(self, img, points2d, labels, picked_loc):
        pidx = np.where(labels == r3d.POSITIVE)[1].A1.tolist()
        nidx = np.where(labels == r3d.NEGATIVE)[1].A1.tolist()
        uidx = np.where(labels == r3d.UNLABELED)[1].A1.tolist()

        if picked_loc is not None:
            r3d.draw_points(img, picked_loc, [255, 0, 0], 4, -1)

        #scale = 1
        if len(uidx) > 0:
            upoints = points2d[:, uidx]
            r3d.draw_points(img, upoints, [255,255,255], 2, -1)

        if len(nidx) > 0:
            npoints = points2d[:, nidx]
            r3d.draw_points(img, npoints, [0,0,255], 2, -1)

        if len(pidx) > 0:
            ppoints = points2d[:, pidx]
            r3d.draw_points(img, ppoints, [0,255,0], 2, -1)

    def create_arm_poses(self):
        self.right_tucked = np.matrix([[-0.02362532,  1.10477102, -1.55669475, \
                -2.12282706, -1.41751231, -1.84175899,  0.21436806]]).T

        self.left_tucked = np.matrix([[ 0.05971848,  1.24980184,  1.79045674, \
                -1.68333801, -1.73430635, -0.09838841, -0.08641928]]).T

        #lift the right arm up a little bit
        self.r0 = np.matrix([[-0.22774141,  0.7735819 , -1.45102092, \
                -2.12152412, -1.14684579, -1.84850287,  0.21397648]]).T

        #left arm rotates
        self.l0 = np.matrix([[ 0.06021592,  1.24844832,  1.78901355, -1.68333801, 1.2, -0.10152105, -0.08641928]]).T

        #left arm moves out
        self.l1 = np.matrix([[0.94524406,  1.24726399,  1.78548574, -1.79148173,  1.20027637, -1.0, -0.08633226]]).T

        #left arm rotates outward a little more
        self.l2 = np.matrix([[ 1.53180837,  1.24362641,  1.78452361, -1.78829678,  1.1996979,-1.00446167, -0.08741998]]).T

    def untuck(self):
        #pdb.set_trace()
        if np.linalg.norm(self.robot.left.pose() - self.left_tucked) < .3:
            rospy.loginfo('untuck: not in tucked position.  Ignoring request')
            return
        #assume we are tucked
        self.behaviors.movement.set_movement_mode_ik()
        self.robot.right.set_pose(self.r0, 1.)
        self.robot.left.set_poses(np.column_stack([self.l0, self.l1, self.l2]), \
                                  np.array([1., 2., 3.]))
        self.robot.right.set_pose(self.right_tucked, 1.)
        self.behaviors.movement.set_movement_mode_cart()

    def tuck(self):
        #pdb.set_trace()
        if np.linalg.norm(self.robot.left.pose() - self.left_tucked) < .5:
            rospy.loginfo('tuck: Already tucked. Ignoring request.')
            return
        #lift the right arm up a little bit
        self.behaviors.movement.set_movement_mode_ik()
        self.robot.right.set_pose(self.r0, 1.)
        self.robot.left.set_poses(np.column_stack([self.l2, self.l1, self.l0, self.left_tucked]), \
                                  np.array([4., 5., 6., 7.]))
        self.robot.right.set_pose(self.right_tucked, 1.)
        self.behaviors.movement.set_movement_mode_cart()

    #def go_to_home_pose(self):
    #    #self.behaviors.movement.set_movement_mode_cart()
    #    return self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
    #    #self.behaviors.movement.set_movement_mode_ik()
    #    #return self.behaviors.movement.move_absolute(self.start_location, stop='pressure')

    def camera_change_detect(self, threshold, f, args):
        config = self.wide_angle_configure.get_configuration()
        config['auto_gain'] = False
        config['auto_exposure'] = False
        self.wide_angle_configure.update_configuration(config)

        #take before sensor snapshot
        start_pose = self.robot.head.pose()
        #pdb.set_trace()
        #self.robot.head.set_pose(np.radians(np.matrix([1.04, -20]).T), 1)
        self.robot.head.set_pose(np.radians(np.matrix([30., -20]).T), 1)
        time.sleep(4)
        for i in range(7):
            before_frame = self.wide_angle_camera_left.get_frame()
        cv.SaveImage('before.png', before_frame)
        f_return = f(*args)
        time.sleep(5)
        for i in range(7):
            after_frame = self.wide_angle_camera_left.get_frame()

        cv.SaveImage('after.png', after_frame)
        sdiff = image_diff_val2(before_frame, after_frame)
        self.robot.head.set_pose(start_pose, 1)
        self.robot.head.set_pose(start_pose, 1)
        time.sleep(3)
        #take after snapshot
        #threshold = .03
        config['auto_gain'] = True
        config['auto_exposure'] = True
        self.wide_angle_configure.update_configuration(config)

        rospy.loginfo('camera difference %.4f (thres %.3f)' % (sdiff, threshold))
        if sdiff > threshold:
            rospy.loginfo('difference detected!')
            return True, f_return
        else:
            rospy.loginfo('NO differences detected!')
            return False, f_return

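    # Illustrative sketch, not part of the original file: camera_change_detect
    # wraps an arbitrary action in a before/after image comparison.  Here a
    # do-nothing action is monitored with a hypothetical threshold of .03
    # (the value hinted at in the commented-out line above); the real callers
    # below pass self.behaviors.press and similar behaviors.
    def _example_change_detect(self):
        noop = lambda: None
        changed, ret = self.camera_change_detect(.03, noop, ())
        return changed
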
    def close_gripper(self):
        GRIPPER_CLOSE = .003
        #self.robot.left_gripper.open(True, position=GRIPPER_CLOSE)
        self.behaviors.movement.gripper_close()

    def open_gripper(self):
        GRIPPER_OPEN = .08
        #self.robot.left_gripper.open(True, position=GRIPPER_OPEN)
        self.behaviors.movement.gripper_open()

    def light_switch1(self, point, 
            point_offset, press_contact_pressure, 
            press_pressure, press_distance, visual_change_thres):

        change = False #initialized up front so the safety handler below always has a value to return
        try:
            #pdb.set_trace()
            #print '===================================================================='
            #point = point + point_offset 
            rospy.loginfo('reaching to ' + str(point.T))
            #pdb.set_trace()
            #self.behaviors.movement.gripper_close()
            self.close_gripper()
            #self.robot.left_gripper.open(True, position=.005)
            time.sleep(1)
            self.behaviors.movement.pressure_listener.rezero()
            #TODO: have go_home check whether it is actually at that location
            #self.behaviors.move_absolute(self.start_location, stop='pressure_accel')

            #start_loc = self.current_location()
            #pdb.set_trace()
            success, reason, touchloc_bl = self.behaviors.reach(point, \
                    press_contact_pressure, \
                    reach_direction=np.matrix([0.1,0,0]).T)
            #r1, pos_error1 = self.behaviors.movement.move_relative_gripper(np.matrix([-.01, 0., 0.]).T, \
            #        stop='none', pressure=press_contact_pressure)

            if touchloc_bl is not None:
                dist = np.linalg.norm(point - touchloc_bl[0])
                #TODO ensure that the reaching motion actually touched the point we intended to touch.
                rospy.loginfo('Touched point is %.3f m away from observed point' % dist)

            if not success:
                error_msg = 'Reach failed due to "%s"' % reason
                rospy.loginfo(error_msg)
                rospy.loginfo('Failure recovery: moving back')
                self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='accel', \
                        pressure=press_contact_pressure)
                #raise TaskError(error_msg)
                return False, None, point+point_offset

            rospy.loginfo('pressing')

            #Should not be making contact
            self.behaviors.movement.pressure_listener.rezero()
            change, press_ret = self.camera_change_detect(visual_change_thres, \
                    self.behaviors.press, \
                    (press_distance, press_pressure, press_contact_pressure))
            success, reason = press_ret
            if not success:
                rospy.loginfo('Press failed due to "%s"' % reason)

            #code reward function
            #monitor self collision => collisions with the environment are not self collisions
            rospy.loginfo('moving back')
            #self.behaviors.movement.set_movement_mode_cart()
            r1, pos_error1 = self.behaviors.movement.move_relative_gripper(np.matrix([-.03, 0., 0.]).T, \
                    stop='none', pressure=press_contact_pressure)
            if r1 is not None:
                rospy.loginfo('moving back failed due to "%s"' % r1)
                return change, None, point+point_offset

            rospy.loginfo('resetting')
            self.behaviors.movement.pressure_listener.rezero()
            r2, pos_error2 = self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
            if r2 is not None and r2 != 'no solution':
                rospy.loginfo('moving back to start location failed due to "%s"' % r2)
                return change, None, point+point_offset
            self.behaviors.movement.pressure_listener.rezero()

            rospy.loginfo('DONE.')
            return change, touchloc_bl, point+point_offset

        except lm.RobotSafetyError, e:
            rospy.loginfo('>>>> ROBOT SAFETY ERROR! RESETTING. %s' % str(e))
            self.behaviors.movement.pressure_listener.rezero()
            r2, pos_error2 = self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
            return change, None, point+point_offset


    def light_rocker_push(self, point, pressure, visual_change_thres, offset):
        rospy.loginfo('Reaching')
        linear_movement = self.behaviors.movement
        #linear_movement.gripper_close()
        self.close_gripper()
        self.behaviors.movement.pressure_listener.rezero()
        #pdb.set_trace()
        #try:
        self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure_accel', pressure=3000)
        #except lm.RobotSafetyError, e:
        #    rospy.loginfo('robot safety error %s' % str(e))

        def reach_with_back_up(point, thres, reach_direction):
            self.behaviors.reach(point, thres, reach_direction)
            try:
                r1, pos_error1 = self.behaviors.movement.move_relative_gripper(np.matrix([-.05, 0., 0.]).T, stop='none')
            except lm.RobotSafetyError, e:
                rospy.loginfo('robot safety error %s' % str(e))
        change, press_ret = self.camera_change_detect(visual_change_thres, \
                                    #self.behaviors.reach, \
                                    reach_with_back_up, \
                                    #(point, pressure, np.matrix([0,0,0.]).T, np.matrix([.1,0,0]).T))
                                    (point, pressure, np.matrix([.1,0,0]).T))
        try:
            linear_movement.move_relative_gripper(np.matrix([-.1,0,0]).T, stop='accel')
            self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure_accel', pressure=3000)
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))

        try:
            self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure_accel', pressure=3000)
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))

        rospy.loginfo('Resetting')
        return change, '', point+offset


    def drawer_push(self, point_bl):
        PUSH_TOLERANCE = .1
        #pdb.set_trace()
        linear_movement = self.behaviors.movement
        #linear_movement.gripper_open()
        #pdb.set_trace()
        self.open_gripper()
        self.behaviors.movement.pressure_listener.rezero()
        #self.robot.left_gripper.open(True, position=.08)
        rospy.loginfo("Moving to start location")
        #linear_movement.move_absolute((self.start_location_drawer[0], 
        #    np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))))
        linear_movement.move_absolute((self.start_location_drawer[0], 
            #np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))), 
            np.matrix(tr.quaternion_from_euler(np.radians(0.), 0, 0))), 
            stop='pressure_accel', pressure=1000)

        #calc front loc
        self.behaviors.movement.set_pressure_threshold(1000)
        loc_bl = self.behaviors.movement.arm_obj.pose_cartesian_tf()[0]
        front_loc = point_bl.copy()
        front_loc[0,0] = max(loc_bl[0,0], .4)

        #pdb.set_trace()
        #move to front
        rospy.loginfo("Moving to front location")
        #orientation = np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))
        orientation = np.matrix(tr.quaternion_from_euler(np.radians(0.), 0, 0))
        self.behaviors.movement.pressure_listener.rezero()
        r1, residual_error = self.behaviors.movement.move_absolute((front_loc, orientation), 
                                stop='pressure', pressure=1500)
        linear_movement.pressure_listener.rezero()

        #move until contact
        rospy.loginfo("Touching surface")
        try:
            linear_movement.move_relative_gripper(np.matrix([.5,0,0]).T, stop='pressure_accel', pressure=100)
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))
        contact_loc_bl = linear_movement.arm_obj.pose_cartesian_tf()[0]

        #Push
        rospy.loginfo("PUSH!!!")
        current_position = self.robot.left.pose_cartesian_tf()
        target_position = current_position[0] + np.matrix([.4,0,0.]).T
        try:
            #linear_movement.move_relative_gripper(np.matrix([.2,0,0]).T, stop='pressure_accel', pressure=6000)
            linear_movement.move_absolute((target_position, current_position[1]), stop='pressure_accel', pressure=6000)
            linear_movement.move_absolute((target_position, current_position[1]), stop='pressure_accel', pressure=6000)
            linear_movement.move_absolute((target_position, current_position[1]), stop='pressure_accel', pressure=6000)
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))

        pushed_loc_bl = linear_movement.arm_obj.pose_cartesian_tf()[0]

        rospy.loginfo("Moving away")
        try:
            linear_movement.move_relative_gripper(np.matrix([-.05,0,0]).T, stop='accel')
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))
        try:
            linear_movement.move_relative_gripper(np.matrix([-.10,0,0]).T, stop='accel')
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))
        try:
            linear_movement.move_relative_gripper(np.matrix([-.1,0,0]).T, stop='accel')
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))

        linear_movement.pressure_listener.rezero()
        #linear_movement.move_relative_base(np.matrix([-.2, .3, 0.1]).T, stop='pressure_accel', pressure=300)
        linear_movement.move_absolute((self.start_location_drawer[0], 
                    #np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))), 
                    np.matrix(tr.quaternion_from_euler(np.radians(0.), 0, 0))), 
                    stop='pressure_accel', pressure=1000)

        move_dist = np.linalg.norm(contact_loc_bl - pushed_loc_bl)
        rospy.loginfo('pushed for distance %.3f' % move_dist)
        success = move_dist > PUSH_TOLERANCE
        return success, 'pushed', pushed_loc_bl


    def drawer(self, point):
        #Prepare
        GRIPPER_OPEN = .08
        GRIPPER_CLOSE = .003
        MAX_HANDLE_SIZE = .03
        linear_movement = self.behaviors.movement
        gripper = self.robot.left_gripper

        #gripper.open(True, position=GRIPPER_OPEN)
        #linear_movement.gripper_open()
        self.open_gripper()
        linear_movement.move_absolute((self.start_location_drawer[0], 
            #np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))), 
            np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0))), 
            stop='pressure_accel', pressure=1000)

        #Reach
        success, reason, touchloc_bl = self.behaviors.reach(point, 300, #np.matrix([0.0, 0, 0]).T, 
                             reach_direction=np.matrix([0.1, 0, 0]).T, 
                             orientation=np.matrix(tr.quaternion_from_euler(np.radians(90.), 0, 0)))

        #Error recovery
        if not success:
            error_msg = 'Reach failed due to "%s"' % reason
            rospy.loginfo(error_msg)
            rospy.loginfo('Failure recovery: moving back')
            try:
                linear_movement.move_relative_gripper(np.matrix([-.25,0,0]).T, stop='pressure_accel', pressure=300)
            except lm.RobotSafetyError, e:
                rospy.loginfo('robot safety error %s' % str(e))
            self.behaviors.movement.move_absolute(self.start_location_drawer, stop='accel', pressure=300)
            return False, 'reach failed', point

        #Grasp
        GRASP_THRES = 100
        try:
            linear_movement.move_relative_gripper(np.matrix([-.01,0,0]).T, stop='none')
        except lm.RobotSafetyError, e:
            rospy.loginfo('robot safety error %s' % str(e))
        #lbf, rbf = linear_movement.pressure_listener.get_pressure_readings()
        #pdb.set_trace()
        self.close_gripper()
        #linear_movement.gripper_close()
        #gripper.open(True, position=GRIPPER_CLOSE)
        #linear_movement.pressure_listener.rezero()
        #laf, raf = linear_movement.pressure_listener.get_pressure_readings()

        #linear_movement.move_relative_gripper(np.matrix([-.05,0,0]).T, stop='none')
        #gripper.open(True, position=.03)
        #linear_movement.pressure_listener.rezero()
        #gripper.open(True, position=GRIPPER_CLOSE)
        #linear_movement.pressure_listener.rezero()
        #bf = np.row_stack((lbf, rbf))
        #af = np.row_stack((laf, raf))
        #pdb.set_trace()
        #grasped_handle = np.any(np.abs(af-bf) > GRASP_THRES) or (gripper.pose()[0,0] > GRIPPER_CLOSE)
        grasped_handle = (gripper.pose()[0,0] > GRIPPER_CLOSE) and (gripper.pose()[0,0] < MAX_HANDLE_SIZE)

        if not grasped_handle:
            rospy.loginfo('Failed to grasp handle :(')
            #linear_movement.gripper_open()
            self.open_gripper()
            #gripper.open(True, position=GRIPPER_OPEN)
            linear_movement.pressure_listener.rezero()
            linear_movement.move_relative_gripper(np.matrix([-.25,0,0]).T, stop='pressure_accel', pressure=300)
            self.behaviors.movement.move_absolute(self.start_location_drawer, stop='accel', pressure=300)
            return False, 'failed to grasp handle', point

        #Pull
        linear_movement.pressure_listener.rezero()
        linear_movement.move_relative_gripper(np.matrix([-.1,0,0]).T, stop='accel', pressure=2500)
        linear_movement.move_absolute(linear_movement.arm_obj.pose_cartesian_tf(), 
                                    stop='pressure_accel', pressure=300)
        #linear_movement.gripper_close()
        #linear_movement.gripper_close()
        self.close_gripper()
        rospy.sleep(1)
        linear_movement.pressure_listener.rezero()
        #lap, rap = linear_movement.pressure_listener.get_pressure_readings()
        #ap = np.row_stack((lap, rap))
        #still_has_handle = np.any(np.abs(ap-af) < GRASP_THRES) or (gripper.pose()[0,0] > GRIPPER_CLOSE)
        still_has_handle = gripper.pose()[0,0] > GRIPPER_CLOSE
        #pdb.set_trace()
        try:
            linear_movement.move_relative_base(np.matrix([-.15,0,0]).T, stop='accel', pressure=2500)
        except lm.RobotSafetyError, e:
            #linear_movement.gripper_open()
            self.open_gripper()
            linear_movement.pressure_listener.rezero()
            rospy.loginfo('robot safety error %s' % str(e))

        #Release & move back 
        #linear_movement.gripper_open()
        location_handle_bl = linear_movement.arm_obj.pose_cartesian_tf()[0]
        #gripper.open(True, position=.08)
        #linear_movement.gripper_open()
        self.open_gripper()
        rospy.sleep(2)
        linear_movement.pressure_listener.rezero()

        #linear_movement.move_relative_gripper(np.matrix([-.15, 0, 0]).T, stop='pressure_accel', pressure=300)
        linear_movement.move_relative_base(np.matrix([-.2, 0, 0.]).T, stop='pressure_accel', pressure=1000)
        linear_movement.move_relative_base(np.matrix([-.1, .2, 0.1]).T, stop='pressure_accel', pressure=1000)
        self.behaviors.movement.move_absolute(self.start_location_drawer, stop='pressure_accel', pressure=1000)

        return still_has_handle, 'pulled', location_handle_bl


    ##
    # Drive to within dist_far meters of point_bl
    def drive_approach_behavior(self, point_bl, dist_far):
        # navigate close to point
        #pdb.set_trace()
        map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        point_map = tfu.transform_points(map_T_base_link, point_bl)
        t_current_map, r_current_map = self.robot.base.get_pose()
        rospy.loginfo('drive_approach_behavior: point is %.3f m away' % np.linalg.norm(t_current_map[0:2].T - point_map[0:2,0].T))

        point_dist = np.linalg.norm(point_bl)
        bounded_dist = max(point_dist - dist_far, 0) #clamp at zero with the builtin; np.max(x, 0) would treat 0 as an axis
        point_close_bl = (point_bl / point_dist) * bounded_dist
        point_close_map = tfu.transform_points(map_T_base_link, point_close_bl)
        rvalue = self.robot.base.set_pose(point_close_map.T.A1.tolist(), \
                                          r_current_map, '/map', block=True)
        t_end, r_end = self.robot.base.get_pose()
        rospy.loginfo('drive_approach_behavior: ended up %.3f m away from laser point' % np.linalg.norm(t_end[0:2] - point_map[0:2,0].T))
        rospy.loginfo('drive_approach_behavior: ended up %.3f m away from goal' % np.linalg.norm(t_end[0:2] - point_close_map[0:2,0].T))
        rospy.loginfo('drive_approach_behavior: returned %d' % rvalue)
        return rvalue

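    # Illustrative note, not part of the original file: the stopping point
    # computed above, worked for a concrete case.  For a point 3.0 m away
    # and dist_far = 0.5,
    #   bounded_dist   = max(3.0 - 0.5, 0) = 2.5
    #   point_close_bl = (point_bl / 3.0) * 2.5
    # i.e. the goal lies on the ray toward the point, 0.5 m short of it;
    # points already closer than dist_far clamp to the robot's current spot.
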
01126     ##
01127     # Drive so that we are perpendicular to a wall at point_bl (radii voi_radius) 
01128     # stop at dist_approach
01129     def approach_perpendicular_to_surface(self, point_bl, voi_radius, dist_approach):
01130         #return 3
01131         #TODO: Turn to face point
01132         #TODO: make this scan around point instead of total scan of env
01133         #determine normal
01134         #pdb.set_trace()
01135         map_T_base_link0 = tfu.transform('map', 'base_link', self.tf_listener)
01136         point_map0 = tfu.transform_points(map_T_base_link0, point_bl)
01137         #pdb.set_trace()
01138         self.turn_to_point(point_bl, block=False)
01139 
01140         point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), \
01141                                         point_map0)
01142         point_cloud_bl = self.laser_scan.scan(math.radians(180.), math.radians(-180.), 2.5)
01143         point_cloud_np_bl = ru.pointcloud_to_np(point_cloud_bl)
01144         rospy.loginfo('approach_perpendicular_to_surface: pointcloud size %d' \
01145                 % point_cloud_np_bl.shape[1])
01146         voi_points_bl, limits_bl = i3d.select_rect(point_bl, voi_radius, voi_radius, voi_radius, point_cloud_np_bl)
01147         #TODO: use closest plane instead of closest points determined with KDTree
01148         normal_bl = i3d.calc_normal(voi_points_bl)
01149         point_in_front_mechanism_bl = point_bl + normal_bl * dist_approach
01150         map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01151         point_in_front_mechanism_map = tfu.transform_points(map_T_base_link, point_in_front_mechanism_bl)
01152 
01153         #Navigate to point (TODO: check for collisions)
01154         point_map = tfu.transform_points(map_T_base_link, point_bl)
01155         t_current_map, r_current_map = self.robot.base.get_pose()
01156         rospy.loginfo('approach_perpendicular_to_surface: driving for %.3f m to front of surface' \
01157                 % np.linalg.norm(t_current_map[0:2] - point_in_front_mechanism_map[0:2,0].T))
01158         #pdb.set_trace()
01159         rvalue = self.robot.base.set_pose(point_in_front_mechanism_map.T.A1.tolist(), r_current_map, 'map')
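        #set_pose appears to return an actionlib-style status code; 3 matches
        #GoalStatus.SUCCEEDED (an assumption based on how rvalue is used here)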
01160         if rvalue != 3:
01161             return rvalue
01162 
01163         t1_current_map, r1_current_map = self.robot.base.get_pose()
01164         rospy.loginfo('approach_perpendicular_to_surface: %.3f m away from front of surface' % np.linalg.norm(t1_current_map[0:2] - point_in_front_mechanism_map[0:2,0].T))
01165 
01166         #Rotate to face point (TODO: check for collisions)
01167         base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01168         point_bl = tfu.transform_points(base_link_T_map, point_map)
01169         #pdb.set_trace()
01170         self.turn_to_point(point_bl, block=False)
01171         time.sleep(2.)
01172 
01173         return rvalue
01174         #ang = math.atan2(point_bl[1,0], point_bl[0,0])
01175         #self.robot.base.turn_by(ang, block=True)
01176         #pdb.set_trace()
01177 
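    # The stand-off pose above offsets the clicked point along the estimated
    # surface normal. A sketch of the core step, with illustrative values and
    # assuming calc_normal returns a unit normal pointing back toward the robot:
    #
    #   import numpy as np
    #   point_bl  = np.matrix([1.0, 0.2, 0.9]).T   #point on the surface
    #   normal_bl = np.matrix([-1.0, 0.0, 0.0]).T  #unit surface normal
    #   dist_approach = 0.6
    #   point_in_front_bl = point_bl + normal_bl * dist_approach
    #   #-> [0.4, 0.2, 0.9]: a navigation goal 0.6 m off the surface
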
01178     def approach_location(self, point_bl, coarse_stop, fine_stop, voi_radius=.2):
01179         #return
01180         point_dist = np.linalg.norm(point_bl[0:2,0])
01181         rospy.loginfo('approach_location: Point is %.3f away.' % point_dist)
01182         map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01183         point_map = tfu.transform_points(map_T_base_link, point_bl)
01184 
01185         dist_threshold = coarse_stop + .1
01186         if point_dist > dist_threshold:
01187             rospy.loginfo('approach_location: Point is greater than %.1f m away (%.3f).  Driving closer.' % (dist_threshold, point_dist))
01188             rospy.loginfo('approach_location: point_bl ' + str(point_bl.T))
01189 
01190             ret = self.drive_approach_behavior(point_bl, dist_far=coarse_stop)
01191             base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01192             point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
01193             if ret != 3:
01194                 dist_end = np.linalg.norm(point_bl_t1[0:2,0])
01195                 if dist_end > dist_threshold:
01196                     rospy.logerr('approach_location: drive_approach_behavior failed! %.3f' % dist_end)
01197                     self.robot.sound.say("I am unable to navigate to that location")
01198                     return False, 'failed'
01199 
01200             ret = self.approach_perpendicular_to_surface(point_bl_t1, voi_radius=voi_radius, dist_approach=fine_stop)
01201             if ret != 3:
01202                 rospy.logerr('approach_location: approach_perpendicular_to_surface failed!')
01203                 return False, 'failed'
01204 
01205             self.robot.sound.say('done')
01206             rospy.loginfo('approach_location: DONE DRIVING!')
01207             return True, 'done'
01208         else:
01209             return False, 'ignored'
01210 
01211     def turn_to_point(self, point_bl, block=True):
01212         ang = math.atan2(point_bl[1,0], point_bl[0,0])
01213         rospy.loginfo('turn_to_point: turning by %.2f deg' % math.degrees(ang))
01214         #pdb.set_trace()
01215         self.robot.base.turn_by(-ang, block=block, overturn=True)
01216 
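    # turn_to_point reduces to one atan2: the bearing of the point in base_link,
    # where x points forward and y points left. A sketch with illustrative values
    # (the sign passed to turn_by depends on that method's convention):
    #
    #   import math
    #   import numpy as np
    #   point_bl = np.matrix([1.0, 1.0, 0.0]).T
    #   ang = math.atan2(point_bl[1,0], point_bl[0,0])  #pi/4: 45 deg to the left
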
01217     #def load_classifier(self, classifier_name, data_file_name):
01218     #    self.learners[classifier_name] = ipa.InterestPointPerception(classifier_name, 
01219     #            data_file_name, self.tf_listener)
01220 
01221     def stationary_light_switch_behavior(self, point_bl):
01222         while True:
01223             print 'Enter a command u(ntuck), s(tart), l(ight), t(uck), e(x)it.'
01224             a = raw_input()
01225             
01226             if a == 'u': 
01227                 self.untuck()
01228 
01229             if a == 's':
01230                 self.behaviors.movement.pressure_listener.rezero()
01231                 self.behaviors.movement.set_movement_mode_cart()
01232                 self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
01233                 #pdb.set_trace()
01234                 #TODO: set start location to be some set joint angles
01235 
01236             if a == 'l':
01237                 point_offset = np.matrix([0, 0, 0.03]).T
01238                 success, _ = self.light_switch1(point_bl, point_offset=point_offset, \
01239                         press_contact_pressure=300, \
01240                         press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
01241                         visual_change_thres=.03)
01242                         #move_back_distance=np.matrix([-.0075,0,0]).T,
01243 
01244             if a == 't':
01245                 self.tuck()
01246 
01247             if a == 'x':
01248                 break
01249 
01250     #def location_activated_behaviors(self, point_bl, stored_point=False):
01251     #    driving_param = {'light_switch': {'coarse': .7, 'fine': .5, 'voi': .2},
01252     #                     'drawer':       {'coarse': .7, 'fine': .7, 'voi': .2}}
01253 
01254     #    map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01255     #    point_map = tfu.transform_points(map_T_base_link, point_bl)
01256     #    matches = self.find_close_by_points(point_map)
01257 
01258     #    if len(matches) > 0:
01259     #        #pdb.set_trace()
01260     #        ldata = self.location_data[self.location_labels[matches[0]]]
01261     #        task = ldata['task']
01262     #        rospy.loginfo('Found closeby location %s' % str(ldata))
01263     #    else:
01264     #        rospy.loginfo( 'No location matches found. Please enter location type:')
01265     #        for i, k in enumerate(driving_param.keys()):
01266     #            rospy.loginfo(' %d %s' %(i,k))
01267     #        task_number = raw_input()
01268     #        task = driving_param.keys()[int(task_number)]
01269 
01270     #    self.robot.sound.say('task %s' % task.replace('_', ' '))
01271     #    rospy.loginfo('Task is %s' % task)
01272     #    if self.approach_location(point_bl, 
01273     #            coarse_stop=driving_param[task]['coarse'], 
01274     #            fine_stop=driving_param[task]['fine'], 
01275     #            voi_radius=driving_param[task]['voi']):
01276     #        return
01277 
01278     #    else:
01279     #        ret = self.approach_perpendicular_to_surface(point_bl, 
01280     #                voi_radius=driving_param[task]['voi'], 
01281     #                dist_approach=driving_param[task]['fine'])
01282 
01283     #        if ret != 3:
01284     #            rospy.logerr('location_activated_behaviors: approach_perpendicular_to_surface failed!')
01285     #            return
01286 
01287     #        base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01288     #        point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
01289     #        try:
01290     #            self.untuck()
01291     #            self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
01292     #            self.behaviors.movement.pressure_listener.rezero()
01293 
01294     #            if task == 'light_switch':
01295     #                #self.location_add(perturbed_map, task)
01296     #                # TODO: what happens when we first encounter this location?! experiment n times to create dataset?
01297     #                self.practice(point_bl_t1, 
01298     #                        ft.partial(self.light_switch1, 
01299     #                            point_offset=np.matrix([0,0,.03]).T,
01300     #                            press_contact_pressure=300,
01301     #                            move_back_distance=np.matrix([-.0075,0,0]).T,
01302     #                            press_pressure=3500,
01303     #                            press_distance=np.matrix([0,0,-.15]).T,
01304     #                            visual_change_thres=.03), 
01305     #                        'light_switch')
01306     #                self.tuck()
01307 
01308     #                if False: #Old branch where we retry blindly
01309     #                    MAX_RETRIES = 15
01310     #                    rospy.loginfo('location_activated_behaviors: go_home_pose')
01311     #                    #self.go_to_home_pose()
01312     #                    self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
01313     #                    gaussian = pr.Gaussian(np.matrix([ 0,      0,      0.]).T, \
01314     #                                           np.matrix([[1.,     0,      0], \
01315     #                                                      [0, .02**2,      0], \
01316     #                                                      [0,      0, .02**2]]))
01317     #                    retry_count = 0
01318     #                    success = False
01319     #                    gaussian_noise = np.matrix([0, 0, 0.0]).T
01320     #                    point_offset = np.matrix([0, 0, 0.03]).T
01321     #                    while not success:
01322     #                        perturbation = gaussian_noise
01323     #                        perturbed_point_bl = point_bl_t1 + perturbation
01324     #                        success, _ = self.light_switch1(perturbed_point_bl, point_offset=point_offset, \
01325     #                                press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
01326     #                                press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
01327     #                                visual_change_thres=.03)
01328     #                        gaussian_noise = gaussian.sample()
01329     #                        gaussian_noise[0,0] = 0
01330     #                        retry_count = retry_count + 1 
01331 
01332     #                        if retry_count > MAX_RETRIES:
01333     #                            self.robot.sound.say('giving up tried %d times already' % MAX_RETRIES)
01334     #                            break
01335     #                        elif not success:
01336     #                             self.robot.sound.say('retrying')
01337 
01338     #                    if success:
01339     #                        self.robot.sound.say('successful!')
01340 
01341     #                        if not stored_point or retry_count > 1:
01342     #                            map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01343     #                            perturbed_map = tfu.transform_points(map_T_base_link, perturbed_point_bl)
01344     #                            self.location_add(perturbed_map, task)
01345     #                            self.robot.sound.say('recorded point')
01346 
01347     #                        #if retry_count > 1:
01348     #                        #    if not self.add_perturbation_to_location(point_map, perturbation):
01349     #                        #        self.robot.sound.say('unable to add perturbation to database! please fix')
01350     #                    self.tuck()
01351 
01352     #
01353     #            if task == 'drawer':
01354     #                self.drawer(point_bl_t1)
01355     #                self.tuck()
01356     #                self.robot.sound.say('done')
01357     #                self.location_add(point_map, task)
01358     #                
01359     #                #except lm.RobotSafetyError, e:
01360     #                #    rospy.loginfo('location_activated_behaviors: Caught a robot safety exception "%s"' % str(e.parameter))
01361     #                #    #self.behaviors.movement.move_absolute(self.start_location, stop='accel')
01362 
01363     #        except lm.RobotSafetyError, e:
01364     #            rospy.loginfo('location_activated_behaviors: Caught a robot safety exception "%s"' % str(e.parameter))
01365     #            self.behaviors.movement.move_absolute(self.start_location, stop='accel')
01366     #
01367     #        except TaskError, e:
01368     #            rospy.loginfo('location_activated_behaviors: TaskError: %s' % str(e.parameter))
01369     #        rospy.loginfo('location_activated_behaviors: DONE MANIPULATION!')
01370     #        self.robot.sound.say('done')
01371 
01372     def location_approach_driving(self, task, point_bl):
01373         #Get closer if point is far away
01374         ap_result = self.approach_location(point_bl, 
01375                         coarse_stop=self.locations_man.driving_param[task]['coarse'], 
01376                         fine_stop=self.locations_man.driving_param[task]['fine'], 
01377                         voi_radius=self.locations_man.driving_param[task]['voi'])
01378 
01379         if ap_result[1] == 'failed':
01380             return False, 'approach_location failed'
01381 
01382         if ap_result[1] == 'ignored': #approach_location returns 'ignored' when already close enough
01383             #reorient with planner
01384             ret = self.approach_perpendicular_to_surface(point_bl, 
01385                     voi_radius=self.locations_man.driving_param[task]['voi'], 
01386                     dist_approach=self.locations_man.driving_param[task]['fine'])
01387             if ret != 3:
01388                 rospy.logerr('location_approach_driving: approach_perpendicular_to_surface failed!')
01389                 return False, 'approach_perpendicular_to_surface failed'
01390             else:
01391                 return True, None
01392 
01393         return True, None
01394 
01395 
01396         #if self.approach_location(point_bl, 
01397         #        coarse_stop=self.locations_man.driving_param[task]['coarse'], 
01398         #        fine_stop=self.locations_man.driving_param[task]['fine'], 
01399         #        voi_radius=self.locations_man.driving_param[task]['voi']):
01400         #    #rospy.logerr('location_approach_driving: intial approach failed')
01401         #    return True, 'initial approach'
01402         #else:
01403 
01404     def get_behavior_by_task(self, task_type):
01405         if task_type == 'light_switch_down':
01406             return ft.partial(self.light_switch1, 
01407                         #point_offset=np.matrix([0,0,.03]).T,
01408                         point_offset=np.matrix([0,0, -.08]).T,
01409                         press_contact_pressure=300,
01410                         #move_back_distance=np.matrix([-.0075,0,0]).T,
01411                         press_pressure=6000,
01412                         press_distance=np.matrix([0.01,0,-.15]).T,
01413                         visual_change_thres=.025)
01414 
01415         elif task_type == 'light_switch_up':
01416             return ft.partial(self.light_switch1, 
01417                         #point_offset=np.matrix([0,0,-.08]).T,
01418                         point_offset=np.matrix([0,0,.08]).T,
01419                         press_contact_pressure=300,
01420                         #move_back_distance=np.matrix([-.0075,0,0]).T,
01421                         press_pressure=6000,
01422                         press_distance=np.matrix([0.01,0,.15]).T,
01423                         visual_change_thres=.025)
01424 
01425         elif task_type == 'light_rocker_up':
01426             return ft.partial(self.light_rocker_push,
01427                         pressure=500,
01428                         visual_change_thres=.025, offset=np.matrix([0,0,-.05]).T)
01429 
01430         elif task_type == 'light_rocker_down':
01431             return ft.partial(self.light_rocker_push,
01432                         pressure=500,
01433                         visual_change_thres=.025, offset=np.matrix([0,0,.05]).T)
01434 
01435         elif task_type == 'pull_drawer':
01436             return self.drawer
01437 
01438         elif task_type == 'push_drawer':
01439             return self.drawer_push
01440 
01441         else:
01442             pdb.set_trace()
01443             #TODO
01444 
01445 
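    # Each branch above uses ft.partial to pre-bind the task parameters, so the
    # returned callable needs only the contact point. The same pattern with a
    # hypothetical behavior function, for illustration:
    #
    #   import functools as ft
    #   import numpy as np
    #   def press(point_bl, press_pressure, press_distance):
    #       pass #stand-in for a real behavior
    #   beh = ft.partial(press, press_pressure=6000,
    #                    press_distance=np.matrix([0.01, 0, -.15]).T)
    #   beh(np.matrix([0.7, 0.0, 1.1]).T) #same as calling press with every argument
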
01446     def manipulation_posture(self, task_type):
01447         self.robot.projector.set(False)
01448         for i in range(3):
01449             self.prosilica.get_frame()
01450         self.robot.projector.set(True)
01451         #rospy.sleep(1)
01452 
01453         self.robot.left_gripper.open(False, .005)
01454         #self.robot.right_gripper.open(True, .005)
01455         self.behaviors.movement.pressure_listener.rezero()
01456 
01457         if task_type == 'light_switch_down' or task_type == 'light_switch_up':
01458             if np.linalg.norm(self.start_location_light_switch[0] - self.robot.left.pose_cartesian_tf()[0]) < .3:
01459                 return
01460             self.robot.torso.set_pose(.2, True)
01461             self.untuck()
01462             self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
01463             self.behaviors.movement.pressure_listener.rezero()
01464 
01465         elif task_type == 'light_rocker_up' or task_type == 'light_rocker_down':
01466             if np.linalg.norm(self.start_location_light_switch[0] - self.robot.left.pose_cartesian_tf()[0]) < .3:
01467                 return
01468             self.robot.torso.set_pose(.2, True)
01469             self.untuck()
01470             self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
01471             self.behaviors.movement.pressure_listener.rezero()
01472 
01473         elif task_type == 'pull_drawer' or task_type == 'push_drawer':
01474             if np.linalg.norm(self.start_location_drawer[0] - self.robot.left.pose_cartesian_tf()[0]) < .3:
01475                 return
01476             self.robot.torso.set_pose(0.01, True)
01477             self.untuck()
01478             self.behaviors.movement.move_absolute(self.start_location_drawer, stop='pressure')
01479             self.behaviors.movement.pressure_listener.rezero()
01480         else:
01481             pdb.set_trace()
01482 
01483 
01484     def driving_posture(self, task_type):
01485         self.robot.projector.set(False)
01486         self.close_gripper()
01487 
01488         if np.linalg.norm(self.folded_pose - self.robot.left.pose_cartesian_tf()[0]) < .1:
01489             return
01490         #TODO: specialize this
01491         self.robot.torso.set_pose(0.03, True)
01492         self.robot.left_gripper.open(False, .005)
01493         #self.robot.right_gripper.open(True, .005)
01494         self.behaviors.movement.pressure_listener.rezero()
01495 
01496         if task_type == 'light_switch_down' or task_type == 'light_switch_up':
01497             self.tuck()
01498 
01499         elif task_type == 'light_rocker_up' or task_type == 'light_rocker_down':
01500             self.tuck()
01501 
01502         elif task_type == 'pull_drawer' or task_type == 'push_drawer':
01503             self.tuck()
01504         else:
01505             pdb.set_trace()
01506 
01507 
01508     def look_at(self, point_bl, block=True):
01509         #pdb.set_trace()
01510         #self.robot.head.look_at(point_bl-np.matrix([0,0,.2]).T, 
01511         #        pointing_frame=self.OPTICAL_FRAME, 
01512         #        pointing_axis=np.matrix([1,0,0.]).T, 
01513         #        wait=block)
01514         self.robot.head.look_at(point_bl-np.matrix([0,0,.15]).T, pointing_frame=self.OPTICAL_FRAME, 
01515                 pointing_axis=np.matrix([1,0,0.]).T, wait=block)
01516 
01517 
01518     ##
01519     # Initialization phase
01520     #
01521     # @param point_bl 3x1 in base_link
01522     def scenario_user_clicked_at_location(self, point_bl):
01523         #pdb.set_trace()
01524         #If that location is new:
01525         self.look_at(point_bl, False)
01526         map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01527         point_map = tfu.transform_points(map_T_base_link, point_bl)
01528         close_by_locs = self.locations_man.list_close_by(point_map)
01529         #close_by_locs = self.locations_man.list_all()#(point_map)
01530 
01531         #if False:
01532         #if True:
01533         #if len(close_by_locs) <= 0:
01534         if True:
01535             #Initialize new location
01536             rospy.loginfo('Select task type:')
01537             for i, ttype in enumerate(self.locations_man.task_types):
01538                 print i, ttype
01539             task_type = self.locations_man.task_types[int(raw_input())]
01540             rospy.loginfo('Selected task %s' % task_type)
01541 
01542             #pdb.set_trace()
01543             self.driving_posture(task_type)
01544             #ret = self.location_approach_driving(task_type, point_bl)
01545             #if not ret[0]:
01546             #    return False, ret[1]
01547 
01548             #pdb.set_trace()
01549             self.manipulation_posture(task_type)
01550             point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
01551             self.look_at(point_bl, False)
01552 
01553             #rospy.loginfo('if existing dataset exists enter that dataset\'s name')
01554             #print 'friday_730_light_switch2.pkl'
01555             #filename = raw_input()
01556 
01557             #If we have data for perceptual bias
01558             #if len(filename) > 0:
01559             #    dataset = ut.load_pickle(filename)
01560             #    task_id = self.locations_man.create_new_location(task_type, point_map)
01561 
01562             #    #Assume that this is file is 100% reliable
01563             #    for n in range(LocationManager.RELIABILITY_RECORD_LIM):
01564             #        self.locations_man.update_execution_record(task_id, 1)
01565             #    self.locations_man.data[task_id]['dataset'] = dataset
01566             #    #rec_params = self.feature_ex.rec_params
01567             #    #def train(self, rec_params, dataset, task_id):
01568             #    self.locations_man.train(self.feature_ex.rec_params, dataset, task_id)
01569             #    #TODO: we actually want to find only the successful location
01570             #    #pdb.set_trace()
01571             #    self.execute_behavior(self.get_behavior_by_task(task_type), task_id, point_bl)
01572 
01573             ##If we don't have data
01574             #else:
01575             def has_pos_and_neg(labels):
01576                 if np.sum(labels == r3d.POSITIVE) > 0 and np.sum(labels == r3d.NEGATIVE) > 0:
01577                     return True
01578                 else:
01579                     return False
01580 
01581             def any_pos_sf(labels_mat):
01582                 if np.any(r3d.POSITIVE == labels_mat):
01583                     return True
01584                 return False
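            # e.g., if r3d.POSITIVE == 1 and r3d.NEGATIVE == 0 (assumed values):
            #   has_pos_and_neg(np.matrix([1, 0, 1])) -> True, both labels present
            #   any_pos_sf(np.matrix([0, 0]))         -> False, no positives yet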
01585 
01586             #Create new tasks
01587             location_name = raw_input('Enter a name for this location:\n')
01588             ctask_type = self.locations_man.get_complementary_task(task_type)
01589             t_current_map, r_current_map = self.robot.base.get_pose()
01590             task_id = self.locations_man.create_new_location(task_type, 
01591                     np.matrix([0,0,0.]).T, [t_current_map, r_current_map], name=location_name)
01592             ctask_id = self.locations_man.create_new_location(ctask_type, 
01593                     np.matrix([0,0,0.]).T, [t_current_map, r_current_map], name=location_name)
01594 
01595             #Stop when have at least 1 pos and 1 neg
01596             #pdb.set_trace()
01597             point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
01598             self.look_at(point_bl, True)
01599             ret_dict_action = self.seed_dataset_explore(task_id, ctask_id, point_bl, stop_fun=has_pos_and_neg, should_reset=True)
01600             dset_action = ret_dict_action['features']
01601             dset_undo = ret_dict_action['features_undo']
01602             undo_point_bl = ret_dict_action['undo_point']
01603 
01604             #Lights should be on at this stage!
01605             #pdb.set_trace()
01606             #If we don't have enough data for the reverse action, collect it now
01607             if (self.locations_man.data[ctask_id]['dataset'] == None) or \
01608                     not has_pos_and_neg(self.locations_man.data[ctask_id]['dataset'].outputs.A1):
01609                 rospy.loginfo('====================================================')
01610                 rospy.loginfo('Don\'t have enough data for reverse action')
01611                 #Turn off the lights 
01612                 point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
01613                 self.look_at(point_bl, True)
01614                 rospy.loginfo('====================================================')
01615                 rospy.loginfo('Running seed_dataset_explore on set %s.' % task_id)
01616                 ret_dict_action = self.seed_dataset_explore(task_id, None, point_bl, stop_fun=any_pos_sf, should_reset=False)
01617                 undo_point_bl = ret_dict_action['undo_point']
01618 
01619                 #Practice until stats are met
01620                 self.look_at(point_bl, True)
01621                 #ret_dict_undo = self.seed_dataset_explore(ctask_id, task_id, point_bl, stop_fun=has_pos_and_neg)
01622                 rospy.loginfo('====================================================')
01623                 rospy.loginfo('Running seed_dataset_explore on set %s.' % ctask_id)
01624                 ret_dict_undo = self.seed_dataset_explore(ctask_id, task_id, undo_point_bl, stop_fun=has_pos_and_neg)
01625                 if dset_undo == None:
01626                     dset_undo = ret_dict_undo['features']
01627 
01628             #Figure out behavior centers in map frame
01629             tdataset  = self.locations_man.data[task_id]['dataset']
01630             tpoint_bl = tdataset.pt3d[:, np.where(tdataset.outputs == r3d.POSITIVE)[1].A1[0]]
01631             self.balance_positives_and_negatives(tdataset)
01632 
01633             cdataset  = self.locations_man.data[ctask_id]['dataset']
01634             cpoint_bl = cdataset.pt3d[:, np.where(cdataset.outputs == r3d.POSITIVE)[1].A1[0]]
01635             self.balance_positives_and_negatives(cdataset)
01636 
01637             map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01638             point_success_map = tfu.transform_points(map_T_base_link, tpoint_bl)
01639             cpoint_success_map = tfu.transform_points(map_T_base_link, cpoint_bl)
01640 
01641             #Set newly created task with learned information
01642             self.locations_man.set_center(task_id, point_success_map)
01643             self.locations_man.set_center(ctask_id, cpoint_success_map)
01644             self.locations_man.data[task_id]['complementary_task_id'] = ctask_id
01645             self.locations_man.data[ctask_id]['complementary_task_id'] = task_id
01646 
01647             self.locations_man.update_execution_record(task_id, 1.)
01648             self.locations_man.update_execution_record(ctask_id, 1.)
01649 
01650             #param = r3d.Recognize3DParam()
01651             #param.uncertainty_x = 1.
01652             #param.n_samples = 2000
01653             #param.uni_mix = .1
01654             #kdict, fname = self.read_features_save(task_id, point3d_bl, param)
01655             self.locations_man.train(task_id, dset_action, save_pca_images=True)
01656             self.locations_man.train(ctask_id, dset_undo, save_pca_images=True)
01657             self.locations_man.save_database()
01658             rospy.loginfo('Done initializing new location!')
01659             self.driving_posture(task_type)
01660         #else:
01661         #    task_id, task_type = close_by_locs[0]
01662         #    ctask_id = self.locations_man.data[task_id]['complementary_task_id']
01663         #    rospy.loginfo('Selected task %s' % task_type)
01664 
01665         #    self.driving_posture(task_type)
01666         #    ret = self.location_approach_driving(task_type, point_bl)
01667         #    if not ret[0]:
01668         #        return False, ret[1]
01669         #    #pdb.set_trace()
01670         #    self.manipulation_posture(task_type)
01671         #    point_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
01672         #    self.look_at(point_bl, False)
01673         #    #close_by_locs = self.locations_man.list_close_by(point_map)
01674 
01675         #    if self.locations_man.is_reliable(task_id):
01676         #        self.execute_behavior(task_id, point_bl)
01677         #    else:
01678         #        self.practice(task_id, ctask_id, point_bl)
01679 
01680         #    self.driving_posture(task_type)
01681 
01682     #def add_to_practice_points_map(self, point_bl):
01683     #    map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01684     #    point_map = tfu.transform_points(map_T_base_link, point_bl)
01685     #    self.practice_points_map.append(point_map)
01686     #    rospy.loginfo('Added a new practice point!')
01687 
01688 
01689     def move_base_planner(self, trans, rot):
01690         #pdb.set_trace()
01691         p_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), np.matrix(trans).T)
01692         #Do this to clear out any hallucinated obstacles
01693         self.turn_to_point(p_bl)
01694         rvalue = self.robot.base.set_pose(trans, rot, '/map', block=True)
01695         p_bl = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), np.matrix(trans).T)
01696         #pdb.set_trace()
01697         self.robot.base.move_to(p_bl[0:2,0], True)
01698         t_end, r_end = self.robot.base.get_pose()
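        #report (reached_goal, distance_left); comparing rvalue to 3 assumes the
        #actionlib convention where GoalStatus.SUCCEEDED == 3, as used above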
01699         return rvalue==3, np.linalg.norm(t_end[0:2] - np.array(trans)[0:2])
01700 
01701     def update_base(self):
01702         tasks = self.locations_man.data.keys()
01703         for i, k in enumerate(tasks):
01704             print i, k
01705         selected_idx = int(raw_input("Select an action to update base location\n"))
01706 
01707         t_current_map, r_current_map = self.robot.base.get_pose()
01708         task_id = tasks[selected_idx]
01709         self.locations_man.update_base_pose(task_id, [t_current_map, r_current_map])
01710         self.locations_man.save_database()
01711 
01712 
01713     ##
01714     # Practice phase
01715     def scenario_practice_run_mode(self):
01716         rospy.loginfo('===================================================')
01717         rospy.loginfo('= Practice Mode!                                  =')
01718         rospy.loginfo('===================================================')
01719         #pdb.set_trace()
01720 
01721         ulocs = self.unreliable_locs()
01722         rospy.loginfo('%d out of %d locs in database are unreliable' \
01723                 % (len(ulocs), len(self.locations_man.data.keys())))
01724 
01725         #Ask user to select a location
01726         tasks = self.locations_man.data.keys()
01727         for i, k in enumerate(tasks):
01728             print i, k
01729         #for tidx, tid in enumerate(ulocs):
01730         #    print tidx, tid
01731         selected_idx = int(raw_input("Select a location to execute action\n"))
01732 
01733         #tid = ulocs[selected_idx]
01734         tid = tasks[selected_idx]
01735         rospy.loginfo('selected %s' % tid)
01736 
01737         #Ask user for practice poses
01738         #pdb.set_trace()
01739         if not self.locations_man.data[tid].has_key('practice_locations'):
01740         #if True:
01741             #Get robot poses
01742             map_points = []
01743             for i in range(4):
01744                 raw_input('%d move robot to desired position\n' % i)
01745                 rospy.sleep(1.)
01746                 #pdb.set_trace()
01747                 t_current_map, r_current_map = self.robot.base.get_pose()
01748                 map_points.append([t_current_map, r_current_map])
01749             self.locations_man.data[tid]['practice_locations'] = map_points
01750             self.locations_man.data[tid]['practice_locations_history'] = np.zeros((1, len(map_points)))
01751             self.locations_man.data[tid]['practice_locations_convergence'] = np.zeros((1, len(map_points)))
01752 
01753             self.locations_man.save_database()
01754 
01755         #Ask user for canonical pose if it does not exist
01756         if not self.locations_man.data[tid].has_key('base_pose'):
01757             raw_input('move robot to desired end position\n')
01758             trans, rot_quat = self.robot.base.get_pose()
01759             self.locations_man.data[tid]['base_pose'] = [trans, rot_quat]
01760             self.locations_man.save_database()
01761 
01762         point_map = self.locations_man.data[tid]['center']
01763         task_type = self.locations_man.data[tid]['task']
01764         points_added_history = []
01765 
01766         unexplored_locs  = np.where(self.locations_man.data[tid]['practice_locations_history'] == 0)[1]
01767         unconverged_locs = np.where(self.locations_man.data[tid]['practice_locations_convergence'] == 0)[1]
01768         rospy.loginfo("Location history: %s" % str(self.locations_man.data[tid]['practice_locations_history']))
01769         run_loop = True
01770         #pdb.set_trace()
01771 
01772         #If this is a fresh run, start with current location
01773         if unexplored_locs.shape[0] == len(self.locations_man.data[tid]['practice_locations']):
01774             pidx = 3
01775 
01776         #If this is not a fresh run we continue with a location we've never been to before
01777         elif unexplored_locs.shape[0] > 0:
01778             pidx = unexplored_locs[0]
01779             rospy.loginfo("Resuming training from last unexplored location")
01780 
01781         #set the index to an unconverged location
01782         elif unconverged_locs.shape[0] > 0:
01783             pidx = unconverged_locs[0]
01784             rospy.loginfo("Resuming training from unconverged location")
01785 
01786         #if there are no unconverged locations, try to run anyway
01787         else:
01788             rospy.loginfo("WARNING: no unexplored or unconverged location")
01789             pidx = 3
01790             self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] = 0
01791 
01792         #Commence practice!
01793         while True: #not converged
01794 
01795             if self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] == 0:
01796                 #Drive to location
01797                 self.driving_posture(task_type)
01798 
01799                 #Move to setup location
01800                 self.robot.sound.say('Driving to practice location')
01801                 rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['practice_locations'][pidx])
01802                 rospy.loginfo('move ret value %s dist %f' % (str(rvalue), dist))
01803 
01804                 #Move to location where we were first initialized
01805                 self.robot.sound.say('Driving to mechanism location')
01806                 rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['base_pose'])
01807 
01808                 #Reorient base
01809                 bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01810                 point_bl = tfu.transform_points(bl_T_map, point_map)
01811                 ret = self.location_approach_driving(task_type, point_bl)
01812                 self.robot.base.set_pose(self.robot.base.get_pose()[0], self.locations_man.data[tid]['base_pose'][1], 'map')
01813                 if not ret[0]:
01814                     rospy.loginfo('Driving failed!! Resetting.')
01815                     #pdb.set_trace()
01816                     #return False, ret[1]
01817                     continue
01818 
01819                 self.manipulation_posture(task_type)
01820                 bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01821                 point_bl = tfu.transform_points(bl_T_map, point_map)
01822                 self.look_at(point_bl, False)
01823 
01824                 #Practice
01825                 self.robot.sound.say('practicing')
01826                 ctid = self.locations_man.data[tid]['complementary_task_id']
01827 
01828                 points_before_t = self.locations_man.data[tid]['dataset'].inputs.shape[1]
01829                 points_before_ct = self.locations_man.data[ctid]['dataset'].inputs.shape[1]
01830 
01831                 points_added = self.practice(tid, ctid,  point_bl)
01832 
01833                 points_added_history.append(points_added)
01834                 points_after_t = self.locations_man.data[tid]['dataset'].inputs.shape[1]
01835                 points_after_ct = self.locations_man.data[ctid]['dataset'].inputs.shape[1]
01836                 self.locations_man.record_time(tid,  'num_points_added_' + tid, points_after_t - points_before_t)
01837                 self.locations_man.record_time(ctid, 'num_points_added_' + tid, points_after_ct - points_before_ct)
01838 
01839                 self.locations_man.data[tid]['practice_locations_history'][0, pidx] += 1
01840                 #If no points added and we have explored all locations
01841                 #if points_added == 0 and np.where(self.locations_man.data[tid]['practice_locations_history'] == 0)[1].shape[0] == 0:
01842                 if points_added == 0:# and np.where(self.locations_man.data[tid]['practice_locations_history'] == 0)[1].shape[0] == 0:
01843                     self.locations_man.data[tid]['practice_locations_convergence'][0, pidx] = 1
01844                     rospy.loginfo('===================================================')
01845                     rospy.loginfo('= LOCATION CONVERGED ')
01846                     rospy.loginfo('Converged locs: %s' % str(self.locations_man.data[tid]['practice_locations_convergence']))
01847                     rospy.loginfo('using instances %d points added' % (points_after_t - points_before_t))
01848                     rospy.loginfo('history %s' % str(points_added_history))
01849                     rospy.loginfo('number of iterations it took %s' % str(np.sum(points_added_history)))
01850                     rospy.loginfo('number of datapoints %s' % str(self.locations_man.data[tid]['dataset'].outputs.shape))
01851                     rospy.loginfo('===================================================')
01852                     if np.where(self.locations_man.data[tid]['practice_locations_convergence'] == 0)[1].shape[0] <= 0:
01853                         break
01854                 else:
01855                     rospy.loginfo('===================================================')
01856                 rospy.loginfo('= Scan not yet converged, practice continues')
01857                     rospy.loginfo('Converged locs: %s' % str(self.locations_man.data[tid]['practice_locations_convergence']))
01858                     rospy.loginfo('using instances %d points added' % (points_after_t - points_before_t))
01859                     rospy.loginfo('history %s' % str(points_added_history))
01860                     rospy.loginfo('number of iterations so far %s' % str(np.sum(points_added_history)))
01861                     rospy.loginfo('number of datapoints %s' % str(self.locations_man.data[tid]['dataset'].outputs.shape))
01862                     rospy.loginfo('===================================================')
01863 
01864             pidx = (pidx + 1) % len(self.locations_man.data[tid]['practice_locations'])
01865             self.locations_man.save_database()
01866 
01867         self.driving_posture(task_type)
01868         #ulocs = self.unreliable_locs()
01869         #rospy.loginfo('%d out of %d locs in database are unreliable' \
01870         #        % (len(ulocs), len(self.locations_man.data.keys())))
01871 
01872     ##
01873     # Execution phase
01874     def scenario_user_select_location(self, save, user_study):
01875         rospy.loginfo('===================================================')
01876         rospy.loginfo('= User selection mode!                            =')
01877         rospy.loginfo('===================================================')
01878         tasks = self.locations_man.data.keys()
01879         for i, k in enumerate(tasks):
01880             print i, k
01881 
01882         tid = tasks[int(raw_input())]
01883         task_type = self.locations_man.data[tid]['task']
01884         rospy.loginfo('User selected %s' % tid)
01885         #self.driving_posture(task_type)
01886         #self.manipulation_posture(task_type)
01887         #return
01888 
01889         #record current robot position
01890         if not self.locations_man.data[tid].has_key('execute_locations'):
01891             self.locations_man.data[tid]['execute_locations'] = []
01892         t_current_map, r_current_map = self.robot.base.get_pose()
01893         self.locations_man.data[tid]['execute_locations'].append([t_current_map, r_current_map])
01894         if not user_study:
01895             self.locations_man.save_database()
01896 
01897         point_map = self.locations_man.data[tid]['center']
01898         #pdb.set_trace()
01899         self.robot.sound.say('Driving')
01900         self.driving_posture(task_type)
01901         rvalue, dist = self.move_base_planner(*self.locations_man.data[tid]['base_pose'])
01902         if not rvalue:
01903             self.robot.sound.say('unable to plan to location')
01904             return False
01905 
01906         bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01907         point_bl = tfu.transform_points(bl_T_map, point_map)
01908         ret = self.location_approach_driving(task_type, point_bl)
01909         self.robot.base.set_pose(self.robot.base.get_pose()[0], 
01910                                  self.locations_man.data[tid]['base_pose'][1], 'map')
01911         if not ret[0]:
01912             self.robot.sound.say('unable to get to location!')
01913             return False
01914 
01915         self.manipulation_posture(task_type)
01916         bl_T_map = tfu.transform('base_link', 'map', self.tf_listener)
01917         point_bl = tfu.transform_points(bl_T_map, point_map)
01918         self.look_at(point_bl, False)
01919 
01920         self.robot.sound.say('Executing behavior')
01921         self.execute_behavior(tid, point_bl, save, user_study=user_study)
01922         self.driving_posture(task_type)
01923         #exit()
01924 
01925     def unreliable_locs(self):
01926         tasks = self.locations_man.data.keys()
01927         utid = []
01928         for tid in tasks:
01929             if not self.locations_man.is_reliable(tid):
01930                 utid.append(tid)
01931         return utid
01932 
01933 
01934 
01935     def click_cb(self, point_bl):
01936         #self.look_at(point_bl)
01937         #return
01938         #point_bl = np.matrix([ 0.68509375,  0.06559023,  1.22422832]).T
01939         #self.stationary_light_switch_behavior(point_bl)
01940         #mode = 'autonomous'
01941         #mode = 'light switch'
01942         mode = 'location_execute'
01943         #mode = 'practice'
01944 
01945         if point_bl != None:
01946             print 'Got point', point_bl.T
01947             #1) User points at location and selects a behavior.
01948             if mode == 'location_execute':
01949                 self.scenario_user_clicked_at_location(point_bl)
01950 
01951             #if mode == 'practice':
01952             #    self.add_to_practice_points_map(point_bl)
01953 
01954 
01955                 ##If that location is new:
01956                 #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01957                 #point_map = tfu.transform_points(map_T_base_link, point_bl)
01958                 #close_by_locs = self.locations_man.list_close_by(point_map)
01959 
01960                 #if len(close_by_locs) <= 0:
01961                 #    #initialize new location
01962                 #    rospy.loginfo('Select task type:')
01963                 #    for i, ttype in enumerate(self.locations_man.task_types):
01964                 #        print i, ttype
01965                 #    task_type = self.locations_man[int(raw_input())]
01966                 #    task_id = self.locations_man.create_new_location(task_type, point_map)
01967 
01968                 #    rospy.loginfo('if existing dataset exists enter that dataset\'s name')
01969                 #    print 'friday_730_light_switch2.pkl'
01970                 #    filename = raw_input()
01971                 #    if len(filename) > 0:
01972                 #        dataset = ut.load_pickle(filename)
01973                 #        self.locations_man.data[task_id]['dataset'] = dataset
01974                 #        self.locations_man.train(dataset, task_id)
01975                 #    else:
01976                 #        self.last_ditch_execution(
01977 
01978                 #elif len(close_by_locs) == 1:
01979                 #    task_id, task = close_by_locs[0]
01980                 #    rospy.loginfo('Executing task %s with id % s', task, task_id)
01981                 #    self.execute_behavior(task_id, point_bl)
01982 
01983                 #elif len(close_by_locs) > 1:
01984                 #    #TODO: implement this case
01985                 #    rospy.logerr('ERROR: unimplemented')
01986                 #    pdb.set_trace()
01987                 #    self.execute_behavior(task_id, point_bl)
01988                 #else:
01989                 #    rospy.logerr('ERROR: we shouldn\'t have reached here')
01990                 #    pdb.set_trace()
01991 
01992 
01993             if mode == 'live_label':
01994                 #self.execute_behavior(point_bl, 
01995                 light_switch_beh = ft.partial(self.light_switch1, 
01996                                         point_offset=np.matrix([0,0,.03]).T,
01997                                         press_contact_pressure=300,
01998                                         #move_back_distance=np.matrix([-.0075,0,0]).T,
01999                                         press_pressure=3500,
02000                                         press_distance=np.matrix([0,0,-.15]).T,
02001                                         visual_change_thres=.03)
02002                 point_map = tfu.transform_points(tfu.transform('map', 'base_link', self.tf_listener), point_bl)
02003 
02004                 while not rospy.is_shutdown():
02005                     point_bl_cur = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), point_map)
02006                     self.execute_behavior(point_bl_cur, light_switch_beh, 'light_switch')
02007 
02008             if mode == 'capture data':
02009                 self.robot.head.look_at(point_bl, 'base_link', True)
02010                 self.robot.sound.say("taking a scan")
02011                 #self.record_perceptual_data(point_bl)
02012                 #pdb.set_trace()
02013                 #rdict = self.feature_ex.kinect_listener.read()
02014                 rdict = self.feature_ex.read()['rdict']
02015                 #self.record_perceptual_data(point_bl, 'openni_rgb_optical_frame', rdict=rdict)
02016                 self.record_perceptual_data(point_bl, self.OPTICAL_FRAME, rdict=rdict)
02017                 self.robot.sound.say("saved scan")
02018 
02019             if mode == 'light switch':
02020                 point_offset = np.matrix([0, 0, 0.03]).T
02021                 success, _ = self.light_switch1(point_bl, point_offset=point_offset, \
02022                         press_contact_pressure=300,\
02023                         # move_back_distance=np.matrix([-.0075,0,0]).T,\
02024                         press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
02025                         visual_change_thres=.03)
02026 
02027                 self.behaviors.movement.pressure_listener.rezero()
02028                 self.behaviors.movement.set_movement_mode_cart()
02029                 self.behaviors.movement.move_absolute(self.start_location_light_switch, stop='pressure')
02030 
02031 
02032             #if mode == 'autonomous learn':
02033             #    def light_behavior(point):
02034             #        point_offset = np.matrix([0, 0, 0.03]).T
02035             #        success, _ = self.light_switch1(point, point_offset=point_offset, \
02036             #                        press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
02037             #                        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
02038             #                        visual_change_thres=.03)
02039             #        if success:
02040             #            return 1.0
02041             #        else:
02042             #            return 0.0
02043 
02044             #    self.untuck()
02045             #    self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
02046             #    self.behaviors.movement.pressure_listener.rezero()
02047             #    self.autonomous_learn(point_bl, light_behavior, 'light_switch')
02048 
02049             #if mode == 'location activated':
02050             #    self.location_activated_behaviors(point_bl)
02051 
02052         #elif mode == 'location activated':
02053         #    all_locs = self.locations_man.list_all()
02054         #    for i, pair in enumerate(all_locs):
02055         #        key, task = pair
02056         #        print i, task, key
02057 
02058         #    rospy.loginfo('Select location to execute action')
02059         #    selected = int(raw_input())
02060 
02061     def run(self, mode, save, user_study):
02062         #point = np.matrix([ 0.60956734, -0.00714498,  1.22718197]).T
02063         #print 'RECORDING'
02064         #self.record_perceptual_data(point)
02065         #print 'DONE RECORDING'
02066         #kdict, fname = self.read_features_save(task_id, point3d_bl, param)
02067         #point = np.matrix([ 0.60956734, -0.00714498,  1.22718197]).T
02068         #f = self.feature_ex.read(point, self.rec_params)
02069 
02070         r = rospy.Rate(10)
02071         rospy.loginfo('Ready.')
02072         while not rospy.is_shutdown():
02073             r.sleep()
02074             if mode == 'practice':
02075                 self.scenario_practice_run_mode()
02076 
02077             if mode == 'execute':
02078                 t = time.time()
02079                 self.scenario_user_select_location(save, user_study)
02080                 t2 = time.time()
02081                 print '>> That took', t2 - t, 'seconds'
02082                 exit()
02083 
02084             if mode == 'update_base':
02085                 self.update_base()
02086 
02087 
02088     #def record_perceptual_data_laser_scanner(self, point_touched_bl):
02089     #    #what position should the robot be in?
02090     #    #set arms to non-occluding pose
02091 
02092     #    #record region around the finger where you touched
02093     #    rospy.loginfo('Getting laser scan.')
02094     #    points = []
02095     #    for i in range(3):
02096     #        rospy.loginfo('scan %d' % i)
02097     #        points.append(self.laser_scan.scan(math.radians(180.), math.radians(-180.), 20./3.))
02098 
02099     #    rospy.loginfo('Getting Prosilica image.')
02100     #    prosilica_image = self.prosilica.get_frame()
02101     #    rospy.loginfo('Getting image from left wide angle camera.')
02102     #    left_image  = self.wide_angle_camera_left.get_frame()
02103     #    rospy.loginfo('Getting image from right wide angle camera.')
02104     #    right_image = self.wide_angle_camera_right.get_frame()
02105     #    rospy.loginfo('Waiting for calibration.')
02106     #    while self.prosilica_cal.has_msg == False:
02107     #        time.sleep(.1)
02108 
02109     #    #which frames?
02110     #    rospy.loginfo('Getting transforms.')
02111     #    pro_T_bl = tfu.transform('/high_def_optical_frame', '/base_link', self.tf_listener)
02112     #    laser_T_bl = tfu.transform('/laser_tilt_link', '/base_link', self.tf_listener)
02113     #    tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
02114     #    prosilica_name = '%s_highres.png' % tstring
02115     #    left_name = '%s_left.png' % tstring
02116     #    right_name = '%s_right.png' % tstring
02117     #    rospy.loginfo('Saving images (basename %s)' % tstring)
02118     #    cv.SaveImage(prosilica_name, prosilica_image)
02119     #    cv.SaveImage(left_name, left_image)
02120     #    cv.SaveImage(right_name, right_image)
02121 
02122     #    rospy.loginfo('Saving pickles')
02123     #    pickle_fname = '%s_interest_point_dataset.pkl' % tstring   
02124 
02125     #    data_pkl = {'touch_point': point_touched_bl,
02126     #                'points_laser': points,
02127     #                'laser_T_bl': laser_T_bl, 
02128     #                'pro_T_bl': pro_T_bl,
02129 
02130     #                'high_res': prosilica_name,
02131     #                'prosilica_cal': self.prosilica_cal, 
02132 
02133     #                'left_image': left_name,
02134     #                'left_cal': self.left_cal,
02135 
02136     #                'right_image': right_name,
02137     #                'right_cal': self.right_cal}
02138     #                #'point_touched': point_touched_bl}
02139     #                
02140 
02141     #    ut.save_pickle(data_pkl, pickle_fname)
02142     #    print 'Recorded to', pickle_fname
02143 
02144     def record_perceptual_data(self, point3d_bl, image_frame, rdict=None, folder_name=None):
02145         rospy.loginfo('saving dataset..')
02146         #self.feature_ex.read(point3d_bl)
02147         if rdict == None:
02148             rospy.loginfo('Getting a kinect reading')
02149             rdict = self.feature_ex.read()['rdict']
02150             #rdict = self.kinect_listener.read()
02151         kimage = rdict['image']
02152         rospy.loginfo('Waiting for calibration.')
02153         while not self.feature_ex.cal.has_msg:
02154             time.sleep(.1)
02155 
02156         #which frames?
02157         rospy.loginfo('Getting transforms.')
02158         #k_T_bl = tfu.transform('openni_rgb_optical_frame', '/base_link', self.tf_listener)
02159         k_T_bl = tfu.transform(image_frame, '/base_link', self.tf_listener)
02160 
02161         tstring = time.strftime('%A_%m_%d_%Y_%I_%M%p')
02162         kimage_name = '%s_highres.png' % tstring
02163         pickle_fname = '%s_interest_point_dataset.pkl' % tstring   
02164         if folder_name != None:
02165             try:
02166                 os.mkdir(folder_name)
02167             except OSError, e:
02168                 print e
02169             kimage_name = pt.join(folder_name, kimage_name)
02170             pickle_fname = pt.join(folder_name, pickle_fname)
02171 
02172         rospy.loginfo('Saving images (basename %s)' % tstring)
02173         cv.SaveImage(kimage_name, kimage)
02174         rospy.loginfo('Saving pickles')
02175 
02176         data_pkl = {'touch_point': point3d_bl,
02177                     'points3d': rdict['points3d'],
02178                     'image': kimage_name,
02179                     'cal': self.feature_ex.cal, 
02180                     'k_T_bl': k_T_bl}
02181                     #'point_touched': point3d_bl}
02182 
02183         ut.save_pickle(data_pkl, pickle_fname)
02184         print 'Recorded to', pickle_fname
02185         return pickle_fname, kimage_name
02186 
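    # The pickle written above can be read back without a running robot; a sketch,
    # with an illustrative filename (the real one is timestamped at save time):
    #
    #   import hrl_lib.util as ut
    #   data_pkl = ut.load_pickle('Monday_06_13_2011_10_15AM_interest_point_dataset.pkl')
    #   touch_point = data_pkl['touch_point']  #3x1 point in base_link
    #   points3d    = data_pkl['points3d']     #kinect point cloud
    #   k_T_bl      = data_pkl['k_T_bl']       #camera frame from base_link at capture
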
02187     #def gather_interest_point_dataset(self, point):
02188     #    gaussian = pr.Gaussian(np.matrix([0, 0, 0.]).T, \
02189     #            np.matrix([[1., 0, 0], \
02190     #                       [0, .02**2, 0], \
02191     #                       [0, 0, .02**2]]))
02192 
02193     #    for i in range(100):
02194     #        # perturb_point
02195     #        gaussian_noise = gaussian.sample()
02196     #        gaussian_noise[0,0] = 0
02197     #        success_off, touchloc_bl = self.light_switch1(point, 
02198     #                        point_offset=np.matrix([-.15, 0, 0]).T, press_contact_pressure=300, 
02199     #                        move_back_distance=np.matrix([-.005,0,0]).T, press_pressure=2500, 
02200     #                        press_distance=np.matrix([0,0,-.15]).T, visual_change_thres=.03)
02201     #        rospy.loginfo('Lights turned off? %s' % str(success_off))
02202 
02203     #        pdb.set_trace()
02204     #        self.behaviors.movement.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
02205     #        self.record_perceptual_data(touchloc_bl)
02206     #        self.behaviors.movement.move_absolute(self.start_location, stop='pressure_accel')
02207     #        if success_off:
02208     #            self.behaviors.movement.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
02209     #            self.record_perceptual_data(touchloc_bl)
02210     #            self.behaviors.movement.move_absolute(self.start_location, stop='pressure_accel')
02211 
02212     #            success_on, touchloc_bl2 = self.light_switch1(point, 
02213     #                            point_offset=np.matrix([-.15,0,-.10]).T, press_contact_pressure=300, 
02214     #                            move_back_distance=np.matrix([-0.005, 0, 0]).T, press_pressure=2500, 
02215     #                            press_distance=np.matrix([0,0,.1]).T, visual_change_thres=.03)
02216     #            ##1
02217     #            #if success_on:
02218     #            #    self.movement.behaviors.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
02219     #            #    self.record_perceptual_data(touchloc_bl)
02220     #            #    self.movement.behaviors.move_absolute(self.start_location, stop='pressure_accel')
02221     #            #Turn on lights
02222     #            #success_on, touchloc_bl = self.light_switch1(npoint, 
02223     #        else:
02224     #            return
02225 
02226     ##
02227     # The behavior can make service calls to a GUI asking users to label examples.
02228     def practice(self, task_id, ctask_id, point3d_bl, stop_fun=None, params=None, 
02229             negative_cut_off=.5, resolution=.01, max_samples=5):
02230         if params == None:
02231             params = r3d.Recognize3DParam()
02232             params.uncertainty_x = 1.
02233             #param.n_samples = 2000
02234             params.uncertainty_z = .04
02235             params.uni_mix = .1
02236         pstart = time.time()
02237 
02238         kdict, image_name = self.read_features_save(task_id, point3d_bl, params)
02239         #learner = self.locations_man.learners[task_id]
02240         #pdb.set_trace()
02241         behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
02242         head_pose = self.robot.head.pose()
02243 
02244         kdict['image_T_bl'] = tfu.transform(self.OPTICAL_FRAME, 'base_link', self.tf_listener)
02245         point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
02246         point2d_img = self.feature_ex.cal.project(point3d_img)
02247 
02248         labels = []
02249         points3d_tried = []
02250         points2d_tried = []
02251         converged = False
02252         indices_added = []
02253         reset_times = []
02254 
02255         #while not converged or (stop_fun != None and not stop_fun(np.matrix(labels))):
02256         while True:
02257             if stop_fun != None and stop_fun(np.matrix(labels)):
02258                 rospy.loginfo('Stop function satisfied; exiting practice loop.')
02259                 break
02260 
02261             if stop_fun == None and len(indices_added) > params.max_points_per_site:
02262                 rospy.loginfo('practice: added enough points from this scan. Limit is %d points.' % params.max_points_per_site)
02263                 break
02264 
02265             #==================================================
02266             # Pick
02267             #==================================================
02268             #Find remaining instances
02269             iter_start = time.time()
02270             print 'input to inverse_indices'
02271             print '>>', indices_added, kdict['instances'].shape[1]
02272             remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
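                  # (Assumed semantics: inverse_indices returns the complement of the
                  # given index set, e.g. inverse_indices([1, 3], 5) -> [0, 2, 4].)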
02273             remaining_instances = kdict['instances'][:, remaining_pt_indices]
02274 
02275             #Ask learner to pick an instance
02276             ridx, selected_dist, converged = self.locations_man.learners[task_id].select_next_instances_no_terminate(remaining_instances)
02277             #if stop_fun == None and converged:
02278             #    rospy.loginfo('practice: Converged! Exiting loop.')
02279             #    break
02280 
02281 
02282             selected_idx = remaining_pt_indices[ridx[0]]
02283             #pdb.set_trace()
02284             indices_added.append(selected_idx)
02285 
02286             #==================================================
02287             # DRAW
02288             #==================================================
02289             img = cv.CloneMat(kdict['image'])
02290             #Draw the center
02291             r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)
02292 
02293             #Draw possible points
02294             r3d.draw_points(img, kdict['points2d']+np.matrix([1,1.]).T, [255, 255, 255], 4, -1)
02295 
02296             if len(points2d_tried) > 0:
02297                 _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
02298                 r3d.draw_points(img, pos_exp, [50, 255, 0], 8, 1)
02299                 r3d.draw_points(img, neg_exp, [50, 0, 255], 8, 1)
02300 
02301             predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))
02302             _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
02303             r3d.draw_points(img, pos_pred, [255, 204, 51], 3, -1)
02304             r3d.draw_points(img, neg_pred, [51, 204, 255], 3, -1)
02305 
02306             #Draw what we're selecting
02307             r3d.draw_points(img, kdict['points2d'][:, selected_idx], [255, 51, 204], 8, -1)
02308             self.locations_man.publish_image(task_id, img, postfix='_practice_pick')
02309 
02310             #==================================================
02311             # Execute!!
02312             #==================================================
02313             self.robot.head.set_pose(head_pose, 1)
02314             #self.robot.projector.set(False)
02315             if np.linalg.norm(kdict['points3d'][:, selected_idx] - point3d_bl) > negative_cut_off:
02316                 rospy.loginfo('#########################################')
02317                 rospy.loginfo('Point outside of negative cut off!! Eliminating %s' % (str(kdict['points3d'][:, selected_idx].T)))
02318                 rospy.loginfo('#########################################')
02319                 success = False
02320             else:
02321                 if len(points3d_tried) > 0:
02322                     existing_pts_tree = sp.KDTree(np.array(np.column_stack(points3d_tried).T))
02323                     close_by_indices = existing_pts_tree.query_ball_point(np.array(kdict['points3d'][:, selected_idx]).T, resolution)[0]
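                          # query_ball_point returns one list of neighbor indices per
                          # query point; [0] unpacks the list for our single query.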
02324                     if len(close_by_indices) > 0:
02325                         #labelsm = np.matrix(labels)[0, close_by_indices]
02326                         #ntotal = labelsm.shape[1]
02327                         rospy.loginfo('#########################################')
02328                         rospy.loginfo('Point within resolution of an existing point.') #Labeling %s' % (str(kdict['points3d'][:, selected_idx])))
02329                         rospy.loginfo('#########################################')
02330                         continue
02331                         #This can cause the automated mechanism to propagate false labels.
02332                         #if np.sum(labelsm) > (ntotal/2.0):
02333                         #    success = True
02334                         #else:
02335                         #    success = False
02336                         #rospy.loginfo('as %s' % str(success))
02337                     else:
02338                         success, _, undo_point_bl = behavior(kdict['points3d'][:, selected_idx])
02339                 else:
02340                     success, _ , undo_point_bl = behavior(kdict['points3d'][:, selected_idx])
02341             #self.robot.projector.set(True)
02342 
02343             if success:
02344                 color = [0,255,0]
02345                 label = r3d.POSITIVE
02346                 rospy.loginfo('=============================================')
02347                 rospy.loginfo('>> behavior successful')
02348                 rospy.loginfo('=============================================')
02349                 self.robot.sound.say('action succeeded')
02350             else:
02351                 label = r3d.NEGATIVE
02352                 color = [0,0,255]
02353                 rospy.loginfo('=============================================')
02354                 rospy.loginfo('>> behavior NOT successful')
02355                 rospy.loginfo('=============================================')
02356                 self.robot.sound.say('action failed')
02357 
02358             #==================================================
02359             # Bookkeeping
02360             #==================================================
02361             labels.append(label)
02362             points3d_tried.append(kdict['points3d'][:, selected_idx])
02363             points2d_tried.append(kdict['points2d'][:, selected_idx])
02364 
02365             datapoint = {'instances': kdict['instances'][:, selected_idx],
02366                          'points2d':  kdict['points2d'][:, selected_idx],
02367                          'points3d':  kdict['points3d'][:, selected_idx],
02368                          'sizes':     kdict['sizes'],
02369                          'labels':    np.matrix([label])
02370                          }
02371             self.locations_man.add_perceptual_data(task_id, datapoint)
02372             self.locations_man.save_database()
02373             self.locations_man.train(task_id, kdict)
02374             #pdb.set_trace()
02375 
02376             #==================================================
02377             # Reset Environment
02378             #==================================================
02379             reset_start = time.time()
02380             if success and ctask_id != None:
02381                 def any_pos_sf(labels_mat):
02382                     if np.any(r3d.POSITIVE == labels_mat):
02383                         return True
02384                     return False
02385                 #self.practice(ctask_id, None, point3d_bl, stop_fun=any_pos_sf)
02386                 undo_point_map = self.locations_man.data[ctask_id]['center']
02387                 undo_point_bl0 = tfu.transform_points(tfu.transform('base_link', 'map', self.tf_listener), undo_point_map)
02388                 #self.practice(ctask_id, None, undo_point_bl, stop_fun=any_pos_sf)
02389                 #pdb.set_trace()
02390                 num_points_added = self.practice(ctask_id, None, undo_point_bl0, stop_fun=any_pos_sf)
02391                 #if num_points_added > params.max_points_per_site:
02392                 #    #pdb.set_trace()
02393                 #    dataset = self.locations_man.get_perceptual_data(ctask_id)
02394                 #    npoints = dataset.inputs.shape[1]
02395                 #    pts_added_idx = range(npoints - num_points_added, npoints)
02396                 #    pts_remove_idx = pts_added_idx[:(num_points_added - params.max_points_per_site)]
02397                 #    pts_remove_idx.reverse()
02398                 #    rospy.loginfo('Got too many data points for %s throwing out %d points' % (ctask_id, len(pts_remove_idx)))
02399                 #    for idx in pts_remove_idx:
02400                 #        self.locations_man.remove_perceptual_data(ctask_id, idx)
02401 
02403 
02404             reset_end = time.time()
02405 
02406             #Classify
02407             predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))
02408 
02409             #==================================================
02410             # DRAW
02411             #==================================================
02412             img = cv.CloneMat(kdict['image'])
02413 
02414             #Draw the center
02415             r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)
02416 
02417             #Draw 'shadows' 
02418             r3d.draw_points(img, kdict['points2d']+np.matrix([1,1.]).T, [255, 255, 255], 4, -1)
02419 
02420             #Draw points tried
02421             _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
02422             r3d.draw_points(img, pos_exp, [50, 255, 0], 9, 1)
02423             r3d.draw_points(img, neg_exp, [50, 0, 255], 9, 1)
02424 
02425             _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
02426             r3d.draw_points(img, pos_pred, [255, 204, 51], 3, -1)
02427             r3d.draw_points(img, neg_pred, [51, 204, 255], 3, -1)
02428 
02429             #Draw what we're selecting
02430             r3d.draw_points(img, points2d_tried[-1], color, 8, -1)
02431             self.locations_man.publish_image(task_id, img, postfix='_practice_result')
02432 
02433             pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.pkl')
02434             #cv.SaveImage(ffull, img)
02435             ut.save_pickle({'image': image_name,
02436                             'pos': pos_exp,
02437                             'neg': neg_exp,
02438                             'pos_pred': pos_pred,
02439                             'neg_pred': neg_pred,
02440                             'tried': (points2d_tried[-1], label),
02441                             'center': point2d_img}, pkname)
02442 
02443             #ffull = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.png')
02444             #cv.SaveImage(ffull, img)
02445             #self.img_pub.publish(img)
02446             reset_time = reset_end - reset_start
02447             loop_time = (time.time() - iter_start) - reset_time
02448             reset_times.append(reset_time)
02449             print '**********************************************************'
02450             print 'Loop took %.3f seconds' % loop_time
02451             print '**********************************************************'
02452             self.locations_man.record_time(task_id, 'practice_loop_time', loop_time)
02453 
02454         if np.any(r3d.POSITIVE == np.matrix(labels)):
02455             self.locations_man.update_execution_record(task_id, 1)
02456 
02457         print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
02458         print 'returning from', task_id
02459         print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
02460         pend = time.time()
02461         practice_time = pend - pstart - np.sum(reset_times)
02462         self.locations_man.record_time(task_id, 'practice_func_time', practice_time)
02463         return len(indices_added)
02464 
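          # A minimal sketch (an assumption, mirroring any_pos_sf defined inside
          # practice) of a stop function: halt once the scan has yielded at least
          # one positive and one negative label.
          #   def pos_and_neg_sf(labels_mat):
          #       return np.any(labels_mat == r3d.POSITIVE) and \
          #              np.any(labels_mat == r3d.NEGATIVE)
          #   n_added = self.practice(task_id, ctask_id, point3d_bl,
          #                           stop_fun=pos_and_neg_sf)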
02465 
02466     def balance_positives_and_negatives(self, dataset):
02467         #pdb.set_trace() #bug here
02468         poslist = np.where(dataset.outputs == r3d.POSITIVE)[1].A1.tolist()
02469         neglist = np.where(dataset.outputs == r3d.NEGATIVE)[1].A1.tolist()
02470         npoint = min(len(poslist), len(neglist))
02471         assert(npoint > 0)
02472         sindices = poslist[:npoint]+neglist[:npoint]
02473         dataset.pt3d    = dataset.pt3d[:, sindices]
02474         dataset.pt2d    = dataset.pt2d[:, sindices]
02475         dataset.outputs = dataset.outputs[:, sindices]
02476         dataset.inputs  = dataset.inputs[:, sindices]
02477 
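          # balance_positives_and_negatives keeps the first npoint examples of each
          # class; a randomized variant (sketch only, not used in this file) would
          # subsample instead:
          #   poslist = np.random.permutation(poslist)[:npoint].tolist()
          #   neglist = np.random.permutation(neglist)[:npoint].tolist()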
02478     def profile_me(self, task_id, point_bl):
02479         for i in range(2):
02480             params = r3d.Recognize3DParam()
02481             params.uncertainty_x = 1.
02482             params.uncertainty_y = .04
02483             params.uncertainty_z = .04
02484             params.n_samples = 5000
02485             params.uni_mix = 0.1
02486             print 'PROFILE_ME ITERATION', i
02487             fea_dict, image_name = self.read_features_save(task_id, point_bl, params)
02488 
02489 
02490     ###
02491     # TODO: GASP!! WE MIGHT NOT NEED THIS AFTER ALL, JUST INITIALIZE RANDOMLY!
02492     ###
02493     def seed_dataset_explore(self, task_id, ctask_id, point_bl, stop_fun, 
02494             max_retries=30, closeness_tolerance=.01, positive_escape=.08, should_reset=False):
02495         #rospy.loginfo('seed_dataset_explore: %s' % task_id)
02496         ##case label
02497         #if self.locations_man.data[task_id]['dataset'] != None:
02498         #    case_labels = self.locations_man.data[task_id]['dataset'].outputs.A1.tolist()
02499         #else:
02500         #    case_labels = []
02501         #if stop_fun(case_labels):
02502         #    rospy.loginfo('seed_dataset_explore: stop function satisfied with label set. not executing.')
02503         #    return
02504 
02505         self.look_at(point_bl, True)
02506         rospy.sleep(2.)
02507         params = r3d.Recognize3DParam()
02508         params.uncertainty_x = 1.
02509         params.uncertainty_y = .04
02510         params.uncertainty_z = .04
02511         params.n_samples = 800
02512         params.uni_mix = 0.1
02513 
02514 
02515         #profile read_features_save
02516         #cProfile.runctx('self.profile_me(task_id, point_bl)', globals(), locals(), filename='read_features_save.prof')
02517         #pdb.set_trace()
02518     
02519         #Scan
02520         fea_dict, image_name = self.read_features_save(task_id, point_bl, params)
02521 
02522         params2 = r3d.Recognize3DParam()
02523         params2.n_samples = 5000
02524         fea_dict2, image_name2 = self.read_features_save(task_id, point_bl, params2)
02525 
02526         behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
02527         image_T_bl = tfu.transform(self.OPTICAL_FRAME, 'base_link', self.tf_listener)
02528         fea_dict['image_T_bl'] = image_T_bl
02529         
02530         #Rearrange sampled points by distance
02531         dists = ut.norm(fea_dict['points3d'] - point_bl)
02532         ordering = np.argsort(dists).A1
02533         points3d_sampled = fea_dict['points3d'][:, ordering]
02534         points2d_sampled = fea_dict['points2d'][:, ordering]
02535         instances_sampled = fea_dict['instances'][:, ordering]
02536         start_pose = self.robot.head.pose()
02537 
02538         #Figure out where to draw given point for visualization
02539         point3d_img = tfu.transform_points(fea_dict['image_T_bl'], point_bl)
02540         point2d_img = self.feature_ex.cal.project(point3d_img)
02541    
02542         #Book keeping for loop
02543         undo_ret = None
02544         points3d_tried = []
02545         points2d_tried = []
02546         labels = []
02547         sampled_idx = 0
02548         iter_count = 0
02549         need_reset = False
02550         behavior_end_state = False
02551         undo_point_bl = point_bl
02552 
02553         #pdb.set_trace()
02554         while iter_count < max_retries and not stop_fun(np.matrix(labels)) and sampled_idx < points3d_sampled.shape[1]:
02555             #==================================================
02556             # Pick
02557             #==================================================
02558             if len(points3d_tried) > 0 and \
02559                np.any(ut.norm(np.column_stack(points3d_tried) - points3d_sampled[:, sampled_idx]) < closeness_tolerance): 
02560                    sampled_idx = sampled_idx + 1 
02561                    continue
02562 
02563             if len(labels) > 0 and np.sum(labels) == len(labels) and\
02564                np.any(ut.norm(np.column_stack(points3d_tried) - points3d_sampled[:, sampled_idx]) < positive_escape): 
02565                    #pdb.set_trace()
02566                    sampled_idx = sampled_idx + 1 
02567                    continue
02568 
02569             #==================================================
02570             # Execute!!
02571             #==================================================
02572             self.robot.head.set_pose(start_pose, 1)
02573             #self.robot.projector.set(False)
02574             success, reason, undo_point_bl = behavior(points3d_sampled[:, sampled_idx])
02575             rospy.loginfo('======================================')
02576             rospy.loginfo('%s was %s' % (task_id, str(success)))
02577             rospy.loginfo('======================================')
02578             behavior_end_state = success
02579             #self.robot.projector.set(True)
02580 
02581             #==================================================
02582             # Bookkeeping
02583             #==================================================
02584             if success:
02585                 label = r3d.POSITIVE
02586                 need_reset = True
02587             else:
02588                 label = r3d.NEGATIVE
02589                 need_reset = False
02590                 
02591             points3d_tried.append(points3d_sampled[:, sampled_idx])
02592             points2d_tried.append(points2d_sampled[:, sampled_idx])
02593             labels.append(label)
02594             datapoint = {'instances': instances_sampled[:, sampled_idx],
02595                          'points3d':  points3d_sampled[:, sampled_idx],
02596                          'points2d':  points2d_sampled[:, sampled_idx],
02597                          'sizes':     fea_dict['sizes'],
02598                          'labels':    np.matrix([label])}
02599             self.locations_man.add_perceptual_data(task_id, datapoint)
02600             self.locations_man.save_database()
02601 
02602             iter_count = iter_count + 1
02603             sampled_idx = sampled_idx + 1
02604 
02605             #==================================================
02606             # DRAW
02607             #==================================================
02608             img = cv.CloneMat(fea_dict['image'])
02609             r3d.draw_points(img, points2d_sampled+np.matrix([1,1.]).T, [0, 0, 0], 3, -1)
02610             r3d.draw_points(img, points2d_sampled, [255, 255, 255], 3, -1)
02611             _, pos_points, neg_points = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
02612             r3d.draw_points(img, point2d_img, [255, 0, 0], 6, 2)
02613             r3d.draw_points(img, pos_points, [0, 255, 0], 4, -1)
02614             r3d.draw_points(img, neg_points, [0, 0, 255], 4, -1)
02615             r3d.draw_points(img, points2d_tried[-1], [0, 184, 245], 6, -1)
02616             self.locations_man.publish_image(task_id, img)
02617 
02618             pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '_seed.pkl')
02619             ut.save_pickle({'image': image_name,
02620                             'pos': pos_points,
02621                             'neg': neg_points,
02622                             #'pos_pred': pos_pred,
02623                             #'neg_pred': neg_pred,
02624                             'tried': [points2d_tried[-1], label],
02625                             'center': point2d_img}, pkname)
02626 
02627             #ffull = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '.png')
02628             #cv.SaveImage(ffull, img)
02629             #self.img_pub.publish(img)
02630     
02631             #==================================================
02632             # Reset Environment
02633             #==================================================
02634             if need_reset and should_reset:
02635                 self.robot.head.set_pose(start_pose, 1)
02636                 if ctask_id != None:
02637                     #If we were successful, call blind exploration with the undo behavior
02638                     def any_pos_sf(labels_mat):
02639                         if np.any(r3d.POSITIVE == labels_mat):
02640                             return True
02641                         return False
02642 
02643                     #ctask_point = points3d_tried[-1] #points3d_sampled[:, sampled_idx]
02644                     ctask_point = undo_point_bl
02645                     undo_ret = self.seed_dataset_explore(ctask_id, None, ctask_point, stop_fun=any_pos_sf, 
02646                                         max_retries=max_retries, closeness_tolerance=closeness_tolerance, 
02647                                         should_reset=False)
02648                     need_reset = False
02649                     if undo_ret['end_state']:
02650                         behavior_end_state = False
02651                     else:
02652                         behavior_end_state = True
02653 
02654         rospy.loginfo('Tried %d times' % iter_count)
02655         fea_dict_undo = None
02656         if undo_ret != None:
02657             fea_dict_undo = undo_ret['features']
02658 
02659         return {'end_state': behavior_end_state, 'features': fea_dict2, 
02660                 'features_undo': fea_dict_undo, 'undo_point': undo_point_bl}
02661 
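          # Sketch of a hypothetical caller consuming the dictionary returned by
          # seed_dataset_explore; keys match the return statement above.
          #   ret = self.seed_dataset_explore(task_id, ctask_id, point_bl,
          #                                   stop_fun=any_pos_sf, should_reset=True)
          #   if ret['end_state']:                # did the behavior end in success?
          #       kdict = ret['features']         # dense (5000 sample) feature scan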
02662     def read_features_save(self, task_id, point3d_bl, params=None):
02663         self.robot.projector.set(True)
02664         rospy.sleep(2.)
02665         f = self.feature_ex.read(point3d_bl, params=params)
02666         file_name, kimage_name = self.record_perceptual_data(point3d_bl, self.OPTICAL_FRAME, rdict=f['rdict'], folder_name=task_id)
02667         self.robot.projector.set(False)
02668 
02669         #image_T_bl = tfu.transform(self.OPTICAL_FRAME, 'base_link', self.tf_listener)
02670         #point3d_img = tfu.transform_points(image_T_bl, point3d_bl)
02671         #point2d_img = self.feature_ex.cal.project(point3d_img)
02672 
02673         #img = cv.CloneMat(f['image'])
02674         #self.draw_dots_nstuff(img, f['points2d'], 
02675         #        np.matrix([r3d.UNLABELED]* f['points2d'].shape[1]), 
02676         #        point2d_img)
02677         #self.img_pub.publish(img)
02678 
02679         return f, kimage_name
02680         #self.save_dataset(point, name, f['rdict'])
02681 
02682 
02683 
02684     ##
02685     #TODO: test this
02686     def execute_behavior(self, task_id, point3d_bl, save, max_retries=15, closeness_tolerance=.01, user_study=False):
02687         if user_study:
02688             rospy.loginfo('=====================================================')
02689             rospy.loginfo('user_study is True.  Trying to fail on first attempt!')
02690             rospy.loginfo('=====================================================')
02691         #Extract features
02692         params = r3d.Recognize3DParam()
02693         params.uncertainty_x = 1.
02694         params.uncertainty_z = .03
02695         params.uni_mix = .1
02696         params.n_samples = 1500
02697 
02698         kdict, image_name = self.read_features_save(task_id, point3d_bl, params)
02699         kdict['image_T_bl'] = tfu.transform(self.OPTICAL_FRAME, 'base_link', self.tf_listener)
02700         point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
02701         point2d_img = self.feature_ex.cal.project(point3d_img)
02702         head_pose = self.robot.head.pose()
02703 
02704         #Classify
02705         #pdb.set_trace()
02706         predictions = np.matrix(self.locations_man.learners[task_id].classify(kdict['instances']))
02707         pos_indices = np.where(r3d.POSITIVE == predictions)[1].A1
02708         loc2d_max = None
02709         try_num = 0
02710         updated_execution_record = False
02711         if user_study:
02712             first_time = True
02713         else:
02714             first_time = False
02715 
02716         #If find some positives:
02717         if len(pos_indices) > 0:
02718             indices_added = []
02719             remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
02720             remaining_instances = kdict['instances'][:, remaining_pt_indices]
02721             behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
02722 
02723             while len(pos_indices) > 0:
02724                 #select from the positive predictions the prediction with the most spatial support
02725                 print 'Num positive points found:', len(pos_indices)
02726 
02727                 #figure out max point
02728                 locs2d = None
02729                 if len(pos_indices) > 1:
02730                     locs2d = kdict['points2d'][:, pos_indices]
02731                     if np.any(np.isnan(locs2d)) or np.any(np.isinf(locs2d)):
02732                         pdb.set_trace()
02733                     locs2d_indices = np.where(False == np.sum(np.isnan(locs2d), 0))[1].A1
02734                     print locs2d[:, locs2d_indices]
02735                     loc2d_max, density_image = r3d.find_max_in_density(locs2d[:, locs2d_indices])
02736                     cv.SaveImage("execute_behavior.png", 255 * (np.rot90(density_image)/np.max(density_image)))
02737                     dists = ut.norm(kdict['points2d'] - loc2d_max)
02738                     selected_idx = np.argmin(dists)
02739                     if not first_time:
02740                         indices_added.append(selected_idx)
02741                 else:
02742                     selected_idx = pos_indices[0]
02743                     loc2d_max = kdict['points2d'][:, selected_idx]
02744 
02745                 selected_3d = kdict['points3d'][:, selected_idx]
02746 
02747                 #Draw
02748                 img = cv.CloneMat(kdict['image'])
02749                 r3d.draw_points(img, point2d_img,       [255, 0, 0],    10, -1)
02750                 r3d.draw_points(img, kdict['points2d'], [51, 204, 255], 3, -1)
02751                 if locs2d is not None:
02752                     r3d.draw_points(img, locs2d,        [255, 204, 51], 3, -1)
02753                 r3d.draw_points(img, loc2d_max,         [255, 204, 51], 10, -1)
02754                 self.locations_man.publish_image(task_id, img, postfix='_execute_pick')
02755 
02756                 #Execute
02757                 self.robot.head.set_pose(head_pose, 1)
02758                 print '============================================================'
02759                 print '============================================================'
02760                 print 'Try number', try_num
02761                 print '============================================================'
02762                 print '============================================================'
02763                 if first_time:
02764                     print 'original point selected is', selected_3d.T
02765                     selected_3d = selected_3d + np.matrix([0, 0, 0.2]).T
02766                 print 'point selected is', selected_3d.T
02767                 success, _, point_undo_bl = behavior(selected_3d)
02768 
02769                 #Save pickle for viz
02770                 pkname = pt.join(task_id, time.strftime('%A_%m_%d_%Y_%I_%M_%S%p') + '_execute.pkl')
02771                 _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
02772                 if success:
02773                     label = r3d.POSITIVE
02774                 else:
02775                     label = r3d.NEGATIVE
02776                 ut.save_pickle({'image': image_name,
02777                                 'pos_pred': pos_pred,
02778                                 'neg_pred': neg_pred,
02779                                 'tried': [kdict['points2d'][:, selected_idx], label],
02780                                 'center': point2d_img}, pkname)
02781 
02782                 try_num = try_num + 1
02783                 if not first_time:
02784                     if success:
02785                         #if save:
02786                         if not updated_execution_record:
02787                             self.locations_man.update_execution_record(task_id, 1.)
02788                             updated_execution_record = True
02789                         self.robot.sound.say('action succeeded')
02790                         label = r3d.POSITIVE
02791 
02792                         dataset = self.locations_man.data[task_id]['dataset']
02793                         nneg = np.sum(dataset.outputs == r3d.NEGATIVE) 
02794                         npos = np.sum(dataset.outputs == r3d.POSITIVE)
02795                         if nneg > npos and save:
02796                             datapoint = {'instances': kdict['instances'][:, selected_idx],
02797                                          'points2d':  kdict['points2d'][:, selected_idx],
02798                                          'points3d':  kdict['points3d'][:, selected_idx],
02799                                          'sizes':     kdict['sizes'],
02800                                          'labels':    np.matrix([label])}
02801                             self.locations_man.add_perceptual_data(task_id, datapoint)
02802                             if save:
02803                                 self.locations_man.save_database()
02804                             self.locations_man.train(task_id, kdict)
02805                         break
02806                     else:
02807                         self.robot.sound.say('action failed')
02808                         label = r3d.NEGATIVE
02809                         if not updated_execution_record:
02810                             self.locations_man.update_execution_record(task_id, 0.)
02811                             updated_execution_record = True
02812 
02813                         #We were wrong so we add this to our dataset and retrain
02814                         datapoint = {'instances': kdict['instances'][:, selected_idx],
02815                                      'points2d':  kdict['points2d'][:, selected_idx],
02816                                      'points3d':  kdict['points3d'][:, selected_idx],
02817                                      'sizes':     kdict['sizes'],
02818                                      'labels':    np.matrix([label])}
02819                         self.locations_man.add_perceptual_data(task_id, datapoint)
02820                         if save:
02821                             self.locations_man.save_database()
02822                         self.locations_man.train(task_id, kdict)
02823 
02824                         if try_num > max_retries:
02825                             break
02826 
02827                         #Remove point and predict again!
02828                         remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
02829                         remaining_instances = kdict['instances'][:, remaining_pt_indices]
02830                         predictions = np.matrix(self.locations_man.learners[task_id].classify(remaining_instances))
02831                         remaining_pos_indices = np.where(r3d.POSITIVE == predictions)[1].A1
02832                         pos_indices = remaining_pt_indices[remaining_pos_indices]
02833                 first_time = False
02834 
02835         #If no positives found:
02836         else:
02837             img = cv.CloneMat(kdict['image'])
02838             r3d.draw_points(img, point2d_img,       [255, 0, 0],    10, -1)
02839             r3d.draw_points(img, kdict['points2d'], [51, 204, 255], 3, -1)
02840             self.locations_man.publish_image(task_id, img, postfix='_execute_fail')
02841 
02842             if not updated_execution_record:
02843                 self.locations_man.update_execution_record(task_id, 0.)
02844                 updated_execution_record = True
02845 
02846             self.robot.sound.say("Perception failure.  Exploring around expected region.")
02847             #FAIL. Update the location's statistic with this failure. 
02848             if save:
02849                 self.locations_man.update_execution_record(task_id, 0.)
02850             def any_pos_sf(labels_mat):
02851                 if np.any(r3d.POSITIVE == labels_mat):
02852                     return True
02853                 return False
02854             ctask_id = self.locations_man.data[task_id]['complementary_task_id']
02855             self.seed_dataset_explore(task_id, ctask_id,
02856                     point3d_bl, max_retries=max_retries, stop_fun=any_pos_sf, 
02857                     closeness_tolerance=closeness_tolerance, should_reset=False)
02858 
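          # Hypothetical invocation sketch for execute_behavior; argument values
          # are illustrative, not taken from the original call sites.
          #   self.execute_behavior(task_id, point3d_bl, save=True,
          #                         max_retries=15, closeness_tolerance=.01)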
02859     
02860     #def autonomous_learn(self, point3d_bl, behavior, object_name): 
02861     #    # We learn, but must moderate between spatial cues and requirements of
02862     #    # the learner. Spatial cue is a heuristic that can guide to positive
02863     #    # examples. Learning heuristic reduces the number of experiments to
02864     #    # perform given that we know that we are generally *not* successful
02865     #    # (assume that this procedure launches only during non mission-critical circumstances).
02866     #    # So in the case where we're actively learning we're going to ignore the spatial heuristic.
02867     #    # Well... can we incorporate distance to the selected 3d point as a feature?
02868     #    # ah!
02869     #    learn_manager = self.learners[object_name]
02870     #    #scan and extract features
02871     #    self.robot.head.look_at(point3d_bl, 'base_link', True)
02872     #    learn_manager.scan(point3d_bl)
02873     #    gaussian = pr.Gaussian(np.matrix([ 0,      0,     0.]).T, \
02874     #                           np.matrix([[1.,     0,      0], \
02875     #                                      [0, .02**2,      0], \
02876     #                                      [0,      0, .02**2]]))
02877 
02878     #    #pdb.set_trace()
02879     #    gaussian_noise = np.matrix([0,0,0.]).T
02880     #    while not learn_manager.is_ready():
02881     #         pi = point3d_bl + gaussian_noise
02882     #         label = behavior(pi)
02883     #         #look at point, then try to add again
02884     #         if not learn_manager.add_example(pi, np.matrix([label])):
02885     #             rospy.logerr('Unable to extract features from point %s' % str(pi.T))
02886     #             continue
02887     #         learn_manager.train()
02888     #         learn_manager.draw_and_send()
02889     #         gaussian_noise = gaussian.sample()
02890     #         gaussian_noise[0,0] = 0
02891 
02892     #    #Acquire data
02893     #    #Given image, cloud, 3d point ask, extract features.
02894     #    #while no_interruptions and stopping_criteria_not_reached
02895     #    #    maximally_informative_point = get maximally informative point
02896     #    #    label = behavior(maximally_informative_point)
02897     #    #    retrain!
02898     #    converged = False
02899     #    while not converged:
02900     #        indices, dists = learn_manager.select_next_instances(1)
02901     #        if idx != None:
02902     #            pt2d = learn_manager.points2d[:, indices[0]]
02903     #            pt3d = learn_manager.points3d[:, indices[0]]
02904     #            label = behavior(pt3d)
02905     #            #learn_manager.add_example(pt3d, np.matrix([label]), pt2d)
02906     #            if not learn_manager.add_example(pi, np.matrix([label])):
02907     #                rospy.logerr('Unable to extract features from point %s' % str(pi.T))
02908     #                continue
02909     #            learn_manager.train()
02910     #            learn_manager.draw_and_send()
02911     #        else:
02912     #            converged = True
02913 
02914 
02915 class TestLearner:
02916 
02917     def __init__(self):
02918         self.rec_params = r3d.Recognize3DParam()
02919         self.locations_man = LocationManager('locations_narrow_v11.pkl', rec_params=self.rec_params)
02920 
02921     def test_training_set(self):
02922         #Pick
02923         tasks = self.locations_man.data.keys()
02924         for i, k in enumerate(tasks):
02925             print i, k
02926         task_id = tasks[int(raw_input())]
02927 
02928         #Train
02929         dataset = self.locations_man.data[task_id]['dataset']
02930         pca = self.locations_man.data[task_id]['pca']
02931 
02932         nneg = np.sum(dataset.outputs == r3d.NEGATIVE) 
02933         npos = np.sum(dataset.outputs == r3d.POSITIVE)
02934         print '================= Training ================='
02935         print 'NEG examples', nneg
02936         print 'POS examples', npos
02937         print 'TOTAL', dataset.outputs.shape[1]
02938         neg_to_pos_ratio = float(nneg)/float(npos)
02939         #pdb.set_trace()
02940 
02941         #np.random.permutation(range(da
02942         #separate into positive and negative
02943         #take 30%
02944         #train/test
02945         all_costs_scores = []
02946         all_ratio_scores = []
02947         all_bandwidth_scores = []
02948         for i in range(40):
02949             percent = .3
02950             negidx = np.where(dataset.outputs==r3d.NEGATIVE)[1].A1
02951             posidx = np.where(dataset.outputs==r3d.POSITIVE)[1].A1
02952             nneg = int(np.ceil(len(negidx) * percent))
02953             npos = int(np.ceil(len(posidx) * percent))
02954             negperm = np.random.permutation(range(len(negidx)))
02955             posperm = np.random.permutation(range(len(posidx)))
02956 
02957             test_neg_idx = negidx[negperm[0:nneg]]
02958             test_pos_idx = posidx[posperm[0:npos]]
02959             test_idx = np.concatenate((test_pos_idx, test_neg_idx))
02960 
02961             train_neg_idx = negidx[negperm[nneg:]]
02962             train_pos_idx = posidx[posperm[npos:]]
02963             train_idx = np.concatenate((train_pos_idx, train_neg_idx))
02964 
02965             test_set = dataset.subset(test_idx)
02966             train_set = dataset.subset(train_idx)
02967 
02968             #pdb.set_trace()
02969             ratio_score = []
02970             ratios = [neg_to_pos_ratio * (float(i+1)/10.) for i in range(10)]
02971             for r in ratios:
02972                 print '######################################################'
02973                 print 'ratio is ', r
02974                 svm_params = '-s 0 -t 2 -g .4 -c 1 '
02975                 learner = self.train(train_set, pca, r, svm_params)
02976                 predictions = np.matrix(learner.classify(test_set.inputs))
02977                 conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
02978                 combined = conf_mat[0,0] + conf_mat[1,1]
02979                 print '!!!!!!!!!!!'
02980                 print conf_mat
02981                 print combined
02982                 print '!!!!!!!!!!!'
02983                 ratio_score.append(combined)
02984 
02985             bandwidth_score = []
02986             bandwidths = []
02987             for i in range(40):
02988                 print '######################################################'
02989                 g = i * .1 #.00625
02990                 bandwidths.append(g)
02991                 print 'g is ', g
02992                 svm_params = '-s 0 -t 2 -g %.2f -c 1 ' % g
02993                 learner = self.train(train_set, pca, neg_to_pos_ratio, svm_params)
02994                 predictions = np.matrix(learner.classify(test_set.inputs))
02995                 conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
02996                 combined = conf_mat[0,0] + conf_mat[1,1]
02997                 print '!!!!!!!!!!!'
02998                 print conf_mat
02999                 print combined
03000                 print '!!!!!!!!!!!'
03001                 bandwidth_score.append(combined)
03002 
03003             cost_score = []
03004             costs = []
03005             for i in range(40):
03006                 print '######################################################'
03007                 cost = i + 1
03008                 costs.append(cost)
03009                 print 'cost is ', cost
03010                 svm_params = '-s 0 -t 2 -g .4 -c %.2f ' % cost
03011                 learner = self.train(train_set, pca, neg_to_pos_ratio, svm_params)
03012                 predictions = np.matrix(learner.classify(test_set.inputs))
03013                 conf_mat = r3d.confusion_matrix(test_set.outputs, predictions)
03014                 combined = conf_mat[0,0] + conf_mat[1,1]
03015                 print '!!!!!!!!!!!'
03016                 print conf_mat
03017                 print combined
03018                 print '!!!!!!!!!!!'
03019                 cost_score.append(combined)
03020 
03021             #print 'Cost score'
03022             #print costs
03023             #print cost_score, "\n"
03024 
03025             #print 'Ratio score'
03026             #print ratios
03027             #print ratio_score, "\n"
03028 
03029             #print 'Bandwidth score'
03030             #print bandwidths 
03031             #print bandwidth_score, "\n"
03032             all_costs_scores.append(cost_score)
03033             all_ratio_scores.append(ratio_score)
03034             all_bandwidth_scores.append(bandwidth_score)
03035 
03036         
03037         print 'Cost score'
03038         print costs
03039         costs_mean = np.sum(np.matrix(all_costs_scores), 0) / float(len(all_costs_scores))
03040         print costs_mean
03041         print np.max(costs_mean)
03042         print costs[np.argmax(costs_mean)]
03043 
03044         print 'Ratio score'
03045         print ratios
03046         ratios_mean = np.sum(np.matrix(all_ratio_scores), 0) / float(len(all_ratio_scores))
03047         print ratios_mean
03048         print np.max(ratios_mean)
03049         print ratios[np.argmax(ratios_mean)]
03050 
03051         print 'Bandwidth score'
03052         print bandwidths 
03053         bandwidths_mean = np.sum(np.matrix(all_bandwidth_scores), 0) / float(len(all_bandwidth_scores))
03054         print bandwidths_mean
03055         print np.max(bandwidths_mean)
03056         print bandwidths[np.argmax(bandwidths_mean)]
03057 
03058 
03059 
03060 
03061     def train(self, dataset, pca, neg_to_pos_ratio, svm_params):
03062         weight_balance = ' -w0 1 -w1 %.2f' % neg_to_pos_ratio
03063         previous_learner = None
03064         learner = r3d.SVMPCA_ActiveLearner(use_pca=True, 
03065                         reconstruction_std_lim=self.rec_params.reconstruction_std_lim, 
03066                         reconstruction_err_toler=self.rec_params.reconstruction_err_toler,
03067                         old_learner=None, pca=pca)
03068 
03069         rec_params = self.rec_params
03070         inputs_for_pca = dataset.inputs
03071         learner.train(dataset, 
03072                       inputs_for_pca,
03073                       svm_params + weight_balance,
03074                       rec_params.variance_keep)
03075         return learner
03076 
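          # The svm_params strings used above follow standard LIBSVM option syntax:
          #   -s 0   C-SVC                    -t 2   RBF kernel
          #   -g G   kernel gamma             -c C   cost parameter
          #   -wK W  scales C for class K (weight_balance uses this to reweight
          #          positives by the negative-to-positive ratio)
          # Example call, mirroring test_training_set:
          #   learner = self.train(train_set, pca, neg_to_pos_ratio,
          #                        '-s 0 -t 2 -g .4 -c 1 ')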
03077 
03078 def test_display():
03079     rec_params = r3d.Recognize3DParam()
03080     locations_man = LocationManager('locations_narrow_v11.pkl', 
03081             rec_params=rec_params)
03082     location_display = LocationDisplay(locations_man)
03083     location_display.run()
03084     #location_display.start()
03085     
03086 
03087 
03088 def launch():
03089     import optparse
03090     p = optparse.OptionParser()
03091     p.add_option("-d", "--display", action="store_true", default=False)
03092     p.add_option("-p", "--practice", action="store_true", default=False)
03093     p.add_option("-e", "--execute", action="store_true", default=False)
03094     p.add_option("-b", "--base", action="store_true", default=False)
03095     p.add_option("-s", "--static", action="store_true", default=False)
03096     p.add_option("-i", "--init", action="store_true", default=False)
03097     p.add_option("-t", "--test", action="store_true", default=False)
03098     p.add_option("-u", "--user_study", action="store_true", default=False)
03099 
03100     opt, args = p.parse_args()
03101     if opt.display:
03102         test_display()
03103         return
03104 
03105     if opt.practice or opt.execute or opt.init or opt.base:
03106         l = ApplicationBehaviors()
03107         mode = None
03108         if opt.practice:
03109             mode = 'practice'
03110         if opt.execute:
03111             mode = 'execute'
03112         if opt.init:
03113             mode = 'init'
03114         if opt.base:
03115             mode = 'update_base'
03116         rospy.loginfo('Using mode %s' % mode)
03117         l.run(mode, not opt.static, opt.user_study)
03118 
03119     if opt.test:
03120         tl = TestLearner()
03121         tl.test_training_set()
03122 
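      # Example invocations (sketch; flags as defined on the option parser above):
      #   ./linear_move.py --display     # run LocationDisplay
      #   ./linear_move.py --practice    # run ApplicationBehaviors in practice mode
      #   ./linear_move.py --test        # sweep SVM parameters via TestLearner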
03123 
03124 if __name__ == '__main__':
03125     launch()
03126     #cProfile.run('launch()', 'profile_linear_move.prof')
03127 
03128 
03157         #return {'points3d': np.column_stack(points3d_tried),
03158         #        'instances': np.column_stack(instances_tried),
03159         #        'points2d': np.column_stack(points2d_tried),
03160         #        'labels': np.matrix(labels),
03161         #        'sizes': fea_dict['sizes']}
03162 
03163 
03164     #def blind_exploration3(self, task_id, behavior, undo_behavior, point_bl, stop_fun, 
03165     #        max_retries=15, closeness_tolerance=.01, fea_dict=None):
03166     #    params = r3d.Recognize3DParam()
03167     #    params.uncertainty_x = 1.
03168     #    params.uncertainty_y = .02
03169     #    params.uncertainty_z = .02
03170     #    params.n_samples = 400
03171     #    params.uni_mix = 0.
03172     #    #MAX_RETRIES = 20
03173     #
03174 
03175     #    # instances, locs2d, locs3d, image, rdict, sizes = 
03176     #    if fea_dict == None:
03177     #        fea_dict, _ = self.read_features_save(task_id, point_bl, params)
03178     #        image_T_bl = tfu.transform('openni_rgb_optical_frame', 'base_link', self.tf_listener)
03179     #        fea_dict['image_T_bl'] = image_T_bl
03180     #    #fea_dict = self.feature_ex.read(expected_loc_bl=point_bl, params=params)
03181     #    
03182     #    dists = ut.norm(fea_dict['points3d'] - point_bl)
03183     #    ordering = np.argsort(dists).A1
03184     #    points3d_sampled = fea_dict['points3d'][:, ordering]
03185     #    points2d_sampled = fea_dict['points2d'][:, ordering]
03186     #    instances_sampled = fea_dict['instances'][:, ordering]
03187     #    start_pose = self.robot.head.pose()
03188 
03189     #    point3d_img = tfu.transform_points(fea_dict['image_T_bl'], point_bl)
03190     #    point2d_img = self.feature_ex.cal.project(point3d_img)
03191    
03192     #    sampled_idx = 0
03193     #    iter_count = 0
03194     #    labels = []
03195     #    points_tried = []
03196     #    tinstances = []
03197     #    sp2d = []
03198     #    while iter_count < max_retries and not stop_fun(np.matrix(labels)):
03199     #        if len(points_tried)> 0 and \
03200     #           np.any(ut.norm(np.column_stack(points_tried) - points3d_sampled[:, sampled_idx]) < closeness_tolerance):
03201     #            sampled_idx = sampled_idx + 1
03202     #            continue
03203 
03204     #        #pdb.set_trace()
03205     #        #self.robot.sound.say('executing behavior')
03206     #        self.robot.head.set_pose(start_pose, 1)
03207     #        success, reason = behavior(points3d_sampled[:, sampled_idx])
03208     #        iter_count = iter_count + 1
03209     #        points_tried.append(points3d_sampled[:, sampled_idx])
03210     #        tinstances.append(instances_sampled[:, sampled_idx])
03211     #        sp2d.append(points2d_sampled[:, sampled_idx])
03212     #        sampled_idx = sampled_idx + 1
03213 
03214     #        #tinstances.append(fea_dict['instances'][:,iter_count])
03215     #        #sp2d.append(fea_dict['points2d'][:,iter_count])
03216     #        #add point and label to points tried
03217     #        if success:
03218     #            labels.append(r3d.POSITIVE)
03219     #            if undo_behavior != None:
03220     #                #If we were successful, call blind exploration with the undo behavior
03221     #                def any_pos_sf(labels_mat):
03222     #                    if np.any(r3d.POSITIVE == labels_mat):
03223     #                        return True
03224     #                    return False
03225     #                if task_id != None:
03226     #                    utid = self.locations_man.create_undo_task(task_id)
03227     #                else:
03228     #                    utid = None
03229     #                #TODO: gather instances for undo action
03230     #                #TODO: figure out why position of point_bl is shifted in second call
03231     #                self.seed_dataset_explore(utid, undo_behavior, None, point_bl, any_pos_sf, 
03232     #                        max_retries, fea_dict=fea_dict)
03233     #                #success, reason = undo_behavior(points3d_sampled[:, 'iter_count'])
03234     #        else:
03235     #            labels.append(r3d.NEGATIVE)
03236 
03237     #        #Visualization
03238     #        img = cv.CloneMat(fea_dict['image'])
03239     #        r3d.draw_points(img, points2d_sampled, [255, 255, 255], 2, -1)
03240     #        _, pos_points, neg_points = separate_by_labels(np.column_stack(sp2d), np.matrix(labels))
03241     #        r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
03242     #        r3d.draw_points(img, pos_points, [0, 255, 0], 2, -1)
03243     #        r3d.draw_points(img, neg_points, [0, 0, 255], 2, -1)
03244     #        r3d.draw_points(img, sp2d[-1], [0, 184, 245], 3, -1)
03245     #        self.img_pub.publish(img)
03246     #
03247     #    rospy.loginfo('tried %d times' % iter_count)
03248     #    return {'points3d': np.column_stack(points_tried),
03249     #            'instances': np.column_stack(tinstances),
03250     #            'points2d': np.column_stack(sp2d),
03251     #            'labels': np.matrix(labels),
03252     #            'sizes': fea_dict['sizes']}
03253     #   #if iter_count > MAX_RETRIES:
03254     #   #    self.robot.sound.say('giving up tried %d times already' % MAX_RETRIES)
03255     #   #    break
03256     #   #elif not success:
03257     #   #     self.robot.sound.say('retrying')
03258 
03259     #   return points tried record
03260             #success, _ = self.light_switch1(perturbed_point_bl, point_offset=point_offset, \
03261             #        press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
03262             #        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
03263             #        visual_change_thres=.03)
03264 
03265     #   points tried = []
03266     #   while we have not succeeded and not stop_fun(points tried):
03267     #       label = behavior(point)
03268     #       add point and label to points tried
03269     #       perturb point
03270     #   return points tried record
03271 
03272 
03273     #def load_classifier(self, name, fname):
03274     #    print 'loading classifier'
03275     #    dataset = ut.load_pickle(fname)
03276     #    self.train(dataset, name)
03277 
03295 
#    def update_center(self, center_id, point_map):
#        #Add the new point to this location's point list and update its center
#        location_idx = self.location_labels[center_id]
#        ldata = self.location_data[location_idx]
#
#        rospy.loginfo('update_center: adding point to %d at %s.' % (location_idx, str(ldata['center'].T)))
#        ldata['points'].append(point_map)
#        ldata['center'] = np.column_stack(ldata['points']).mean(1)
#        self.location_centers[location_idx] = ldata['center']
#        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
#
03315 
03316 
03342 
03349 
03350 #   3)   listing all locations
03351 #   4)   listing locations closest to given point, with and without task
03352 
03353 
03363     
03364 #class PickPointsCloseToStartLocation:
03365 #
03366 #    def __init__(self, point_bl, closeness_tolerance=.01, max_retries=20):
03367 #        self.params = r3d.Recognize3DParam()
03368 #        self.params.uncertainty_x = 1.
03369 #        self.params.uncertainty_y = .02
03370 #        self.params.uncertainty_z = .02
03371 #        self.params.n_samples = 400
03372 #        self.params.uni_mix = 0.
03373 #
#        self.point_bl = point_bl
#        self.sampled_idx = 0
03375 #        self.iter_count = 0
03376 #        self.max_retries = max_retries
03377 #        self.closeness_tolerance = closeness_tolerance
03378 #
03379 #        self.points3d_tried = []
03380 #        self.points2d_tried = []
03381 #        self.instances_tried = []
03382 #
03383 #    def process_scan(self, fea_dict):
#        dists = ut.norm(fea_dict['points3d'] - self.point_bl)
03385 #        ordering = np.argsort(dists).A1
03386 #
03387 #        self.points3d_sampled = fea_dict['points3d'][:, ordering]
03388 #        self.points2d_sampled = fea_dict['points2d'][:, ordering]
03389 #        self.instances_sampled = fea_dict['instances'][:, ordering]
03390 #
03391 #    def get_params(self):
03392 #        return self.params
03393 #
03394 #    def stop(self):
#        return self.iter_count > self.max_retries
03396 #
03397 #    def pick_next(self):
03398 #        while len(self.points3d_tried) > 0 \
03399 #                and np.any(ut.norm(np.column_stack(self.points3d_tried) - self.points3d_sampled[:, self.sampled_idx]) < self.closeness_tolerance):
03400 #            self.sampled_idx = self.sampled_idx + 1 
03401 #
03402 #        self.points3d_tried.append(self.points3d_sampled[:, self.sampled_idx])
03403 #        self.points2d_tried.append(self.points2d_sampled[:, self.sampled_idx])
03404 #        self.instances_tried.append(self.instances_sampled[:, self.sampled_idx])
#        self.iter_count = self.iter_count + 1
03406 #
03407 #        return {'points3d':  self.points3d_sampled[:, self.sampled_idx],
03408 #                'points2d':  self.points2d_sampled[:, self.sampled_idx],
03409 #                'instances': self.instances_sampled[:, self.sampled_idx]}
03410 #
#    def get_instances_used(self):
#        if len(self.points3d_tried) > 0:
#            return {'points3d': np.column_stack(self.points3d_tried),
#                    'points2d': np.column_stack(self.points2d_tried),
#                    'instances': np.column_stack(self.instances_tried)}
#        else:
#            return None
03418 #
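# The class above orders scan candidates by distance to a seed point and
# skips anything within closeness_tolerance of a previously tried point.
# Below is a minimal, ROS-free sketch of that idea using plain 3xn numpy
# arrays instead of np.matrix; the function names are illustrative, not
# part of the original API.

def order_candidates(points3d, point_bl):
    #points3d: 3xn candidate locations, point_bl: 3x1 seed point
    dists = np.sqrt(np.sum((points3d - point_bl)**2, axis=0))
    return points3d[:, np.argsort(dists)]

def pick_next_candidate(ordered, tried, closeness_tolerance=.01):
    #return the closest candidate at least closeness_tolerance away from
    #every previously tried point (tried: list of 3x1 arrays)
    for i in range(ordered.shape[1]):
        cand = ordered[:, i:i+1]
        if all(np.linalg.norm(cand - t) >= closeness_tolerance for t in tried):
            tried.append(cand)
            return cand
    return None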
03419 #class PickPointsUsingActiveLearning:
03420 #
03421 #    def __init__(self, locations_manager):
03422 #        self.params = r3d.Recognize3DParam()
03423 #        self.params.uncertainty_x = 1.
03424 #        self.params.n_samples = 2000
03425 #        self.params.uni_mix = .1
03426 #
03427 #        self.points3d_tried = []
03428 #        self.points2d_tried = []
03429 #        self.instances_tried = []
03430 #
#    def process_scan(self, fea_dict):
#        pass
#
#    def get_params(self):
#        return self.params
#
#    def pick_next(self):
#        pass
#
#    def stop(self):
#        pass
#
#    def get_instances_used(self):
#        pass
03440 
03441 
03442         #self.LOCATION_ADD_RADIUS = .5
03443         #self.kinect_listener = kl.KinectListener()
03444         #self.kinect_cal = rc.ROSCameraCalibration('camera/rgb/camera_info')
03445 
03446         #self.kinect_img_sub = message_filters.Subscriber('/camera/rgb/image_color', smsg.Image)
03447         #self.kinect_depth_sub = message_filters.Subscriber('/camera/depth/points2', smsg.PointCloud2)
        #ts = message_filters.TimeSynchronizer([self.kinect_img_sub, self.kinect_depth_sub], 10)
03449         #ts.registerCallback(callback)
03450 
03451         #self.load_classifier('light_switch', 'labeled_light_switch_data.pkl')
03452         #self.start_location = (np.matrix([0.25, 0.30, 1.3]).T, np.matrix([0., 0., 0., 0.1]))
03453 
03454         #loading stored locations
03455         #self.saved_locations_fname = 'saved_locations.pkl'
03456         #self.location_centers = []
03457         #self.location_labels = []
03458         #self.location_data = []
03459         #self.locations_tree = None
03460 
03461         #if os.path.isfile(self.saved_locations_fname):
03462         #    location_data = ut.load_pickle(self.saved_locations_fname) #each col is a 3d point, 3xn mat
03463         #    for idx, rloc in enumerate(location_data):
03464         #        self.location_centers.append(rloc['center'])
03465         #        self.location_labels.append(idx)
03466         #    self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
03467         #    self.location_data = location_data
03468 
03469         # joint angles used for tuck
03470         #pdb.set_trace()
03471         #self.untuck()
03472         #self.behaviors.movement.set_movement_mode_ik()
03473         #self.movement.set_movement_mode_ik()
03474         #self.tuck()
03475         #self.r1 = np.matrix([[-0.31006769,  1.2701541 , -2.07800829, -1.45963243, -4.35290489,
03476         #                 -1.86052221,  5.07369192]]).T
03477         #self.l0 = np.matrix([[  1.05020383,  -0.34464327,   0.05654   ,  -2.11967694,
03478         #                 -10.69100221,  -1.95457839,  -3.99544713]]).T
03479         #self.l1 = np.matrix([[  1.06181076,   0.42026402,   0.78775801,  -2.32394841,
03480         #                 -11.36144995,  -1.93439025,  -3.14650108]]).T
03481         #self.l2 = np.matrix([[  0.86275197,   0.93417818,   0.81181124,  -2.33654346,
03482         #                 -11.36121856,  -2.14040499,  -3.15655164]]).T
03483         #self.l3 = np.matrix([[ 0.54339568,  1.2537778 ,  1.85395725, -2.27255481, -9.92394984,
03484         #                 -0.86489749, -3.00261708]]).T
03485 
03486 
03487 
03488     #def train(self, dataset, name):
03489     #    rec_params = self.feature_ex.rec_params
03490     #    nneg = np.sum(dataset.outputs == r3d.NEGATIVE) #TODO: this was copied and pasted from r3d
03491     #    npos = np.sum(dataset.outputs == r3d.POSITIVE)
03492     #    print '================= Training ================='
03493     #    print 'NEG examples', nneg
03494     #    print 'POS examples', npos
03495     #    print 'TOTAL', dataset.outputs.shape[1]
03496     #    neg_to_pos_ratio = float(nneg)/float(npos)
03497     #    weight_balance = ' -w0 1 -w1 %.2f' % neg_to_pos_ratio
03498     #    print 'training'
03499     #    learner = r3d.SVMPCA_ActiveLearner(use_pca=True)
    #    #TODO: figure out how to scale the inputs field!
03501     #    learner.train(dataset, dataset.inputs,
03502     #                  rec_params.svm_params + weight_balance,
03503     #                  rec_params.variance_keep)
03504     #    self.learners[name] = {'learner': learner, 'dataset': dataset}
    #    print 'done training'
03506 
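# The commented train() above counteracts label imbalance by weighting the
# positive class with the negative/positive count ratio (libsvm-style
# -w0/-w1 options). A small sketch of just that computation, with 0/1
# standing in for r3d.NEGATIVE/r3d.POSITIVE:

def class_weight_options(outputs, neg_label=0, pos_label=1):
    #outputs: 1xn label matrix/array -> libsvm-style weight option string
    nneg = float(np.sum(np.asarray(outputs) == neg_label))
    npos = float(np.sum(np.asarray(outputs) == pos_label))
    return ' -w0 1 -w1 %.2f' % (nneg / npos)

#e.g. class_weight_options(np.array([0, 0, 0, 1])) returns ' -w0 1 -w1 3.00'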
03507 
03508 
03509     #def tuck(self):
03510     #    ldiff = np.linalg.norm(pr2.diff_arm_pose(self.robot.left.pose(), self.l3))
03511     #            # np.linalg.norm(self.robot.left.pose() - self.l3)
03512     #    rdiff = np.linalg.norm(pr2.diff_arm_pose(self.robot.right.pose(), self.r1))
03513     #    #rdiff = np.linalg.norm(self.robot.right.pose() - self.r1)
03514     #    if ldiff < .3 and rdiff < .3:
03515     #        rospy.loginfo('tuck: Already tucked. Ignoring request.')
03516     #        return
03517     #    self.robot.right.set_pose(self.r1, block=False)
03518     #    self.robot.left.set_pose(self.l0, block=True)
03519     #    poses = np.column_stack([self.l0, self.l1, self.l2, self.l3])
03520     #    #pdb.set_trace()
03521     #    self.robot.left.set_poses(poses, np.array([0., 1.5, 3, 4.5]))
03522 
03523 
03524     #def untuck(self):
03525     #    if np.linalg.norm(self.robot.left.pose() - self.l0) < .3:
03526     #        rospy.loginfo('untuck: Already untucked. Ignoring request.')
03527     #        return
03528     #    self.robot.right.set_pose(self.r1, 2., block=False)
03529     #    self.robot.left.set_pose(self.l3, 2.,  block=True)
03530     #    poses = np.column_stack([self.l3, self.l2, self.l1, self.l0])
03531     #    self.robot.left.set_poses(poses, np.array([0., 3., 6., 9.])/2.)
03532 
03533 
03534             #if len(self.location_centers) < 1:
03535             #    return
03536             #rospy.loginfo('click_cb: double clicked but no 3d point given')
03537             #rospy.loginfo('click_cb: will use the last successful location given')
03538 
03539             #base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
03540             #point_bl = tfu.transform_points(base_link_T_map, self.location_centers[-1])
03541             #rospy.loginfo('click_cb: using ' + str(self.location_centers[-1].T))
03542             #self.location_activated_behaviors(point_bl, stored_point=True)
03543 
03544 
03545     #def find_close_by_points(self, point_map):
    #    if self.locations_tree is not None:
03547     #        close_by_locs = self.locations_tree.query_ball_point(np.array(point_map.T), self.LOCATION_ADD_RADIUS)[0]
03548     #        return close_by_locs
03549     #    else:
03550     #        return []
03551 
03552     #def find_close_by_points_match_task(self, point_map, task):
03553     #    matches = self.find_close_by_points(point_map)
03554     #    task_matches = []
03555     #    for m in matches:
03556     #        idx = self.location_labels[m]
03557     #        ldata = self.location_data[idx]
03558     #        if ldata['task'] == task:
03559     #            task_matches.append(m)
03560     #    return task_matches
03561 
03562     #def location_add(self, point_map, task, data):
03563     #    close_by_locs = self.find_close_by_points_match_task(point_map, task)
03564     #    if len(close_by_locs) == 0:
03565     #        rospy.loginfo('location_add: point not close to any existing location. creating new record.')
03566     #        self.location_data.append({
03567     #            'task': task, 
03568     #            'center': point_map, 
03569     #            'perceptual_dataset': None,
03570     #            'points':[point_map]})
03571     #        self.location_centers.append(point_map)
03572     #        self.location_labels.append(len(self.location_data) - 1)
03573     #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
03574     #    else:
03575     #        #If close by locations found then add to points list and update center
03576     #        location_idx = self.location_labels[close_by_locs[0]]
03577     #        ldata = self.location_data[location_idx]
03578 
03579     #        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
03580     #        ldata['points'].append(point_map)
03581     #        ldata['center'] = np.column_stack(ldata['points']).mean(1)
03582     #        self.location_centers[location_idx] = ldata['center']
03583     #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
03584 
03585     #    ut.save_pickle(self.location_data, self.saved_locations_fname)
03586     #    rospy.loginfo('location_add: saved point in map.')
03587 
03588 
03589     #def location_add(self, point_map, task):
03590     #    close_by_locs = self.find_close_by_points_match_task(point_map, task)
03591     #    if len(close_by_locs) == 0:
03592     #        rospy.loginfo('location_add: point not close to any existing location. creating new record.')
03593     #        self.location_data.append({
03594     #            'task': task, 
03595     #            'center': point_map, 
03596     #            'points':[point_map]})
03597     #        self.location_centers.append(point_map)
03598     #        self.location_labels.append(len(self.location_data) - 1)
03599     #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
03600 
03601     #    else:
03602     #        #If close by locations found then add to points list and update center
03603     #        location_idx = self.location_labels[close_by_locs[0]]
03604     #        ldata = self.location_data[location_idx]
03605 
03606     #        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
03607     #        ldata['points'].append(point_map)
03608     #        ldata['center'] = np.column_stack(ldata['points']).mean(1)
03609     #        self.location_centers[location_idx] = ldata['center']
03610     #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
03611 
03612     #    ut.save_pickle(self.location_data, self.saved_locations_fname)
03613     #    rospy.loginfo('location_add: saved point in map.')
03614 
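# The location_add/find_close_by_points methods above keep one center per
# known location, match new points with a scipy KDTree radius query, and
# recompute a center as the mean of its accumulated points. A condensed,
# ROS-free sketch of that bookkeeping (the class name and add_radius are
# illustrative; points are plain 3x1 numpy arrays):

class LocationStore:
    def __init__(self, add_radius=.5):
        self.add_radius = add_radius
        self.centers = []   #one 3x1 center per location
        self.points = []    #points[i] holds all observations of location i
        self.tree = None

    def _rebuild_tree(self):
        self.tree = sp.KDTree(np.column_stack(self.centers).T)

    def add(self, point):
        #merge point into a close-by location, or start a new one
        if self.tree is not None:
            close = self.tree.query_ball_point(point.ravel(), self.add_radius)
            if len(close) > 0:
                idx = close[0]
                self.points[idx].append(point)
                self.centers[idx] = np.column_stack(self.points[idx]).mean(1).reshape(3, 1)
                self._rebuild_tree()
                return idx
        self.centers.append(point)
        self.points.append([point])
        self._rebuild_tree()
        return len(self.centers) - 1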
03615 
03616     #def record_processed_data_kinect2(self, point3d_bl, kinect_fea):
03617     #    instances, locs2d_image, locs3d_bl, image = kinect_fea #self.feature_ex.read(point3d_bl)
03618     #    #rospy.loginfo('Getting a kinect reading')
03619 
03620     #    tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
03621     #    kimage_name = '%s_highres.png' % tstring
    #    cv.SaveImage(kimage_name, image)
03623 
03624     #    preprocessed_dict = {'instances': instances,
03625     #                         'points2d': locs2d_image,
03626     #                         'points3d': locs3d_bl,
03627     #                         'image': kimage_name,
03628     #                         'labels': labels,
03629     #                         'sizes': feature_extractor.sizes}
03630 
03631 
03632         #self.feature_ex.read(point3d_bl)
03633         #rdict = self.kinect_listener.read()
03634         #kimage = rdict['image']
03635         #rospy.loginfo('Waiting for calibration.')
        #while not self.kinect_cal.has_msg:
03637         #    time.sleep(.1)
03638 
03639         #which frames?
03640         #rospy.loginfo('Getting transforms.')
03641         #k_T_bl = tfu.transform('openni_rgb_optical_frame', '/base_link', self.tf_listener)
03642         #tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
03643         #kimage_name = '%s_highres.png' % tstring
03644         #rospy.loginfo('Saving images (basename %s)' % tstring)
03645         #cv.SaveImage(kimage_name, kimage)
03646         #rospy.loginfo('Saving pickles')
03647         #pickle_fname = '%s_interest_point_dataset.pkl' % tstring   
03648 
03649         #data_pkl = {'touch_point': point3d_bl,
03650         #            'points3d': rdict['points3d'],
03651         #            'image': kimage_name,
        #            'cal': self.kinect_cal,
03653         #            'k_T_bl': k_T_bl}
03654                     #'point_touched': point3d_bl}
03655 
03656         #ut.save_pickle(data_pkl, pickle_fname)
03657         #print 'Recorded to', pickle_fname
03658 
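# The recording code above names every artifact with a single strftime
# timestamp so the saved image and the pickled capture stay associated. A
# sketch of that pattern (function name and arguments are illustrative;
# the payload keys follow the data_pkl dict above):

def save_capture(image_saver, data_pkl_base):
    #image_saver: callable taking a filename; data_pkl_base: dict to extend
    tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
    kimage_name = '%s_highres.png' % tstring
    image_saver(kimage_name)
    data_pkl = dict(data_pkl_base, image=kimage_name)
    pickle_fname = '%s_interest_point_dataset.pkl' % tstring
    ut.save_pickle(data_pkl, pickle_fname)
    return pickle_fname

#e.g. save_capture(lambda fn: cv.SaveImage(fn, kimage), {'touch_point': point3d_bl})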
03659 
03660 
03661 
03662             #npoint = point + gaussian_noise
03663             #success_off, touchloc_bl = self.light_switch1(npoint, 
03664             #pdb.set_trace()
03665 
03666 
03667 
03668 
03669 #    ##
03670 #    # The behavior can make service calls to a GUI asking users to label
03671 #    def repeat_action(self, task_id, ctask_id, point3d_bl, sampling_object, stop_fun, fea_dict=None):
03672 #
03673 #        # instances, locs2d_image, locs3d_bl, image, raw_dict = 
03674 #        #kf_dict = self.feature_ex.read(point3d_bl)
03675 #        params = r3d.Recognize3DParam()
03676 #        params.uncertainty_x = 1.
03677 #        params.n_samples = 2000
03678 #        params.uni_mix = .1
03679 #
03680 #        kdict, fname = self.read_features_save(task_id, point3d_bl, params)
03681 #        learner = self.locations_man.learners[task_id]
03682 #        behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
03683 #        undo_behavior = self.get_undo_behavior_by_task(self.locations_man.data[task_id]['task'])
03684 #        start_pose = self.robot.head.pose()
03685 #
03686 #        kdict['image_T_bl'] = tfu.transform('openni_rgb_optical_frame', 'base_link', self.tf_listener)
03687 #        point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
03688 #        point2d_img = self.feature_ex.cal.project(point3d_img)
03689 #
03690 #        labels = []
03691 #        points3d_tried = []
03692 #        points2d_tried = []
03693 #        converged = False
03694 #        indices_added = []
03695 #        pdb.set_trace()
03696 #
03697 #
03698 #        while not converged and not stop_fun(np.matrix(labels)):
03699 #            #Find remaining instances
03700 #            remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
03701 #            remaining_instances = kdict['instances'][:, remaining_pt_indices]
03702 #
03703 #            #Ask learner to pick an instance
03704 #            ridx, selected_dist, converged = learner.select_next_instances_no_terminate(remaining_instances)
03705 #            selected_idx = remaining_pt_indices[ridx]
03706 #            indices_added.append(selected_idx)
03707 #
03708 #            #draw
03709 #            img = cv.CloneMat(kdict['image'])
03710 #            #Draw the center
03711 #            r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
03712 #            #Draw possible points
03713 #            r3d.draw_points(img, kdict['points2d'], [255, 255, 255], 2, -1)
03714 #            #Draw what we have so far
03715 #            if len(points2d_tried) > 0:
03716 #                _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
03717 #                r3d.draw_points(img, pos_exp, [0, 255, 0], 3, 1)
03718 #                r3d.draw_points(img, neg_exp, [0, 0, 255], 3, 1)
03719 #
03720 #            predictions = np.matrix(learner.classify(kdict['instances']))
03721 #            _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
03722 #            r3d.draw_points(img, pos_pred, [0, 255, 0], 2, -1)
03723 #            r3d.draw_points(img, neg_pred, [0, 0, 255], 2, -1)
03724 #
03725 #            #Draw what we're selecting
03726 #            r3d.draw_points(img, kdict['points2d'][:, selected_idx], [0, 184, 245], 3, -1)
03727 #            self.img_pub.publish(img)
03728 #
03729 #            #Get label for instance
03730 #            self.robot.head.set_pose(start_pose, 1)
03731 #
#            #EXECUTE!!
03733 #            success, reason = behavior(kdict['points3d'][:, selected_idx])
03734 #            if success:
03735 #                color = [0,255,0]
03736 #                label = r3d.POSITIVE
03737 #                def any_pos_sf(labels_mat):
03738 #                    if np.any(r3d.POSITIVE == labels_mat):
03739 #                        return True
03740 #                    return False
03741 #                utid = self.locations_man.create_undo_task(task_id)
03742 #                self.blind_exploration2(utid, undo_behavior, None, point3d_bl, any_pos_sf, 
03743 #                        max_retries=max_undo_retries, fea_dict=kdict)
03744 #
03745 #            else:
03746 #                label = r3d.NEGATIVE
03747 #                color = [0,0,255]
03748 #
03749 #            labels.append(label)
03750 #            points3d_tried.append(kdict['points3d'][:, selected_idx])
03751 #            points2d_tried.append(kdict['points2d'][:, selected_idx])
03752 #
03753 #            datapoint = {'instances': kdict['instances'][:, selected_idx],
03754 #                         'points2d':  kdict['points2d'][:, selected_idx],
03755 #                         'points3d':  kdict['points3d'][:, selected_idx],
03756 #                         'sizes':     kdict['sizes'],
03757 #                         'labels':    np.matrix([label])
03758 #                         }
03759 #            self.locations_man.add_perceptual_data(task_id, datapoint)
03760 #            self.locations_man.save_database()
03761 #            self.locations_man.train(task_id)
03762 #
03763 #            #Classify
03764 #            predictions = np.matrix(learner.classify(kdict['instances']))
03765 #
03766 #            #Draw
03767 #            img = cv.CloneMat(kdict['image'])
03768 #            _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
03769 #            r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
03770 #            r3d.draw_points(img, kdict['points2d'], [255, 255, 255], 2, -1)
03771 #            r3d.draw_points(img, pos_exp, [0, 255, 0], 3, 1)
03772 #            r3d.draw_points(img, neg_exp, [0, 0, 255], 3, 1)
03773 #
03774 #            _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
03775 #            r3d.draw_points(img, pos_pred, [0, 255, 0], 2, -1)
03776 #            r3d.draw_points(img, neg_pred, [0, 0, 255], 2, -1)
03777 #            r3d.draw_points(img, points2d_tried[-1], color, 3, -1)
03778 #
03779 #            #publish
03780 #            self.img_pub.publish(img)
03781 
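# Stripped of the robot and drawing code, the commented repeat_action above
# is a select-execute-label-retrain loop. A compressed sketch of just that
# control flow; select/execute/train are stand-ins for the learner calls
# and behavior execution used above:

def active_labeling_loop(instances, select, execute, train, max_labels=10):
    #instances: d x n feature matrix of candidate points
    #select(remaining_instances) -> (relative_index, converged)
    #execute(column_index) -> True on behavior success
    #train(indices, labels) retrains the classifier on the labeled columns
    indices_added, labels = [], []
    converged = False
    while not converged and len(indices_added) < max_labels:
        remaining = [i for i in range(instances.shape[1]) if i not in indices_added]
        if len(remaining) == 0:
            break
        ridx, converged = select(instances[:, remaining])
        selected_idx = remaining[ridx]
        indices_added.append(selected_idx)
        labels.append(1 if execute(selected_idx) else 0)  #POSITIVE/NEGATIVE
        train(indices_added, labels)   #retrain after every new label
    return indices_added, labels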
03782 
03783 
03784 
03785 
03786 
03787     #Save dataset in the location's folder
03788     #def save_dataset(self, task_id, point, rdict):
03789     #    pt.join(task_id, 
03790     #    self.locations_man
03791     #    self.record_perceptual_data(point, rdict)
03792     #    #TODO...
03793 
03794     #TODO TEST
03795     #BOOKMARK 3/7 4:03 AM
03796     #LAST DITCH EXECUTION(point, stop_fun):
03797     #def blind_exploration(self, behavior, point_bl, stop_fun, max_retries=15):
03798     #    gaussian = pr.Gaussian(np.matrix([ 0,      0,      0.]).T, \
03799     #                           np.matrix([[1.,     0,      0], \
03800     #                                      [0, .02**2,      0], \
03801     #                                      [0,      0, .02**2]]))
03802 
03803     #    iter_count = 0
03804     #    gaussian_noise = np.matrix([0, 0, 0.0]).T #We want to try the given point first
03805     #    labels = []
03806     #    points_tried = []
03807 
03808     #    #while we have not succeeded and not stop_fun(points tried):
    #    while iter_count < max_retries and not stop_fun(np.matrix(labels)):
03810     #        perturbation = gaussian_noise
03811     #        perturbed_point_bl = point_bl + perturbation
03812 
03813     #        self.robot.sound.say('executing behavior')
03814     #        success, reason = behavior(perturbed_point_bl)
03815     #        points_tried.append(perturbed_point_bl)
03816 
03817     #        #add point and label to points tried
03818     #        if success:
03819     #            labels.append(r3d.POSITIVE)
03820     #        else:
03821     #            labels.append(r3d.NEGATIVE)
03822 
03823     #        #perturb point
03824     #        gaussian_noise = gaussian.sample()
03825     #        gaussian_noise[0,0] = 0
03826     #        iter_count = iter_count + 1 
    #
    #    self.robot.sound.say('tried %d times' % iter_count)
    #    return np.column_stack(points_tried)
03830 
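# blind_exploration above retries a behavior under zero-mean Gaussian
# perturbations with small y/z variance, always zeroing the x component so
# retries stay in the approach plane. A runnable sketch with numpy's
# multivariate normal standing in for the hrl_lib Gaussian:

def perturbed_retries(point_bl, behavior, max_retries=15):
    #point_bl: length-3 array; behavior(p) -> bool. First try is unperturbed.
    cov = np.diag([1., .02**2, .02**2])
    noise = np.zeros(3)
    points_tried, labels = [], []
    for _ in range(max_retries):
        p = point_bl + noise
        labels.append(behavior(p))
        points_tried.append(p)
        if labels[-1]:
            break
        noise = np.random.multivariate_normal(np.zeros(3), cov)
        noise[0] = 0.   #never perturb along the approach (x) axis
    return np.column_stack(points_tried), labels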
03831     #def blind_exploration2(self, task_id, behavior, undo_behavior, point_bl, stop_fun, 
03832     #        max_retries=15, closeness_tolerance=.005, fea_dict=None):
03833     #    params = r3d.Recognize3DParam()
03834     #    params.uncertainty_x = 1.
03835     #    params.uncertainty_y = .02
03836     #    params.uncertainty_z = .02
03837     #    params.n_samples = 400
03838     #    params.uni_mix = 0.
03839     #    MAX_RETRIES = 20
03840     #
    #    if fea_dict is None:
03842     #        fea_dict, _ = self.read_features_save(task_id, point_bl, params)
03843     #    
03844     #    dists = ut.norm(fea_dict['points3d'] - point_bl)
03845     #    ordering = np.argsort(dists).A1
03846     #    points3d_sampled = fea_dict['points3d'][:, ordering]
03847     #    points2d_sampled = fea_dict['points2d'][:, ordering]
03848     #    instances_sampled = fea_dict['instances'][:, ordering]
03849 
03850     #    labels = []
03851     #    points_tried = []
03852     #    tinstances = []
03853     #    sp2d = []
03854 
03855     #    labels.append(r3d.POSITIVE)
03856     #    points_tried.append(points3d_sampled[:, 0])
03857     #    tinstances.append(instances_sampled[:, 0])
03858     #    sp2d.append(points2d_sampled[:, 0])
03859 
03860     #    labels.append(r3d.NEGATIVE)
03861     #    points_tried.append(points3d_sampled[:, 1])
03862     #    tinstances.append(instances_sampled[:, 1])
03863     #    sp2d.append(points2d_sampled[:, 1])
03864 
03865     #    return {'points3d': np.column_stack(points_tried),
03866     #            'instances': np.column_stack(tinstances),
03867     #            'points2d': np.column_stack(sp2d),
03868     #            'labels': np.matrix(labels),
03869     #            'sizes': fea_dict['sizes']}
03870 
03871         #def __init__(self, object_name, labeled_data_fname, tf_listener):
03872         #make learner
03873         #learner = SVMActiveLearnerApp()
        #labeled_light_switch_dataset = ut.load_pickle(labeled_data_fname)
        #learner.train(labeled_light_switch_dataset,
        #              labeled_light_switch_dataset.sizes['intensity'],
        #              self.params.variance_keep)
        #self.learners[object_name] = learner
03879 
03880 
03881     #def locate_light_switch(self):
03882     #    #capture data
03883     #    pointcloud_msg = self.laser_scan.scan(math.radians(180.), math.radians(-180.), 20.)
03884     #    prosilica_image = self.prosilica.get_frame() #TODO check if this is a cvmat
    #    while not self.prosilica_cal.has_msg:
03886     #        time.sleep(.1)
03887 
03888     #    #preprocess 
03889     #    ic_data = IntensityCloudData(pointcloud_msg, prosilica_image, 
03890     #                    tfu.transform('/high_def_optical_frame', '/base_link', self.tf_listener), 
03891     #                    self.prosilica_cal,                                                       
03892     #                    r3d.Recognize3DParam())
03893     #    instances = ic_data.extract_vectorized_features()
03894 
03895     #    results = []
03896     #    for i in range(instances.shape[1]):
03897     #        nlabel = self.learners['light_switch'].classify(instances[:, i])
03898     #        results.append(nlabel)
03899 
03900     #    results = np.matrix(results)
03901     #    positive_indices = np.where(results == r3d.POSITIVE)[1]
03902 
03903     #    #want 3d location of each instance
03904     #    positive_points_3d = ic_data.sampled_points[:, positive_indices]
03905 
03906     #    #return a random point for now
03907     #    rindex = np.random.randint(0, len(positive_indices))
03908     #    return positive_points_3d[:,rindex]
03909 
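# locate_light_switch above classifies every feature column, keeps the
# positive columns, and (for now) returns one at random. The same step as a
# small function, with a guard for the no-positives case that the commented
# version would trip over; classify_fn stands in for the learner:

def pick_random_positive(instances, points3d, classify_fn, positive=1):
    #instances: d x n features; points3d: 3 x n matching locations
    results = np.array([classify_fn(instances[:, i]) for i in range(instances.shape[1])])
    positive_indices = np.where(results == positive)[0]
    if len(positive_indices) == 0:
        return None
    return points3d[:, positive_indices[np.random.randint(0, len(positive_indices))]]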
03910 
03911     #def add_perturbation_to_location(self, point_map, perturbation):
03912     #    locs = self.find_close_by_points(point_map)
    #    if len(locs) > 0:
    #        location = self.location_data[self.location_labels[locs[0]]]
03915     #        if not location.has_key('perturbation'):
03916     #            location['perturbation'] = []
03917     #        location['perturbation'].append(perturbation)
03918     #        return True
03919     #    return False
03920 
03921 
03922 
03923 
03924 
03925 
03926 
03927 
03928 
03929 
03930 
03931 
03932 
03933 
03934 
03935 
03936                 #self.go_to_home_pose()
03937                 #print '>>>> POINT IS', point_bl_t1.T
03938                 #point_bl_t1 = np.matrix([[ 0.73846737,  0.07182931,  0.55951065]]).T
        #DIST_THRESHOLD = .8   #for lightswitch
        #DIST_THRESHOLD = .85  #for drawers
03941         #DIST_APPROACH = .5
03942         #COARSE_STOP = .7
03943         #FINE_STOP = .7
03944         #VOI_RADIUS = .2
03945 
03946         #point_dist = np.linalg.norm(point_bl_t0[0:2,0])
03947         #rospy.loginfo('run_behaviors: Point is %.3f away.' % point_dist)
03948         #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
03949         #point_map = tfu.transform_points(map_T_base_link, point_bl_t0)
03950 
03951         #if point_dist > DIST_THRESHOLD:
03952         #    rospy.loginfo('run_behaviors: Point is greater than %.1f m away (%.3f).  Driving closer.' % (DIST_THRESHOLD, point_dist))
03953         #    ##self.turn_to_point(point_bl_t0)
03954         #    rospy.loginfo( 'run_behaviors: CLICKED on point_bl ' + str(point_bl_t0.T))
03955 
03956         #    ret = self.drive_approach_behavior(point_bl_t0, dist_far=COARSE_STOP)
03957         #    if ret != 3:
03958         #        base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
03959         #        point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
03960         #        dist_end = np.linalg.norm(point_bl_t1[0:2,0])
03961         #        if dist_end > DIST_THRESHOLD:
03962         #            rospy.logerr('run_behaviors: drive_approach_behavior failed! %.3f' % dist_end)
03963         #            self.robot.sound.say("I am unable to navigate to that location")
03964         #            return
03965 
03966         #    base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
03967         #    point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
03968 
03969         #    ret = self.approach_perpendicular_to_surface(point_bl_t1, voi_radius=VOI_RADIUS, dist_approach=FINE_STOP)
03970         #    if ret != 3:
03971         #        rospy.logerr('run_behaviors: approach_perpendicular_to_surface failed!')
03972         #        return
03973 
03974         #    #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
03975         #    #point_bl_t2 = tfu.transform_points(base_link_T_map, point_map)
03976         #    self.robot.sound.say('done')
03977         #    rospy.loginfo('run_behaviors: DONE DRIVING!')
03978         #elif False:
03979 
03980 
03981 
03982 
03983 
03984         #if tf_listener == None:
03985         #    self.tf_listener = tf.TransformListener()
03986         #else:
03987         #    self.tf_listener = tf_listener
03988 
03989         #self.pr2 = pr2_obj
03990         #self.cman = con.ControllerManager(arm, self.tf_listener, using_slip_controller=1)
03991         #self.reactive_gr = rgr.ReactiveGrasper(self.cman)
03992         #if arm == 'l':
03993         #    ptopic = '/pressure/l_gripper_motor'
03994         #    self.arm_obj = self.pr2.left
03995         #    self.ik_frame = 'l_wrist_roll_link'
03996         #    self.tool_frame = 'l_gripper_tool_frame'
03997         #else:
03998         #    ptopic = '/pressure/r_gripper_motor'
03999         #    self.arm_obj = self.pr2.right
04000         #    self.ik_frame = 'r_wrist_roll_link'
04001         #    self.tool_frame = 'r_gripper_tool_frame'
04002         #self.movement_mode = 'ik' #or cart
04003 
04004         #rospy.Subscriber('cursor3d', PointStamped, self.laser_point_handler)
04005         #self.double_click = rospy.Subscriber('mouse_left_double_click', String, self.double_click_cb)
04006 
04007     #def set_movement_mode_ik(self):
04008     #    self.movement_mode = 'ik'
04009     #    self.reactive_gr.cm.switch_to_joint_mode()
04010     #    self.reactive_gr.cm.freeze_arm()
04011 
04012     #def set_movement_mode_cart(self):
04013     #    self.movement_mode = 'cart'
04014 
04015 
04016 
04017 
04018 
04019 
04020 
04021                 #pdb.set_trace()
04022                 #self.gather_interest_point_dataset(point)
04023                 #point = np.matrix([ 0.60956734, -0.00714498,  1.22718197]).T
04024                 #pressure_parameters = range(1900, 2050, 30)
04025 
04026                 #self.record_perceptual_data(point)
04027                 #successes = []
04028                 #parameters = [np.matrix([-.15, 0, 0]).T, 300, np.matrix([-.005, 0, 0]).T, 3500, np.matrix([0,0,-.15]).T, .03]
04029 
04030                 #for p in pressure_parameters:
04031                 #    experiment = []
04032                 #    for i in range(4):
04033                 #        #Turn off lights
04034                 #        rospy.loginfo('Experimenting with press_pressure = %d' % p)
04035                 #        success_off = self.light_switch1(point, 
04036                 #                        point_offset=np.matrix([-.15,0,0]).T, press_contact_pressure=300, move_back_distance=np.matrix([-.005,0,0]).T,\
04037                 #                        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, visual_change_thres=.03)
04038                 #        experiment.append(success_off)
04039                 #        rospy.loginfo('Lights turned off? %s' % str(success_off))
04040                 #        return
04041 
04042                 #        #Turn on lights
04043                 #        success_on = self.light_switch1(point, 
04044                 #                        point_offset=np.matrix([-.15,0,-.10]).T, press_contact_pressure=300, move_back_distance=np.matrix([-0.005, 0, 0]).T,
04045                 #                        press_pressure=3500, press_distance=np.matrix([0,0,.1]).T, visual_change_thres=.03)
04046                 #        #def light_switch1(self, point, 
04047                 #        #        point_offset, press_contact_pressure, move_back_distance,
04048                 #        #        press_pressure, press_distance, visual_change_thres):
04049 
04050                 #        print 'Lights turned on?', success_on
04051                 #    successes.append(experiment)
04052 
04053                 #ut.save_pickle({'pressure': pressure_parameters, 
04054                 #                'successes': successes}, 'pressure_variation_results.pkl')
04055 
04056 
04057 
04058 
04059 
04060 
04061 
04062 
04063 
04064 
04065         #return self.pressure_listener.check_threshold() or self.pressure_listener.check_safety_threshold()
04066         ##stop if you hit a tip, side, back, or palm
04067         #(left_touching, right_touching, palm_touching) = self.reactive_gr.check_guarded_move_contacts()
04068         ##saw a contact, freeze the arm
04069         #if left_touching or right_touching or palm_touching:
04070         #    rospy.loginfo("CONTACT made!")
04071         #    return True
04072         #else:
04073         #    return False
04074 
04075         #print 'move returning'
04076         #return whether the left and right fingers were touching
04077         #return (left_touching, right_touching, palm_touching)
04078 
04079 
04080 
04081 
    #def execute_action_list(self):
    #    pass

    #def run(self, seed):
    #    # search for pairs of perception operators and manipulation operators that would work
    #    population = 10
    #    seeds = []
    #    for i in range(population):
    #        aseed = copy.deepcopy(seed)
    #        # param types: 'bool', 'radian', 'se3', 'r3', 'discrete'
    #        new_seed_actions = []
    #        for action in aseed:
    #            #can replace/delete/insert an action,
    #            #or pick one of its parameters and perturb it
    #            if replace_action:
    #                pass
    #
    #            if delete_action:
    #                pass
    #
    #            if insert_action:
    #                #pick a random action from the descriptors list
    #                new_action = ...
    #                new_seed_actions += new_action
    #
    #            if perturb_parameter:
    #                num_params = len(action.params)
    #                rand_param_idx = ...
    #                rand_param = self.descriptors[action.name].params[rand_param_idx]
    #                ...
    #
    #    #pdb.set_trace()
    #    print seed
04118 
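# The run() sketch above mutates a seed action sequence by replacing,
# deleting, or inserting actions (parameter perturbation is left out here).
# A concrete sketch of that mutation step using the Action/descriptor
# classes defined at the top of this file; the mutation rates are
# illustrative and new params are left as None placeholders:

import copy
import random

def mutate_seed(seed, descriptors, p_replace=.1, p_delete=.1, p_insert=.1):
    #seed: list of Action; descriptors: dict of name -> ActionType
    new_actions = []
    for action in copy.deepcopy(seed):
        r = random.random()
        if r < p_delete:
            continue    #drop this action entirely
        if r < p_delete + p_replace:
            action = Action(random.choice(list(descriptors.keys())), None)
        new_actions.append(action)
        if random.random() < p_insert:
            new_actions.append(Action(random.choice(list(descriptors.keys())), None))
    return new_actions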
04119         #point = np.matrix([0.63125642, -0.02918334, 1.2303758 ]).T
04120         #print 'move direction', movement.T
04121         #print 'CORRECTING', point.T
04122         #print 'NEW', point.T
04123         #start_location = (np.matrix([0.25, 0.15, 0.7]).T, np.matrix([0., 0., 0., 0.1]))
04124         #movement = np.matrix([.4, 0., 0.]).T
04125         #what other behavior would I want?
04126         # touch then move away..
04127         # move back but more slowly..
04128         # want a safe physical
04129         #   a safe exploration strategy
04130         #self.behaviors.linear_move(self.behaviors.current_location(), back_alittle, stop='none')
04131         #loc_before = self.behaviors.current_location()[0]
04132         #loc_after = self.behaviors.current_location()[0]
04133         #pdb.set_trace()
04134         #self.behaviors.linear_move(self.behaviors.current_location(), down, stop='pressure_accel')
04135         #self.behaviors.linear_move(self.behaviors.current_location(), back, stop='none')
04136         #pdb.set_trace()
04137         #b.twist(math.radians(30.))
04138         #bd = BehaviorDescriptor()
04139         #movement = point - self.behaviors.current_location()[0]
04140         #pdb.set_trace()
04141         #self.behaviors.linear_move(self.behaviors.current_location(), movement, stop='pressure_accel')
04142 
04143         #loc = self.behaviors.current_location()[0]
04144         #front_loc = point.copy()
04145         #front_loc[0,0] = loc[0,0]
04146         #self.behaviors.set_pressure_threshold(150)
04147         #self.behaviors.move_absolute((front_loc, self.behaviors.current_location()[1]), stop='pressure_accel')
04148         #self.behaviors.move_absolute((point, self.behaviors.current_location()[1]), stop='pressure_accel')
04149 
04150 
04151 
04152 
04153     #def detect_event(self):
04154     #    self.behaviors.cman._start_gripper_event_detector(timeout=40.)
04155     #    stop_func = self.behaviors._tactile_stop_func
    #    while not stop_func():
    #        rospy.sleep(.1)
04157 
04158         #pass
04159         #self.robot = pr2.PR2()
04160         #self.kin = pk.PR2Kinematics(self.robot.tf_listener)
04161 
04162     #def linear_move(self, start_location, direction, distance, arm):
04163     #    if arm == 'left':
04164     #        arm_kin = self.kin.left
04165     #    else:
04166     #        arm_kin = self.kin.right
04167 
04168     #    start_pose = arm_kin.ik(start_location)
    #    loc = start_location[0:3, 3]    #translation column of the 4x4 pose
04170     #    end_location = loc + distance*direction
04171     #    end_pose = arm_kin.ik(end_location)
04172 
04173     #    self.robot.left_arm.set_pose(start_pose, 5.)             #!!!
04174     #    self.robot.left_arm.set_pose(end_pose, 5.)               #!!!
04175 
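# linear_move above reads the arm's current position out of a 4x4
# homogeneous start pose and offsets it by distance along a unit direction
# (IK and frame handling elided). The endpoint computation in isolation:

def linear_move_endpoint(start_pose, direction, distance):
    #start_pose: 4x4 homogeneous transform; direction: unit 3-vector
    loc = np.asarray(start_pose)[0:3, 3]    #translation column
    return loc + distance * np.asarray(direction).ravel()

#e.g. linear_move_endpoint(np.eye(4), [1., 0., 0.], .4) -> array([0.4, 0., 0.])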
04176             ##stop if you hit a tip, side, back, or palm
04177             #(left_touching, right_touching, palm_touching) = rg.check_guarded_move_contacts()
04178             ##saw a contact, freeze the arm
04179             #if left_touching or right_touching or palm_touching:
04180             #    rospy.loginfo("saw contact")
04181             #    rg.cm.switch_to_joint_mode()
04182             #    rg.cm.freeze_arm()
04183             #    break
04184 
04185     #import pdb
04186     #start_location = [0.34, 0.054, 0.87] + [0.015454981255042808, -0.02674860197736427, -0.012255429236635201, 0.999447577565171]
04187     #direction = np.matrix([1., 0., 0.]).T
04188 
04189     #self.reactive_l.move_cartesian_step(start_location, blocking = 1)
04190     #(left_touching, right_touching, palm_touching) = self.reactive_l.guarded_move_cartesian(grasp_pose, 10.0, 5.0)
04191         #self.cman_r     = con.ControllerManager('r')
04192         #self.reactive_r = rgr.ReactiveGrasper(self.cman_r)
04193 
04194         #self.cman_r.start_joint_controllers()
04195         #self.reactive_r.start_gripper_controller()
04196     
04197         #(pos, rot) = self.cman.return_cartesian_pose()
04198         #pdb.set_trace()
04199         #currentgoal = pos + rot
04200         #currentgoal[2] -= .05
04201         #self.reactive_l.move_cartesian_step(currentgoal, blocking = 1)
04202         #(left_touching, right_touching, palm_touching) = self.reactive_l.guarded_move_cartesian(grasp_pose, 10.0, 5.0)
04203         #exit()
04204         #end_loc = start_location + direction * distance
04205         #self.reactive_l.move_cartesian_step(start_loc, blocking = 1)
04206         #self.reactive_l.move_cartesian_step(end_loc, blocking = 1)
04207     #left_pose = b.robot.left.pose()
04208     #left_cart = ut.load_pickle('start_pose.pkl')
04209     #pdb.set_trace()
04210     #kin_sol = b.kin.left.ik(left_cart)
04211     #b.robot.left.set_pose(kin_sol, 5.)
04212     ##b.linear_move(left_cart)
04213     ##left_cart = b.kin.left.fk(left_pose)
04214     ##pdb.set_trace()
04215     #print left_cart
04216 
04217     #(pos, rot) = cm.return_cartesian_pose()
04218     #currentgoal = pos+rot
04219     #currentgoal[2] -= .05
04220     #rg.move_cartesian_step(currentgoal, blocking = 1)
04221     #exit()
04222 
04223 
04224 #b.linear_move()
04225 #cart_pose = kin.left.fk('torso_lift_link', 'l_wrist_roll_link', joints)
04226 #kin.left.ik(cart_pose, 'torso_lift_link')
04227 
04228     #def light_switch1_on(self, point, press_pressure=3500, press_contact_pressure=150):
04229     #    point = point + np.matrix([-.15, 0, -0.20]).T
04230 
04231     #    success, reason = self.behaviors.reach(point)
04232     #    if not success:
04233     #        rospy.loginfo('Reach failed due to "%s"' % reason)
04234 
04235     #    rospy.loginfo('PRESSING')
04236     #    success, reason = self.behaviors.press(np.matrix([0, 0, .20]).T, \
04237     #            press_pressure, press_contact_pressure)
04238     #    if not success:
04239     #        rospy.loginfo('Press failed due to "%s"' % reason)
04240     #        return 
04241 
    #    rospy.loginfo('RESETTING')
    #    r2 = self.behaviors.move_absolute(self.start_location, stop='pressure_accel')
    #    if r2 is not None:
    #        rospy.loginfo('moving back to start location failed due to "%s"' % r2)
    #        return
04247 
04248     #    print 'DONE.'
04249 
04250 
04251     #def _tactile_stop_func(self):
04252     #    r1 = self.pressure_listener.check_threshold() 
04253     #    r2 = self.pressure_listener.check_safety_threshold()
04254     #    if r1:
04255     #        rospy.loginfo('Pressure exceeded!')
04256     #    if r2:
04257     #        rospy.loginfo('Pressure safety limit EXCEEDED!')
04258     #    return r1 or r2
04259 
04260 
04261 
04262 
04263 
04264 
04265 
04266         #r1 = self.pressure_listener.check_threshold() 
04267         #r2 = self.pressure_listener.check_safety_threshold()
04268         #if r1:
04269         #    rospy.loginfo('Pressure exceeded!')
04270         #if r2:
04271         #    rospy.loginfo('Pressure safety limit EXCEEDED!')
04272         #pressure_state = r1 or r2
04273         #pressure_state = self.pressure_listener.check_threshold() or self.pressure_listener.check_safety_threshold()
04274         #action finished (trigger seen)
04275 
04276 
04277 
04278 
    #def optimize_parameters(self, x0, x_range, behavior, objective_func, reset_env_func, reset_param):
    #    reset_retries = 3
    #    num_params = len(x0)
    #    x = copy.deepcopy(x0)

    #    # for each parameter
    #    i = 0
    #    while i < num_params:
    #        #search for a good setting
    #        not_converged = True
    #        xmin = x_range[i, 0]
    #        xmax = x_range[i, 1]

    #        while not_converged:
    #            current_val = x[i]
    #            candidates_i = [(x[i] + xmin) / 2., (x[i] + xmax) / 2.]
    #            successes = []
    #            for cand in candidates_i:
    #                x[i] = cand
    #                success = behavior(x)
    #                if success:
    #                    for reset_i in range(reset_retries):
    #                        reset_success = reset_env_func(*reset_param)
    #                        if reset_success:
    #                            break
    #                successes.append(success)

    #            if successes[0] and successes[1]:
    #                raise RuntimeError("What? This isn't supposed to happen.")
    #            elif successes[0] and not successes[1]:
    #                next_val = candidates_i[0]
    #            elif successes[1] and not successes[0]:
    #                next_val = candidates_i[1]
    #            else:
    #                raise RuntimeError("What? This isn't supposed to happen.")

    #        #if all the trials are bad
    #        if not test(successes):
    #            #go back by 1 parameter
    #            i = i - 1

    #        #if there is more than one good parameter
    #        for p in params:
    #            ... = objective_func(p)

    #        i = i + 1

    #    return x
04329 
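# optimize_parameters above halves the interval around the current value of
# one parameter at a time, keeping whichever candidate still succeeds. The
# interval-halving idea is easiest to see as a monotone threshold search;
# this is a sketch of that reduction, not the author's exact procedure:

def min_passing_value(xmin, xmax, success, tol=1e-3):
    #binary search for the smallest x in [xmin, xmax] with success(x) True,
    #assuming success is monotone (False below some threshold, True above)
    if not success(xmax):
        return None    #nothing in range works
    while (xmax - xmin) > tol:
        mid = (xmin + xmax) / 2.
        if success(mid):
            xmax = mid    #mid works; tighten from above
        else:
            xmin = mid    #threshold lies above mid
    return xmax

#e.g. the lowest press_pressure that still flips the switch:
#min_passing_value(1900., 2050., lambda p: p >= 2000.) -> ~2000.0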
04330 


hai_sandbox
Author(s): Hai Nguyen
autogenerated on Wed Nov 27 2013 11:46:56