# scraps.py

import numpy as np

class TaskError(Exception):
    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return repr(self.parameter)

class ActionType:
    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs

class ParamType:
    def __init__(self, name, ptype, options=None):
        self.name = name
        self.ptype = ptype
        self.options = options

class Action:

    def __init__(self, name, params):
        self.name = name
        self.params = params

class BehaviorDescriptor:

    def __init__(self):
        # Map each behavior name to its typed input/output signature.
        self.descriptors = {
                            'twist':       ActionType([ParamType('angle', 'radian')], [ParamType('success', 'bool')]),
                            'linear_move': ActionType([ParamType('start_loc', 'se3'),
                                                       ParamType('movement', 'r3'),
                                                       ParamType('stop', 'discrete', ['pressure', 'pressure_accel'])],
                                                      [ParamType('success', 'bool')]),
                            }

        # Seed plan: a guarded linear move from a fixed start pose, then a
        # second linear move starting from wherever the arm currently is.
        start_location = (np.matrix([0.3, 0.15, 0.9]).T, np.matrix([0., 0., 0., 0.1]))
        movement       = np.matrix([.4, 0, 0.]).T
        self.seed = [Action('linear_move', [start_location, movement, 'pressure']),
                     Action('linear_move', [Action('current_location', [])])]
        self.run(self.seed)

    def run(self, seed):
        pass
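
# A minimal usage sketch (ours, not from the original code): it shows how the
# data classes above compose, checking a plan's actions against the descriptor
# table. The helper name `check_plan` is hypothetical.
def check_plan(descriptors, plan):
    # True iff every action is a known behavior with the right number of params.
    for action in plan:
        atype = descriptors.get(action.name)
        if atype is None or len(action.params) != len(atype.inputs):
            return False
    return True

# Example (hypothetical):
#   bd = BehaviorDescriptor()
#   check_plan(bd.descriptors, [bd.seed[0]])  # -> True (3 params for linear_move)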

    #def go_to_home_pose(self):
    #    #self.behaviors.movement.set_movement_mode_cart()
    #    return self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
    #    #self.behaviors.movement.set_movement_mode_ik()
    #    #return self.behaviors.movement.move_absolute(self.start_location, stop='pressure')


    #def location_activated_behaviors(self, point_bl, stored_point=False):
    #    driving_param = {'light_switch': {'coarse': .7, 'fine': .5, 'voi': .2},
    #                     'drawer':       {'coarse': .7, 'fine': .7, 'voi': .2}}

    #    map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
    #    point_map = tfu.transform_points(map_T_base_link, point_bl)
    #    matches = self.find_close_by_points(point_map)

    #    if len(matches) > 0:
    #        #pdb.set_trace()
    #        ldata = self.location_data[self.location_labels[matches[0]]]
    #        task = ldata['task']
    #        rospy.loginfo('Found closeby location %s' % str(ldata))
    #    else:
    #        rospy.loginfo( 'No location matches found. Please enter location type:')
    #        for i, k in enumerate(driving_param.keys()):
    #            rospy.loginfo(' %d %s' %(i,k))
    #        task_number = raw_input()
    #        task = driving_param.keys()[int(task_number)]

    #    self.robot.sound.say('task %s' % task.replace('_', ' '))
    #    rospy.loginfo('Task is %s' % task)
    #    if self.approach_location(point_bl,
    #            coarse_stop=driving_param[task]['coarse'],
    #            fine_stop=driving_param[task]['fine'],
    #            voi_radius=driving_param[task]['voi']):
    #        return

    #    else:
    #        ret = self.approach_perpendicular_to_surface(point_bl,
    #                voi_radius=driving_param[task]['voi'],
    #                dist_approach=driving_param[task]['fine'])

    #        if ret != 3:
    #            rospy.logerr('location_activated_behaviors: approach_perpendicular_to_surface failed!')
    #            return

    #        base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
    #        point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
    #        try:
    #            self.untuck()
    #            self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
    #            self.behaviors.movement.pressure_listener.rezero()

    #            if task == 'light_switch':
    #                #self.location_add(perturbed_map, task)
    #                # TODO: what happens when we first encounter this location?! experiment n times to create dataset?
    #                self.practice(point_bl_t1,
    #                        ft.partial(self.light_switch1,
    #                            point_offset=np.matrix([0,0,.03]).T,
    #                            press_contact_pressure=300,
    #                            move_back_distance=np.matrix([-.0075,0,0]).T,
    #                            press_pressure=3500,
    #                            press_distance=np.matrix([0,0,-.15]).T,
    #                            visual_change_thres=.03),
    #                        'light_switch')
    #                self.tuck()

    #                if False: #Old branch where we retry blindly
    #                    MAX_RETRIES = 15
    #                    rospy.loginfo('location_activated_behaviors: go_home_pose')
    #                    #self.go_to_home_pose()
    #                    self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
    #                    gaussian = pr.Gaussian(np.matrix([ 0,      0,      0.]).T, \
    #                                           np.matrix([[1.,     0,      0], \
    #                                                      [0, .02**2,      0], \
    #                                                      [0,      0, .02**2]]))
    #                    retry_count = 0
    #                    success = False
    #                    gaussian_noise = np.matrix([0, 0, 0.0]).T
    #                    point_offset = np.matrix([0, 0, 0.03]).T
    #                    while not success:
    #                        perturbation = gaussian_noise
    #                        perturbed_point_bl = point_bl_t1 + perturbation
    #                        success, _ = self.light_switch1(perturbed_point_bl, point_offset=point_offset, \
    #                                press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
    #                                press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
    #                                visual_change_thres=.03)
    #                        gaussian_noise = gaussian.sample()
    #                        gaussian_noise[0,0] = 0
    #                        retry_count = retry_count + 1

    #                        if retry_count > MAX_RETRIES:
    #                            self.robot.sound.say('giving up tried %d times already' % MAX_RETRIES)
    #                            break
    #                        elif not success:
    #                             self.robot.sound.say('retrying')

    #                    if success:
    #                        self.robot.sound.say('successful!')

    #                        if not stored_point or retry_count > 1:
    #                            map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
    #                            perturbed_map = tfu.transform_points(map_T_base_link, perturbed_point_bl)
    #                            self.location_add(perturbed_map, task)
    #                            self.robot.sound.say('recorded point')

    #                        #if retry_count > 1:
    #                        #    if not self.add_perturbation_to_location(point_map, perturbation):
    #                        #        self.robot.sound.say('unable to add perturbation to database! please fix')
    #                    self.tuck()

    #
    #            if task == 'drawer':
    #                self.drawer(point_bl_t1)
    #                self.tuck()
    #                self.robot.sound.say('done')
    #                self.location_add(point_map, task)
    #
    #                #except lm.RobotSafetyError, e:
    #                #    rospy.loginfo('location_activated_behaviors: Caught a robot safety exception "%s"' % str(e.parameter))
    #                #    #self.behaviors.movement.move_absolute(self.start_location, stop='accel')

    #        except lm.RobotSafetyError, e:
    #            rospy.loginfo('location_activated_behaviors: Caught a robot safety exception "%s"' % str(e.parameter))
    #            self.behaviors.movement.move_absolute(self.start_location, stop='accel')
    #
    #        except TaskError, e:
    #            rospy.loginfo('location_activated_behaviors: TaskError: %s' % str(e.parameter))
    #        rospy.loginfo('location_activated_behaviors: DONE MANIPULATION!')
    #        self.robot.sound.say('done')


        #if self.approach_location(point_bl,
        #        coarse_stop=self.locations_man.driving_param[task]['coarse'],
        #        fine_stop=self.locations_man.driving_param[task]['fine'],
        #        voi_radius=self.locations_man.driving_param[task]['voi']):
        #    #rospy.logerr('location_approach_driving: initial approach failed')
        #    return True, 'initial approach'
        #else:


    #def load_classifier(self, classifier_name, data_file_name):
    #    self.learners[classifier_name] = ipa.InterestPointPerception(classifier_name,
    #            data_file_name, self.tf_listener)


        #return {'points3d': np.column_stack(points3d_tried),
        #        'instances': np.column_stack(instances_tried),
        #        'points2d': np.column_stack(points2d_tried),
        #        'labels': np.matrix(labels),
        #        'sizes': fea_dict['sizes']}


    #def blind_exploration3(self, task_id, behavior, undo_behavior, point_bl, stop_fun,
    #        max_retries=15, closeness_tolerance=.01, fea_dict=None):
    #    params = r3d.Recognize3DParam()
    #    params.uncertainty_x = 1.
    #    params.uncertainty_y = .02
    #    params.uncertainty_z = .02
    #    params.n_samples = 400
    #    params.uni_mix = 0.
    #    #MAX_RETRIES = 20
    #

    #    # instances, locs2d, locs3d, image, rdict, sizes =
    #    if fea_dict == None:
    #        fea_dict, _ = self.read_features_save(task_id, point_bl, params)
    #        image_T_bl = tfu.transform('openni_rgb_optical_frame', 'base_link', self.tf_listener)
    #        fea_dict['image_T_bl'] = image_T_bl
    #    #fea_dict = self.feature_ex.read(expected_loc_bl=point_bl, params=params)
    #
    #    dists = ut.norm(fea_dict['points3d'] - point_bl)
    #    ordering = np.argsort(dists).A1
    #    points3d_sampled = fea_dict['points3d'][:, ordering]
    #    points2d_sampled = fea_dict['points2d'][:, ordering]
    #    instances_sampled = fea_dict['instances'][:, ordering]
    #    start_pose = self.robot.head.pose()

    #    point3d_img = tfu.transform_points(fea_dict['image_T_bl'], point_bl)
    #    point2d_img = self.feature_ex.cal.project(point3d_img)

    #    sampled_idx = 0
    #    iter_count = 0
    #    labels = []
    #    points_tried = []
    #    tinstances = []
    #    sp2d = []
    #    while iter_count < max_retries and not stop_fun(np.matrix(labels)):
    #        if len(points_tried)> 0 and \
    #           np.any(ut.norm(np.column_stack(points_tried) - points3d_sampled[:, sampled_idx]) < closeness_tolerance):
    #            sampled_idx = sampled_idx + 1
    #            continue

    #        #pdb.set_trace()
    #        #self.robot.sound.say('executing behavior')
    #        self.robot.head.set_pose(start_pose, 1)
    #        success, reason = behavior(points3d_sampled[:, sampled_idx])
    #        iter_count = iter_count + 1
    #        points_tried.append(points3d_sampled[:, sampled_idx])
    #        tinstances.append(instances_sampled[:, sampled_idx])
    #        sp2d.append(points2d_sampled[:, sampled_idx])
    #        sampled_idx = sampled_idx + 1

    #        #tinstances.append(fea_dict['instances'][:,iter_count])
    #        #sp2d.append(fea_dict['points2d'][:,iter_count])
    #        #add point and label to points tried
    #        if success:
    #            labels.append(r3d.POSITIVE)
    #            if undo_behavior != None:
    #                #If we were successful, call blind exploration with the undo behavior
    #                def any_pos_sf(labels_mat):
    #                    if np.any(r3d.POSITIVE == labels_mat):
    #                        return True
    #                    return False
    #                if task_id != None:
    #                    utid = self.locations_man.create_undo_task(task_id)
    #                else:
    #                    utid = None
    #                #TODO: gather instances for undo action
    #                #TODO: figure out why position of point_bl is shifted in second call
    #                self.seed_dataset_explore(utid, undo_behavior, None, point_bl, any_pos_sf,
    #                        max_retries, fea_dict=fea_dict)
    #                #success, reason = undo_behavior(points3d_sampled[:, 'iter_count'])
    #        else:
    #            labels.append(r3d.NEGATIVE)

    #        #Visualization
    #        img = cv.CloneMat(fea_dict['image'])
    #        r3d.draw_points(img, points2d_sampled, [255, 255, 255], 2, -1)
    #        _, pos_points, neg_points = separate_by_labels(np.column_stack(sp2d), np.matrix(labels))
    #        r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
    #        r3d.draw_points(img, pos_points, [0, 255, 0], 2, -1)
    #        r3d.draw_points(img, neg_points, [0, 0, 255], 2, -1)
    #        r3d.draw_points(img, sp2d[-1], [0, 184, 245], 3, -1)
    #        self.img_pub.publish(img)
    #
    #    rospy.loginfo('tried %d times' % iter_count)
    #    return {'points3d': np.column_stack(points_tried),
    #            'instances': np.column_stack(tinstances),
    #            'points2d': np.column_stack(sp2d),
    #            'labels': np.matrix(labels),
    #            'sizes': fea_dict['sizes']}
    #   #if iter_count > MAX_RETRIES:
    #   #    self.robot.sound.say('giving up tried %d times already' % MAX_RETRIES)
    #   #    break
    #   #elif not success:
    #   #     self.robot.sound.say('retrying')

    #   return points tried record
            #success, _ = self.light_switch1(perturbed_point_bl, point_offset=point_offset, \
            #        press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
            #        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
            #        visual_change_thres=.03)

    #   points tried = []
    #   while we have not succeeded and not stop_fun(points tried):
    #       label = behavior(point)
    #       add point and label to points tried
    #       perturb point
    #   return points tried record

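# The pseudocode above is the blind-exploration loop that several commented-out
# methods in this file implement. Below is a minimal, self-contained sketch of
# that loop (our own illustration, not the original robot code): `behavior` is
# any callable returning success/failure, and the point is perturbed with
# zero-mean Gaussian noise in y/z only, as in blind_exploration further down.
def blind_explore_sketch(behavior, point, stop_fun, max_retries=15, noise_std=.02):
    # point: 3x1 np.matrix in the robot's base frame (hypothetical stand-in).
    labels = []
    points_tried = []
    noise = np.matrix([0., 0., 0.]).T  # try the given point first
    while len(labels) < max_retries and not stop_fun(labels):
        perturbed = point + noise
        success = behavior(perturbed)
        points_tried.append(perturbed)
        labels.append(1 if success else 0)
        # perturb y and z only; x (the approach direction) stays fixed
        noise = np.matrix(np.random.normal(0., noise_std, (3, 1)))
        noise[0, 0] = 0.
    return np.column_stack(points_tried), labels
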
    #def load_classifier(self, name, fname):
    #    print 'loading classifier'
    #    dataset = ut.load_pickle(fname)
    #    self.train(dataset, name)

        #self.location_labels = []
        #self.location_data = []
        #if os.path.isfile(self.saved_locations_fname):
        #    location_data = ut.load_pickle(self.saved_locations_fname) #each col is a 3d point, 3xn mat
        #    for idx, rloc in enumerate(location_data):
        #        self.location_centers.append(rloc['center'])
        #        self.location_labels.append(idx)
        #    self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
        #    self.location_data = location_data
        #pass

        #location_idx = self.location_labels[close_by_locs[0]]
        #ldata = self.location_data[location_idx]

        #rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
        #ldata['points'].append(point_map)
        #ldata['center'] = np.column_stack(ldata['points']).mean(1)
        #self.location_centers[location_idx] = ldata['center']
        #self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
#    def update_center(self, center_id, point_map):
#        #If close by locations found then add to points list and update center
#        location_idx = self.location_labels[close_by_locs[0]]
#        ldata = self.location_data[location_idx]
#
#        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
#        ldata['points'].append(point_map)
#        ldata['center'] = np.column_stack(ldata['points']).mean(1)
#        self.location_centers[location_idx] = ldata['center']
#        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
#


    #def location_add(self, point_map, task, data):
    #    close_by_locs = self.find_close_by_points_match_task(point_map, task)
    #    if len(close_by_locs) == 0:
    #        rospy.loginfo('location_add: point not close to any existing location. creating new record.')
    #        self.location_data.append({
    #            'task': task,
    #            'center': point_map,
    #            'perceptual_dataset': None,
    #            'points':[point_map]})
    #        self.location_centers.append(point_map)
    #        self.location_labels.append(len(self.location_data) - 1)
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
    #    else:
    #        #If close by locations found then add to points list and update center
    #        location_idx = self.location_labels[close_by_locs[0]]
    #        ldata = self.location_data[location_idx]

    #        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
    #        ldata['points'].append(point_map)
    #        ldata['center'] = np.column_stack(ldata['points']).mean(1)
    #        self.location_centers[location_idx] = ldata['center']
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))

    #    ut.save_pickle(self.location_data, self.saved_locations_fname)
    #    rospy.loginfo('location_add: saved point in map.')

#    def find_close_by_points(self, point_map):
#        if self.locations_tree != None:
#            close_by_locs = self.locations_tree.query_ball_point(np.array(point_map.T), self.LOCATION_ADD_RADIUS)[0]
#            return close_by_locs
#        else:
#            return []

#   3)   listing all locations
#   4)   listing locations closest to given point, with and without task


    #def find_close_by_points_match_task(self, point_map, task):
    #    matches = self.find_close_by_points(point_map)
    #    task_matches = []
    #    for m in matches:
    #        idx = self.location_labels[m]
    #        ldata = self.location_data[idx]
    #        if ldata['task'] == task:
    #            task_matches.append(m)
    #    return task_matches

#class PickPointsCloseToStartLocation:
#
#    def __init__(self, point_bl, closeness_tolerance=.01, max_retries=20):
#        self.params = r3d.Recognize3DParam()
#        self.params.uncertainty_x = 1.
#        self.params.uncertainty_y = .02
#        self.params.uncertainty_z = .02
#        self.params.n_samples = 400
#        self.params.uni_mix = 0.
#
#        self.point_bl = point_bl
#        self.sampled_idx = 0
#        self.iter_count = 0
#        self.max_retries = max_retries
#        self.closeness_tolerance = closeness_tolerance
#
#        self.points3d_tried = []
#        self.points2d_tried = []
#        self.instances_tried = []
#
#    def process_scan(self, fea_dict):
#        dists = ut.norm(fea_dict['points3d'] - self.point_bl)
#        ordering = np.argsort(dists).A1
#
#        self.points3d_sampled = fea_dict['points3d'][:, ordering]
#        self.points2d_sampled = fea_dict['points2d'][:, ordering]
#        self.instances_sampled = fea_dict['instances'][:, ordering]
#
#    def get_params(self):
#        return self.params
#
#    def stop(self):
#        return self.iter_count > self.max_retries
#
#    def pick_next(self):
#        while len(self.points3d_tried) > 0 \
#                and np.any(ut.norm(np.column_stack(self.points3d_tried) - self.points3d_sampled[:, self.sampled_idx]) < self.closeness_tolerance):
#            self.sampled_idx = self.sampled_idx + 1
#
#        self.points3d_tried.append(self.points3d_sampled[:, self.sampled_idx])
#        self.points2d_tried.append(self.points2d_sampled[:, self.sampled_idx])
#        self.instances_tried.append(self.instances_sampled[:, self.sampled_idx])
#        self.iter_count = self.iter_count + 1
#
#        return {'points3d':  self.points3d_sampled[:, self.sampled_idx],
#                'points2d':  self.points2d_sampled[:, self.sampled_idx],
#                'instances': self.instances_sampled[:, self.sampled_idx]}
#
#    def get_instances_used(self):
#        if len(self.points3d_tried) > 0:
#            return {'points3d': np.column_stack(self.points3d_tried),
#                    'points2d': np.column_stack(self.points2d_tried),
#                    'instances': np.column_stack(self.instances_tried)}
#        else:
#            return None
#
#class PickPointsUsingActiveLearning:
#
#    def __init__(self, locations_manager):
#        self.params = r3d.Recognize3DParam()
#        self.params.uncertainty_x = 1.
#        self.params.n_samples = 2000
#        self.params.uni_mix = .1
#
#        self.locations_manager = locations_manager
#        self.points3d_tried = []
#        self.points2d_tried = []
#        self.instances_tried = []
#
#    def process_scan(self, fea_dict):
#        pass
#
#    def get_params(self):
#        return self.params
#
#    def pick_next(self):
#        pass
#
#    def stop(self):
#        pass
#
#    def get_instances_used(self):
#        pass
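
# The two commented-out classes above share an implicit sampling-strategy
# interface (process_scan / get_params / pick_next / stop / get_instances_used).
# This driver loop is our own sketch of how a caller might use that interface;
# `scan_fn` and `behavior` are hypothetical callables, not names from the source.
def run_strategy_sketch(strategy, scan_fn, behavior):
    # One scan feeds the strategy; then execute picks until it says stop.
    strategy.process_scan(scan_fn(strategy.get_params()))
    while not strategy.stop():
        pick = strategy.pick_next()
        behavior(pick['points3d'])
    return strategy.get_instances_used()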

        #self.LOCATION_ADD_RADIUS = .5
        #self.kinect_listener = kl.KinectListener()
        #self.kinect_cal = rc.ROSCameraCalibration('camera/rgb/camera_info')

        #self.kinect_img_sub = message_filters.Subscriber('/camera/rgb/image_color', smsg.Image)
        #self.kinect_depth_sub = message_filters.Subscriber('/camera/depth/points2', smsg.PointCloud2)
        #ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
        #ts.registerCallback(callback)

        #self.load_classifier('light_switch', 'labeled_light_switch_data.pkl')
        #self.start_location = (np.matrix([0.25, 0.30, 1.3]).T, np.matrix([0., 0., 0., 0.1]))

        #loading stored locations
        #self.saved_locations_fname = 'saved_locations.pkl'
        #self.location_centers = []
        #self.location_labels = []
        #self.location_data = []
        #self.locations_tree = None

        #if os.path.isfile(self.saved_locations_fname):
        #    location_data = ut.load_pickle(self.saved_locations_fname) #each col is a 3d point, 3xn mat
        #    for idx, rloc in enumerate(location_data):
        #        self.location_centers.append(rloc['center'])
        #        self.location_labels.append(idx)
        #    self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
        #    self.location_data = location_data

        # joint angles used for tuck
        #pdb.set_trace()
        #self.untuck()
        #self.behaviors.movement.set_movement_mode_ik()
        #self.movement.set_movement_mode_ik()
        #self.tuck()
        #self.r1 = np.matrix([[-0.31006769,  1.2701541 , -2.07800829, -1.45963243, -4.35290489,
        #                 -1.86052221,  5.07369192]]).T
        #self.l0 = np.matrix([[  1.05020383,  -0.34464327,   0.05654   ,  -2.11967694,
        #                 -10.69100221,  -1.95457839,  -3.99544713]]).T
        #self.l1 = np.matrix([[  1.06181076,   0.42026402,   0.78775801,  -2.32394841,
        #                 -11.36144995,  -1.93439025,  -3.14650108]]).T
        #self.l2 = np.matrix([[  0.86275197,   0.93417818,   0.81181124,  -2.33654346,
        #                 -11.36121856,  -2.14040499,  -3.15655164]]).T
        #self.l3 = np.matrix([[ 0.54339568,  1.2537778 ,  1.85395725, -2.27255481, -9.92394984,
        #                 -0.86489749, -3.00261708]]).T


    #def train(self, dataset, name):
    #    rec_params = self.feature_ex.rec_params
    #    nneg = np.sum(dataset.outputs == r3d.NEGATIVE) #TODO: this was copied and pasted from r3d
    #    npos = np.sum(dataset.outputs == r3d.POSITIVE)
    #    print '================= Training ================='
    #    print 'NEG examples', nneg
    #    print 'POS examples', npos
    #    print 'TOTAL', dataset.outputs.shape[1]
    #    neg_to_pos_ratio = float(nneg)/float(npos)
    #    weight_balance = ' -w0 1 -w1 %.2f' % neg_to_pos_ratio
    #    print 'training'
    #    learner = r3d.SVMPCA_ActiveLearner(use_pca=True)
    #    #TODO: figure out how to scale the inputs field!
    #    learner.train(dataset, dataset.inputs,
    #                  rec_params.svm_params + weight_balance,
    #                  rec_params.variance_keep)
    #    self.learners[name] = {'learner': learner, 'dataset': dataset}
    #    print 'done loading'



    #def tuck(self):
    #    ldiff = np.linalg.norm(pr2.diff_arm_pose(self.robot.left.pose(), self.l3))
    #            # np.linalg.norm(self.robot.left.pose() - self.l3)
    #    rdiff = np.linalg.norm(pr2.diff_arm_pose(self.robot.right.pose(), self.r1))
    #    #rdiff = np.linalg.norm(self.robot.right.pose() - self.r1)
    #    if ldiff < .3 and rdiff < .3:
    #        rospy.loginfo('tuck: Already tucked. Ignoring request.')
    #        return
    #    self.robot.right.set_pose(self.r1, block=False)
    #    self.robot.left.set_pose(self.l0, block=True)
    #    poses = np.column_stack([self.l0, self.l1, self.l2, self.l3])
    #    #pdb.set_trace()
    #    self.robot.left.set_poses(poses, np.array([0., 1.5, 3, 4.5]))


    #def untuck(self):
    #    if np.linalg.norm(self.robot.left.pose() - self.l0) < .3:
    #        rospy.loginfo('untuck: Already untucked. Ignoring request.')
    #        return
    #    self.robot.right.set_pose(self.r1, 2., block=False)
    #    self.robot.left.set_pose(self.l3, 2.,  block=True)
    #    poses = np.column_stack([self.l3, self.l2, self.l1, self.l0])
    #    self.robot.left.set_poses(poses, np.array([0., 3., 6., 9.])/2.)


            #if len(self.location_centers) < 1:
            #    return
            #rospy.loginfo('click_cb: double clicked but no 3d point given')
            #rospy.loginfo('click_cb: will use the last successful location given')

            #base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
            #point_bl = tfu.transform_points(base_link_T_map, self.location_centers[-1])
            #rospy.loginfo('click_cb: using ' + str(self.location_centers[-1].T))
            #self.location_activated_behaviors(point_bl, stored_point=True)


    #def find_close_by_points(self, point_map):
    #    if self.locations_tree != None:
    #        close_by_locs = self.locations_tree.query_ball_point(np.array(point_map.T), self.LOCATION_ADD_RADIUS)[0]
    #        return close_by_locs
    #    else:
    #        return []

    #def find_close_by_points_match_task(self, point_map, task):
    #    matches = self.find_close_by_points(point_map)
    #    task_matches = []
    #    for m in matches:
    #        idx = self.location_labels[m]
    #        ldata = self.location_data[idx]
    #        if ldata['task'] == task:
    #            task_matches.append(m)
    #    return task_matches

    #def location_add(self, point_map, task, data):
    #    close_by_locs = self.find_close_by_points_match_task(point_map, task)
    #    if len(close_by_locs) == 0:
    #        rospy.loginfo('location_add: point not close to any existing location. creating new record.')
    #        self.location_data.append({
    #            'task': task,
    #            'center': point_map,
    #            'perceptual_dataset': None,
    #            'points':[point_map]})
    #        self.location_centers.append(point_map)
    #        self.location_labels.append(len(self.location_data) - 1)
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))
    #    else:
    #        #If close by locations found then add to points list and update center
    #        location_idx = self.location_labels[close_by_locs[0]]
    #        ldata = self.location_data[location_idx]

    #        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
    #        ldata['points'].append(point_map)
    #        ldata['center'] = np.column_stack(ldata['points']).mean(1)
    #        self.location_centers[location_idx] = ldata['center']
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))

    #    ut.save_pickle(self.location_data, self.saved_locations_fname)
    #    rospy.loginfo('location_add: saved point in map.')


    #def location_add(self, point_map, task):
    #    close_by_locs = self.find_close_by_points_match_task(point_map, task)
    #    if len(close_by_locs) == 0:
    #        rospy.loginfo('location_add: point not close to any existing location. creating new record.')
    #        self.location_data.append({
    #            'task': task,
    #            'center': point_map,
    #            'points':[point_map]})
    #        self.location_centers.append(point_map)
    #        self.location_labels.append(len(self.location_data) - 1)
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))

    #    else:
    #        #If close by locations found then add to points list and update center
    #        location_idx = self.location_labels[close_by_locs[0]]
    #        ldata = self.location_data[location_idx]

    #        rospy.loginfo('location_add: point close to %d at %s.' % (location_idx, str(ldata['center'].T)))
    #        ldata['points'].append(point_map)
    #        ldata['center'] = np.column_stack(ldata['points']).mean(1)
    #        self.location_centers[location_idx] = ldata['center']
    #        self.locations_tree = sp.KDTree(np.array(np.column_stack(self.location_centers).T))

    #    ut.save_pickle(self.location_data, self.saved_locations_fname)
    #    rospy.loginfo('location_add: saved point in map.')


    #def record_processed_data_kinect2(self, point3d_bl, kinect_fea):
    #    instances, locs2d_image, locs3d_bl, image = kinect_fea #self.feature_ex.read(point3d_bl)
    #    #rospy.loginfo('Getting a kinect reading')

    #    tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
    #    kimage_name = '%s_highres.png' % tstring
    #    cv.SaveImage(kimage_name, kimage)

    #    preprocessed_dict = {'instances': instances,
    #                         'points2d': locs2d_image,
    #                         'points3d': locs3d_bl,
    #                         'image': kimage_name,
    #                         'labels': labels,
    #                         'sizes': feature_extractor.sizes}


        #self.feature_ex.read(point3d_bl)
        #rdict = self.kinect_listener.read()
        #kimage = rdict['image']
        #rospy.loginfo('Waiting for calibration.')
        #while self.kinect_cal.has_msg == False:
        #    time.sleep(.1)

        #which frames?
        #rospy.loginfo('Getting transforms.')
        #k_T_bl = tfu.transform('openni_rgb_optical_frame', '/base_link', self.tf_listener)
        #tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
        #kimage_name = '%s_highres.png' % tstring
        #rospy.loginfo('Saving images (basename %s)' % tstring)
        #cv.SaveImage(kimage_name, kimage)
        #rospy.loginfo('Saving pickles')
        #pickle_fname = '%s_interest_point_dataset.pkl' % tstring

        #data_pkl = {'touch_point': point3d_bl,
        #            'points3d': rdict['points3d'],
        #            'image': kimage_name,
        #            'cal': self.prosilica_cal,
        #            'k_T_bl': k_T_bl}
                    #'point_touched': point3d_bl}

        #ut.save_pickle(data_pkl, pickle_fname)
        #print 'Recorded to', pickle_fname


            #npoint = point + gaussian_noise
            #success_off, touchloc_bl = self.light_switch1(npoint,
            #pdb.set_trace()


#    ##
#    # The behavior can make service calls to a GUI asking users to label
#    def repeat_action(self, task_id, ctask_id, point3d_bl, sampling_object, stop_fun, fea_dict=None):
#
#        # instances, locs2d_image, locs3d_bl, image, raw_dict =
#        #kf_dict = self.feature_ex.read(point3d_bl)
#        params = r3d.Recognize3DParam()
#        params.uncertainty_x = 1.
#        params.n_samples = 2000
#        params.uni_mix = .1
#
#        kdict, fname = self.read_features_save(task_id, point3d_bl, params)
#        learner = self.locations_man.learners[task_id]
#        behavior = self.get_behavior_by_task(self.locations_man.data[task_id]['task'])
#        undo_behavior = self.get_undo_behavior_by_task(self.locations_man.data[task_id]['task'])
#        start_pose = self.robot.head.pose()
#
#        kdict['image_T_bl'] = tfu.transform('openni_rgb_optical_frame', 'base_link', self.tf_listener)
#        point3d_img = tfu.transform_points(kdict['image_T_bl'], point3d_bl)
#        point2d_img = self.feature_ex.cal.project(point3d_img)
#
#        labels = []
#        points3d_tried = []
#        points2d_tried = []
#        converged = False
#        indices_added = []
#        pdb.set_trace()
#
#
#        while not converged and not stop_fun(np.matrix(labels)):
#            #Find remaining instances
#            remaining_pt_indices = r3d.inverse_indices(indices_added, kdict['instances'].shape[1])
#            remaining_instances = kdict['instances'][:, remaining_pt_indices]
#
#            #Ask learner to pick an instance
#            ridx, selected_dist, converged = learner.select_next_instances_no_terminate(remaining_instances)
#            selected_idx = remaining_pt_indices[ridx]
#            indices_added.append(selected_idx)
#
#            #draw
#            img = cv.CloneMat(kdict['image'])
#            #Draw the center
#            r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
#            #Draw possible points
#            r3d.draw_points(img, kdict['points2d'], [255, 255, 255], 2, -1)
#            #Draw what we have so far
#            if len(points2d_tried) > 0:
#                _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
#                r3d.draw_points(img, pos_exp, [0, 255, 0], 3, 1)
#                r3d.draw_points(img, neg_exp, [0, 0, 255], 3, 1)
#
#            predictions = np.matrix(learner.classify(kdict['instances']))
#            _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
#            r3d.draw_points(img, pos_pred, [0, 255, 0], 2, -1)
#            r3d.draw_points(img, neg_pred, [0, 0, 255], 2, -1)
#
#            #Draw what we're selecting
#            r3d.draw_points(img, kdict['points2d'][:, selected_idx], [0, 184, 245], 3, -1)
#            self.img_pub.publish(img)
#
#            #Get label for instance
#            self.robot.head.set_pose(start_pose, 1)
#
#            #EXECUTE!!
#            success, reason = behavior(kdict['points3d'][:, selected_idx])
#            if success:
#                color = [0,255,0]
#                label = r3d.POSITIVE
#                def any_pos_sf(labels_mat):
#                    if np.any(r3d.POSITIVE == labels_mat):
#                        return True
#                    return False
#                utid = self.locations_man.create_undo_task(task_id)
#                self.blind_exploration2(utid, undo_behavior, None, point3d_bl, any_pos_sf,
#                        max_retries=max_undo_retries, fea_dict=kdict)
#
#            else:
#                label = r3d.NEGATIVE
#                color = [0,0,255]
#
#            labels.append(label)
#            points3d_tried.append(kdict['points3d'][:, selected_idx])
#            points2d_tried.append(kdict['points2d'][:, selected_idx])
#
#            datapoint = {'instances': kdict['instances'][:, selected_idx],
#                         'points2d':  kdict['points2d'][:, selected_idx],
#                         'points3d':  kdict['points3d'][:, selected_idx],
#                         'sizes':     kdict['sizes'],
#                         'labels':    np.matrix([label])
#                         }
#            self.locations_man.add_perceptual_data(task_id, datapoint)
#            self.locations_man.save_database()
#            self.locations_man.train(task_id)
#
#            #Classify
#            predictions = np.matrix(learner.classify(kdict['instances']))
#
#            #Draw
#            img = cv.CloneMat(kdict['image'])
#            _, pos_exp, neg_exp = separate_by_labels(np.column_stack(points2d_tried), np.matrix(labels))
#            r3d.draw_points(img, point2d_img, [255, 0, 0], 4, 2)
#            r3d.draw_points(img, kdict['points2d'], [255, 255, 255], 2, -1)
#            r3d.draw_points(img, pos_exp, [0, 255, 0], 3, 1)
#            r3d.draw_points(img, neg_exp, [0, 0, 255], 3, 1)
#
#            _, pos_pred, neg_pred = separate_by_labels(kdict['points2d'], predictions)
#            r3d.draw_points(img, pos_pred, [0, 255, 0], 2, -1)
#            r3d.draw_points(img, neg_pred, [0, 0, 255], 2, -1)
#            r3d.draw_points(img, points2d_tried[-1], color, 3, -1)
#
#            #publish
#            self.img_pub.publish(img)


    #Save dataset in the location's folder
    #def save_dataset(self, task_id, point, rdict):
    #    pt.join(task_id,
    #    self.locations_man
    #    self.record_perceptual_data(point, rdict)
    #    #TODO...

    #TODO TEST
    #BOOKMARK 3/7 4:03 AM
    #LAST DITCH EXECUTION(point, stop_fun):
    #def blind_exploration(self, behavior, point_bl, stop_fun, max_retries=15):
    #    gaussian = pr.Gaussian(np.matrix([ 0,      0,      0.]).T, \
    #                           np.matrix([[1.,     0,      0], \
    #                                      [0, .02**2,      0], \
    #                                      [0,      0, .02**2]]))

    #    iter_count = 0
    #    gaussian_noise = np.matrix([0, 0, 0.0]).T #We want to try the given point first
    #    labels = []
    #    points_tried = []

    #    #while we have not succeeded and not stop_fun(points tried):
    #    while iter_count < max_retries and not stop_fun(np.matrix(labels)):
    #        perturbation = gaussian_noise
    #        perturbed_point_bl = point_bl + perturbation

    #        self.robot.sound.say('executing behavior')
    #        success, reason = behavior(perturbed_point_bl)
    #        points_tried.append(perturbed_point_bl)

    #        #add point and label to points tried
    #        if success:
    #            labels.append(r3d.POSITIVE)
    #        else:
    #            labels.append(r3d.NEGATIVE)

    #        #perturb point
    #        gaussian_noise = gaussian.sample()
    #        gaussian_noise[0,0] = 0
    #        iter_count = iter_count + 1
    #
    #    self.robot.sound.say('tried %d times' % iter_count)
    #    return np.column_stack(points_tried)

    #def blind_exploration2(self, task_id, behavior, undo_behavior, point_bl, stop_fun,
    #        max_retries=15, closeness_tolerance=.005, fea_dict=None):
    #    params = r3d.Recognize3DParam()
    #    params.uncertainty_x = 1.
    #    params.uncertainty_y = .02
    #    params.uncertainty_z = .02
    #    params.n_samples = 400
    #    params.uni_mix = 0.
    #    MAX_RETRIES = 20
    #
    #    if fea_dict == None:
    #        fea_dict, _ = self.read_features_save(task_id, point_bl, params)
    #
    #    dists = ut.norm(fea_dict['points3d'] - point_bl)
    #    ordering = np.argsort(dists).A1
    #    points3d_sampled = fea_dict['points3d'][:, ordering]
    #    points2d_sampled = fea_dict['points2d'][:, ordering]
    #    instances_sampled = fea_dict['instances'][:, ordering]

    #    labels = []
    #    points_tried = []
    #    tinstances = []
    #    sp2d = []

    #    labels.append(r3d.POSITIVE)
    #    points_tried.append(points3d_sampled[:, 0])
    #    tinstances.append(instances_sampled[:, 0])
    #    sp2d.append(points2d_sampled[:, 0])

    #    labels.append(r3d.NEGATIVE)
    #    points_tried.append(points3d_sampled[:, 1])
    #    tinstances.append(instances_sampled[:, 1])
    #    sp2d.append(points2d_sampled[:, 1])

    #    return {'points3d': np.column_stack(points_tried),
    #            'instances': np.column_stack(tinstances),
    #            'points2d': np.column_stack(sp2d),
    #            'labels': np.matrix(labels),
    #            'sizes': fea_dict['sizes']}

        #def __init__(self, object_name, labeled_data_fname, tf_listener):
        #make learner
        #learner = SVMActiveLearnerApp()
        #labeled_light_switch_dataset = ut.load_pickle(data_file_name)
        #learner.train(labeled_light_switch_dataset,
        #              labeled_light_switch_dataset.sizes['intensity'],
        #              self.params.variance_keep)
        #self.learners[classifier_name] = learner


    #def locate_light_switch(self):
    #    #capture data
    #    pointcloud_msg = self.laser_scan.scan(math.radians(180.), math.radians(-180.), 20.)
    #    prosilica_image = self.prosilica.get_frame() #TODO check if this is a cvmat
    #    while self.prosilica_cal.has_msg == False:
    #        time.sleep(.1)

    #    #preprocess
    #    ic_data = IntensityCloudData(pointcloud_msg, prosilica_image,
    #                    tfu.transform('/high_def_optical_frame', '/base_link', self.tf_listener),
    #                    self.prosilica_cal,
    #                    r3d.Recognize3DParam())
    #    instances = ic_data.extract_vectorized_features()

    #    results = []
    #    for i in range(instances.shape[1]):
    #        nlabel = self.learners['light_switch'].classify(instances[:, i])
    #        results.append(nlabel)

    #    results = np.matrix(results)
    #    positive_indices = np.where(results == r3d.POSITIVE)[1]

    #    #want 3d location of each instance
    #    positive_points_3d = ic_data.sampled_points[:, positive_indices]

    #    #return a random point for now
    #    rindex = np.random.randint(0, len(positive_indices))
    #    return positive_points_3d[:,rindex]


    #def add_perturbation_to_location(self, point_map, perturbation):
    #    locs = self.find_close_by_points(point_map)
    #    if locs != None:
    #        location = self.location_data[self.location_labels[locs[0]]]
    #        if not location.has_key('perturbation'):
    #            location['perturbation'] = []
    #        location['perturbation'].append(perturbation)
    #        return True
    #    return False


                #self.go_to_home_pose()
                #print '>>>> POINT IS', point_bl_t1.T
                #point_bl_t1 = np.matrix([[ 0.73846737,  0.07182931,  0.55951065]]).T
        #DIST_THRESHOLD = .8 for lightswitch
        #DIST_THRESHOLD = .85 #for drawers
        #DIST_APPROACH = .5
        #COARSE_STOP = .7
        #FINE_STOP = .7
        #VOI_RADIUS = .2

        #point_dist = np.linalg.norm(point_bl_t0[0:2,0])
        #rospy.loginfo('run_behaviors: Point is %.3f away.' % point_dist)
        #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        #point_map = tfu.transform_points(map_T_base_link, point_bl_t0)

        #if point_dist > DIST_THRESHOLD:
        #    rospy.loginfo('run_behaviors: Point is greater than %.1f m away (%.3f).  Driving closer.' % (DIST_THRESHOLD, point_dist))
        #    ##self.turn_to_point(point_bl_t0)
        #    rospy.loginfo( 'run_behaviors: CLICKED on point_bl ' + str(point_bl_t0.T))

        #    ret = self.drive_approach_behavior(point_bl_t0, dist_far=COARSE_STOP)
        #    if ret != 3:
        #        base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
        #        point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)
        #        dist_end = np.linalg.norm(point_bl_t1[0:2,0])
        #        if dist_end > DIST_THRESHOLD:
        #            rospy.logerr('run_behaviors: drive_approach_behavior failed! %.3f' % dist_end)
        #            self.robot.sound.say("I am unable to navigate to that location")
        #            return

        #    base_link_T_map = tfu.transform('base_link', 'map', self.tf_listener)
        #    point_bl_t1 = tfu.transform_points(base_link_T_map, point_map)

        #    ret = self.approach_perpendicular_to_surface(point_bl_t1, voi_radius=VOI_RADIUS, dist_approach=FINE_STOP)
        #    if ret != 3:
        #        rospy.logerr('run_behaviors: approach_perpendicular_to_surface failed!')
        #        return

        #    #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
        #    #point_bl_t2 = tfu.transform_points(base_link_T_map, point_map)
        #    self.robot.sound.say('done')
        #    rospy.loginfo('run_behaviors: DONE DRIVING!')
        #elif False:


        #if tf_listener == None:
        #    self.tf_listener = tf.TransformListener()
        #else:
        #    self.tf_listener = tf_listener

        #self.pr2 = pr2_obj
        #self.cman = con.ControllerManager(arm, self.tf_listener, using_slip_controller=1)
        #self.reactive_gr = rgr.ReactiveGrasper(self.cman)
        #if arm == 'l':
        #    ptopic = '/pressure/l_gripper_motor'
        #    self.arm_obj = self.pr2.left
        #    self.ik_frame = 'l_wrist_roll_link'
        #    self.tool_frame = 'l_gripper_tool_frame'
        #else:
        #    ptopic = '/pressure/r_gripper_motor'
        #    self.arm_obj = self.pr2.right
        #    self.ik_frame = 'r_wrist_roll_link'
        #    self.tool_frame = 'r_gripper_tool_frame'
        #self.movement_mode = 'ik' #or cart

        #rospy.Subscriber('cursor3d', PointStamped, self.laser_point_handler)
        #self.double_click = rospy.Subscriber('mouse_left_double_click', String, self.double_click_cb)

    #def set_movement_mode_ik(self):
    #    self.movement_mode = 'ik'
    #    self.reactive_gr.cm.switch_to_joint_mode()
    #    self.reactive_gr.cm.freeze_arm()

    #def set_movement_mode_cart(self):
    #    self.movement_mode = 'cart'


                #pdb.set_trace()
                #self.gather_interest_point_dataset(point)
                #point = np.matrix([ 0.60956734, -0.00714498,  1.22718197]).T
                #pressure_parameters = range(1900, 2050, 30)

                #self.record_perceptual_data(point)
                #successes = []
                #parameters = [np.matrix([-.15, 0, 0]).T, 300, np.matrix([-.005, 0, 0]).T, 3500, np.matrix([0,0,-.15]).T, .03]

                #for p in pressure_parameters:
                #    experiment = []
                #    for i in range(4):
                #        #Turn off lights
                #        rospy.loginfo('Experimenting with press_pressure = %d' % p)
                #        success_off = self.light_switch1(point,
                #                        point_offset=np.matrix([-.15,0,0]).T, press_contact_pressure=300, move_back_distance=np.matrix([-.005,0,0]).T,\
                #                        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, visual_change_thres=.03)
                #        experiment.append(success_off)
                #        rospy.loginfo('Lights turned off? %s' % str(success_off))
                #        return

                #        #Turn on lights
                #        success_on = self.light_switch1(point,
                #                        point_offset=np.matrix([-.15,0,-.10]).T, press_contact_pressure=300, move_back_distance=np.matrix([-0.005, 0, 0]).T,
                #                        press_pressure=3500, press_distance=np.matrix([0,0,.1]).T, visual_change_thres=.03)
                #        #def light_switch1(self, point,
                #        #        point_offset, press_contact_pressure, move_back_distance,
                #        #        press_pressure, press_distance, visual_change_thres):

                #        print 'Lights turned on?', success_on
                #    successes.append(experiment)

                #ut.save_pickle({'pressure': pressure_parameters,
                #                'successes': successes}, 'pressure_variation_results.pkl')


        #return self.pressure_listener.check_threshold() or self.pressure_listener.check_safety_threshold()
        ##stop if you hit a tip, side, back, or palm
        #(left_touching, right_touching, palm_touching) = self.reactive_gr.check_guarded_move_contacts()
        ##saw a contact, freeze the arm
        #if left_touching or right_touching or palm_touching:
        #    rospy.loginfo("CONTACT made!")
        #    return True
        #else:
        #    return False

        #print 'move returning'
        #return whether the left and right fingers were touching
        #return (left_touching, right_touching, palm_touching)


    #def execute_action_list(self):

    #def run(self, seed):
    #    # search for pairs of perception operators and manipulation operators that would work
    #    population = 10
    #    seeds = []
    #    for i in range(population):
    #        aseed = copy.deepcopy(seed)
    #        # param types: 'bool', 'radian', 'se3', 'r3', 'discrete'
    #        new_seed_actions = []
    #        for action in aseed:

    #            #each action can be replaced, deleted, or followed by an
    #            #insertion, or have one of its parameters perturbed
    #            if replace_action:
    #                pass

    #            if delete_action:
    #                pass

    #            if insert_action:
    #                #pick a random action from the descriptors list
    #                new_action = Action(random.choice(self.descriptors.keys()), [])
    #                new_seed_actions.append(new_action)

    #            if perturb_parameter:
    #                num_params = len(action.params)
    #                rand_param_idx = random.randint(0, num_params - 1)
    #                rand_param_type = self.descriptors[action.name].inputs[rand_param_idx]
    #                #perturb action.params[rand_param_idx] according to rand_param_type.ptype

    #    #pdb.set_trace()
    #    print seed
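# A self-contained sketch of the replace/delete/insert mutation that the
# commented run() above gestures at. Actions are reduced to plain names for
# brevity; the real version would construct Action objects and perturb their
# typed parameters as sketched in the comments above.
import random

def mutate(seed_actions, descriptor_names, p_mutate=0.25):
    new_actions = []
    for action in seed_actions:
        if random.random() < p_mutate:
            op = random.choice(['replace', 'delete', 'insert'])
        else:
            op = 'keep'
        if op == 'keep':
            new_actions.append(action)
        elif op == 'replace':
            new_actions.append(random.choice(descriptor_names))
        elif op == 'insert':
            new_actions.append(action)
            new_actions.append(random.choice(descriptor_names))
        # 'delete' appends nothing
    return new_actions

# Example: mutate(['linear_move', 'twist'], ['linear_move', 'twist'])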
01180 
01181         #point = np.matrix([0.63125642, -0.02918334, 1.2303758 ]).T
01182         #print 'move direction', movement.T
01183         #print 'CORRECTING', point.T
01184         #print 'NEW', point.T
01185         #start_location = (np.matrix([0.25, 0.15, 0.7]).T, np.matrix([0., 0., 0., 0.1]))
01186         #movement = np.matrix([.4, 0., 0.]).T
        #what other behaviors would I want?
        # touch, then move away
        # move back, but more slowly
        # i.e., a safe physical exploration strategy
        # (a runnable sketch of this idea follows these scraps)
01192         #self.behaviors.linear_move(self.behaviors.current_location(), back_alittle, stop='none')
01193         #loc_before = self.behaviors.current_location()[0]
01194         #loc_after = self.behaviors.current_location()[0]
01195         #pdb.set_trace()
01196         #self.behaviors.linear_move(self.behaviors.current_location(), down, stop='pressure_accel')
01197         #self.behaviors.linear_move(self.behaviors.current_location(), back, stop='none')
01198         #pdb.set_trace()
01199         #b.twist(math.radians(30.))
01200         #bd = BehaviorDescriptor()
01201         #movement = point - self.behaviors.current_location()[0]
01202         #pdb.set_trace()
01203         #self.behaviors.linear_move(self.behaviors.current_location(), movement, stop='pressure_accel')
01204 
01205         #loc = self.behaviors.current_location()[0]
01206         #front_loc = point.copy()
01207         #front_loc[0,0] = loc[0,0]
01208         #self.behaviors.set_pressure_threshold(150)
01209         #self.behaviors.move_absolute((front_loc, self.behaviors.current_location()[1]), stop='pressure_accel')
01210         #self.behaviors.move_absolute((point, self.behaviors.current_location()[1]), stop='pressure_accel')
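# The safe-exploration idea noted above as a runnable sketch: a guarded
# approach until contact, then a slow retreat with no stop condition. The
# behaviors object is assumed to expose the linear_move / current_location
# API used throughout these scraps.
import numpy as np

def touch_and_retreat(behaviors, approach, retreat_dist=0.05):
    loc_before = behaviors.current_location()[0]
    # guarded move: stop on pressure or acceleration spike
    behaviors.linear_move(behaviors.current_location(), approach, stop='pressure_accel')
    loc_after = behaviors.current_location()[0]
    # retreat along the reversed approach direction
    back = -retreat_dist * approach / np.linalg.norm(approach)
    behaviors.linear_move(behaviors.current_location(), back, stop='none')
    return loc_before, loc_after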
01211 
01212 
01213 
01214 
    #def detect_event(self):
    #    self.behaviors.cman._start_gripper_event_detector(timeout=40.)
    #    stop_func = self.behaviors._tactile_stop_func
    #    while not stop_func():
    #        rospy.sleep(.1)
01219 
01220         #pass
01221         #self.robot = pr2.PR2()
01222         #self.kin = pk.PR2Kinematics(self.robot.tf_listener)
01223 
    #def linear_move(self, start_location, direction, distance, arm):
    #    if arm == 'left':
    #        arm_kin, arm_obj = self.kin.left, self.robot.left_arm
    #    else:
    #        arm_kin, arm_obj = self.kin.right, self.robot.right_arm

    #    start_pose = arm_kin.ik(start_location)
    #    loc = start_location[0:3, 3]
    #    end_location = loc + distance*direction
    #    end_pose = arm_kin.ik(end_location)

    #    arm_obj.set_pose(start_pose, 5.)
    #    arm_obj.set_pose(end_pose, 5.)
01237 
01238             ##stop if you hit a tip, side, back, or palm
01239             #(left_touching, right_touching, palm_touching) = rg.check_guarded_move_contacts()
01240             ##saw a contact, freeze the arm
01241             #if left_touching or right_touching or palm_touching:
01242             #    rospy.loginfo("saw contact")
01243             #    rg.cm.switch_to_joint_mode()
01244             #    rg.cm.freeze_arm()
01245             #    break
01246 
01247     #import pdb
01248     #start_location = [0.34, 0.054, 0.87] + [0.015454981255042808, -0.02674860197736427, -0.012255429236635201, 0.999447577565171]
01249     #direction = np.matrix([1., 0., 0.]).T
01250 
01251     #self.reactive_l.move_cartesian_step(start_location, blocking = 1)
01252     #(left_touching, right_touching, palm_touching) = self.reactive_l.guarded_move_cartesian(grasp_pose, 10.0, 5.0)
01253         #self.cman_r     = con.ControllerManager('r')
01254         #self.reactive_r = rgr.ReactiveGrasper(self.cman_r)
01255 
01256         #self.cman_r.start_joint_controllers()
01257         #self.reactive_r.start_gripper_controller()
01258     
01259         #(pos, rot) = self.cman.return_cartesian_pose()
01260         #pdb.set_trace()
01261         #currentgoal = pos + rot
01262         #currentgoal[2] -= .05
01263         #self.reactive_l.move_cartesian_step(currentgoal, blocking = 1)
01264         #(left_touching, right_touching, palm_touching) = self.reactive_l.guarded_move_cartesian(grasp_pose, 10.0, 5.0)
01265         #exit()
01266         #end_loc = start_location + direction * distance
    #self.reactive_l.move_cartesian_step(start_location, blocking = 1)
01268         #self.reactive_l.move_cartesian_step(end_loc, blocking = 1)
01269     #left_pose = b.robot.left.pose()
01270     #left_cart = ut.load_pickle('start_pose.pkl')
01271     #pdb.set_trace()
01272     #kin_sol = b.kin.left.ik(left_cart)
01273     #b.robot.left.set_pose(kin_sol, 5.)
01274     ##b.linear_move(left_cart)
01275     ##left_cart = b.kin.left.fk(left_pose)
01276     ##pdb.set_trace()
01277     #print left_cart
01278 
01279     #(pos, rot) = cm.return_cartesian_pose()
01280     #currentgoal = pos+rot
01281     #currentgoal[2] -= .05
01282     #rg.move_cartesian_step(currentgoal, blocking = 1)
01283     #exit()
01284 
01285 
01286 #b.linear_move()
01287 #cart_pose = kin.left.fk('torso_lift_link', 'l_wrist_roll_link', joints)
01288 #kin.left.ik(cart_pose, 'torso_lift_link')
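# The two commented lines above are an FK -> IK round trip. As a toy,
# self-contained illustration of that consistency check (the real kin object
# wraps PR2 kinematics), here is a planar 2-link arm where ik(fk(q))
# recovers q for the elbow-down solution:
import math

def fk_2link(q, l1=1., l2=1.):
    x = l1 * math.cos(q[0]) + l2 * math.cos(q[0] + q[1])
    y = l1 * math.sin(q[0]) + l2 * math.sin(q[0] + q[1])
    return (x, y)

def ik_2link(pos, l1=1., l2=1.):
    x, y = pos
    c2 = (x**2 + y**2 - l1**2 - l2**2) / (2. * l1 * l2)
    q2 = math.acos(max(-1., min(1., c2)))  # elbow-down branch
    q1 = math.atan2(y, x) - math.atan2(l2 * math.sin(q2), l1 + l2 * math.cos(q2))
    return (q1, q2)

# ik_2link(fk_2link((0.3, 0.8))) is approximately (0.3, 0.8)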
01289 
01290     #def light_switch1_on(self, point, press_pressure=3500, press_contact_pressure=150):
01291     #    point = point + np.matrix([-.15, 0, -0.20]).T
01292 
01293     #    success, reason = self.behaviors.reach(point)
01294     #    if not success:
01295     #        rospy.loginfo('Reach failed due to "%s"' % reason)
01296 
01297     #    rospy.loginfo('PRESSING')
01298     #    success, reason = self.behaviors.press(np.matrix([0, 0, .20]).T, \
01299     #            press_pressure, press_contact_pressure)
01300     #    if not success:
01301     #        rospy.loginfo('Press failed due to "%s"' % reason)
01302     #        return 
01303 
    #    rospy.loginfo('RESETTING')
    #    r2 = self.behaviors.move_absolute(self.start_location, stop='pressure_accel')
    #    if r2 is not None:
01307     #        rospy.loginfo('moving back to start location failed due to "%s"' % r2)
01308     #        return 
01309 
01310     #    print 'DONE.'
01311 
01312 
01313     #def _tactile_stop_func(self):
01314     #    r1 = self.pressure_listener.check_threshold() 
01315     #    r2 = self.pressure_listener.check_safety_threshold()
01316     #    if r1:
01317     #        rospy.loginfo('Pressure exceeded!')
01318     #    if r2:
01319     #        rospy.loginfo('Pressure safety limit EXCEEDED!')
01320     #    return r1 or r2
01327 
01335         #pressure_state = self.pressure_listener.check_threshold() or self.pressure_listener.check_safety_threshold()
01336         #action finished (trigger seen)
01337 
01338 
01339 
01340 
    #def optimize_parameters(self, x0, x_range, behavior, objective_func, reset_env_func, reset_param, converge_tol=1e-2):
    #    reset_retries = 3
    #    num_params = len(x0)
    #    x = copy.deepcopy(x0)

    #    # for each parameter
    #    i = 0
    #    while i < num_params:
    #        #search for a good setting
    #        not_converged = True
    #        xmin = x_range[i, 0]
    #        xmax = x_range[i, 1]

    #        while not_converged:
    #            current_val = x[i]
    #            candidates_i = [(x[i] + xmin) / 2., (x[i] + xmax) / 2.]
    #            successes = []
    #            for cand in candidates_i:
    #                x[i] = cand
    #                success = behavior(x)
    #                if success:
    #                    for reset_i in range(reset_retries):
    #                        reset_success = reset_env_func(*reset_param)
    #                        if reset_success:
    #                            break
    #                successes.append(success)

    #            if successes[0] and successes[1]:
    #                raise RuntimeError("What? This isn't supposed to happen.")
    #            elif successes[0] and not successes[1]:
    #                next_val = candidates_i[0]
    #            elif successes[1] and not successes[0]:
    #                next_val = candidates_i[1]
    #            else:
    #                #both candidates failed; bail out and backtrack below
    #                break

    #            x[i] = next_val
    #            not_converged = abs(next_val - current_val) > converge_tol

    #        #if all the trials were bad, go back by one parameter
    #        if not any(successes):
    #            i = max(i - 1, 0)
    #            continue

    #        #if there is more than one good parameter, rank them with the objective
    #        for p in params:
    #            score = objective_func(p)

    #        i = i + 1

    #    return x
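# A self-contained sketch of the coordinate-wise search the commented
# optimize_parameters outlines, with environment resets omitted: for each
# parameter, step to the midpoint candidate that succeeds, and stop when the
# step shrinks below a tolerance or neither/both candidates succeed.
import copy

def optimize_parameters_sketch(x0, x_range, behavior, converge_tol=1e-2):
    x = copy.deepcopy(x0)
    for i in range(len(x)):
        xmin, xmax = x_range[i]
        while True:
            candidates = [(x[i] + xmin) / 2., (x[i] + xmax) / 2.]
            successes = [behavior(x[:i] + [c] + x[i+1:]) for c in candidates]
            if successes[0] == successes[1]:
                break  # ambiguous outcome; keep the current value
            next_val = candidates[0] if successes[0] else candidates[1]
            converged = abs(next_val - x[i]) < converge_tol
            x[i] = next_val
            if converged:
                break
    return x

# Example: optimize_parameters_sketch([3000.], [(1000., 5000.)],
#                                     lambda v: v[0] >= 2500.)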
01391 
01392 
01393 
01394 
    #def autonomous_learn(self, point3d_bl, behavior, object_name):
    #    # We learn, but must moderate between spatial cues and the requirements
    #    # of the learner. The spatial cue is a heuristic that can guide us to
    #    # positive examples; the learning heuristic reduces the number of
    #    # experiments to perform, given that we are generally *not* successful
    #    # (assume this procedure launches only under non-mission-critical
    #    # circumstances). So while actively learning, we ignore the spatial
    #    # heuristic. Alternatively, could we incorporate distance to the
    #    # selected 3d point as a feature?
01404     #    learn_manager = self.learners[object_name]
01405     #    #scan and extract features
01406     #    self.robot.head.look_at(point3d_bl, 'base_link', True)
01407     #    learn_manager.scan(point3d_bl)
01408     #    gaussian = pr.Gaussian(np.matrix([ 0,      0,     0.]).T, \
01409     #                           np.matrix([[1.,     0,      0], \
01410     #                                      [0, .02**2,      0], \
01411     #                                      [0,      0, .02**2]]))
01412 
01413     #    #pdb.set_trace()
01414     #    gaussian_noise = np.matrix([0,0,0.]).T
01415     #    while not learn_manager.is_ready():
01416     #         pi = point3d_bl + gaussian_noise
01417     #         label = behavior(pi)
01418     #         #look at point, then try to add again
01419     #         if not learn_manager.add_example(pi, np.matrix([label])):
01420     #             rospy.logerr('Unable to extract features from point %s' % str(pi.T))
01421     #             continue
01422     #         learn_manager.train()
01423     #         learn_manager.draw_and_send()
01424     #         gaussian_noise = gaussian.sample()
01425     #         gaussian_noise[0,0] = 0
01426 
01427     #    #Acquire data
01428     #    #Given image, cloud, 3d point ask, extract features.
01429     #    #while no_interruptions and stopping_criteria_not_reached
01430     #    #    maximally_informative_point = get maximally informative point
01431     #    #    label = behavior(maximally_informative_point)
01432     #    #    retrain!
01433     #    converged = False
01434     #    while not converged:
01435     #        indices, dists = learn_manager.select_next_instances(1)
    #        if indices is not None:
01437     #            pt2d = learn_manager.points2d[:, indices[0]]
01438     #            pt3d = learn_manager.points3d[:, indices[0]]
01439     #            label = behavior(pt3d)
    #            if not learn_manager.add_example(pt3d, np.matrix([label]), pt2d):
    #                rospy.logerr('Unable to extract features from point %s' % str(pt3d.T))
01443     #                continue
01444     #            learn_manager.train()
01445     #            learn_manager.draw_and_send()
01446     #        else:
01447     #            converged = True
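# A condensed, runnable shape of the acquire/label/retrain loop above: query
# the most informative instance, label it by running the behavior, retrain,
# and stop when nothing informative remains. The learner method names
# (select_next_instances, points3d, add_example, train) follow the scraps.
import numpy as np

def active_learning_loop(learner, behavior, max_queries=50):
    for i in range(max_queries):
        indices, dists = learner.select_next_instances(1)
        if indices is None or len(indices) == 0:
            return True  # converged: nothing informative left to ask about
        pt3d = learner.points3d[:, indices[0]]
        label = behavior(pt3d)
        if learner.add_example(pt3d, np.matrix([label])):
            learner.train()
    return False  # query budget exhausted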
01448 
01449 
01450 
01451     #def gather_interest_point_dataset(self, point):
01452     #    gaussian = pr.Gaussian(np.matrix([0, 0, 0.]).T, \
01453     #            np.matrix([[1., 0, 0], \
01454     #                       [0, .02**2, 0], \
01455     #                       [0, 0, .02**2]]))
01456 
01457     #    for i in range(100):
01458     #        # perturb_point
01459     #        gaussian_noise = gaussian.sample()
01460     #        gaussian_noise[0,0] = 0
01461     #        success_off, touchloc_bl = self.light_switch1(point, 
01462     #                        point_offset=np.matrix([-.15, 0, 0]).T, press_contact_pressure=300, 
01463     #                        move_back_distance=np.matrix([-.005,0,0]).T, press_pressure=2500, 
01464     #                        press_distance=np.matrix([0,0,-.15]).T, visual_change_thres=.03)
01465     #        rospy.loginfo('Lights turned off? %s' % str(success_off))
01466 
01467     #        pdb.set_trace()
01468     #        self.behaviors.movement.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
01469     #        self.record_perceptual_data(touchloc_bl)
01470     #        self.behaviors.movement.move_absolute(self.start_location, stop='pressure_accel')
01471     #        if success_off:
01472     #            self.behaviors.movement.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
01473     #            self.record_perceptual_data(touchloc_bl)
01474     #            self.behaviors.movement.move_absolute(self.start_location, stop='pressure_accel')
01475 
01476     #            success_on, touchloc_bl2 = self.light_switch1(point, 
01477     #                            point_offset=np.matrix([-.15,0,-.10]).T, press_contact_pressure=300, 
01478     #                            move_back_distance=np.matrix([-0.005, 0, 0]).T, press_pressure=2500, 
01479     #                            press_distance=np.matrix([0,0,.1]).T, visual_change_thres=.03)
    #            ##1
    #            #if success_on:
    #            #    self.behaviors.movement.move_absolute((np.matrix([.15, .45, 1.3]).T, self.start_location[1]), stop='pressure_accel')
    #            #    self.record_perceptual_data(touchloc_bl)
    #            #    self.behaviors.movement.move_absolute(self.start_location, stop='pressure_accel')
    #            #Turn on lights
    #            #success_on, touchloc_bl = self.light_switch1(point, 
01487     #        else:
01488     #            return
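# The perturbation pattern above, sketched with numpy in place of the
# pr.Gaussian helper: sample Gaussian noise around the nominal point with the
# x (depth) component frozen at zero, as the gaussian_noise[0,0] = 0 line does.
import numpy as np

def perturbed_points(point, n=100, sigma=0.02):
    for i in range(n):
        noise = np.matrix(np.random.multivariate_normal(
            np.zeros(3), np.diag([0., sigma**2, sigma**2]))).T
        yield point + noise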
01489 
01490 
01491     #def record_perceptual_data_laser_scanner(self, point_touched_bl):
01492     #    #what position should the robot be in?
01493     #    #set arms to non-occluding pose
01494 
01495     #    #record region around the finger where you touched
01496     #    rospy.loginfo('Getting laser scan.')
01497     #    points = []
01498     #    for i in range(3):
01499     #        rospy.loginfo('scan %d' % i)
01500     #        points.append(self.laser_scan.scan(math.radians(180.), math.radians(-180.), 20./3.))
01501 
01502     #    rospy.loginfo('Getting Prosilica image.')
01503     #    prosilica_image = self.prosilica.get_frame()
01504     #    rospy.loginfo('Getting image from left wide angle camera.')
01505     #    left_image  = self.wide_angle_camera_left.get_frame()
01506     #    rospy.loginfo('Getting image from right wide angle camera.')
    #    right_image = self.wide_angle_camera_right.get_frame()
    #    rospy.loginfo('Waiting for calibration.')
    #    while not self.prosilica_cal.has_msg:
    #        time.sleep(.1)

    #    #which frames?
    #    rospy.loginfo('Getting transforms.')
    #    pro_T_bl = tfu.transform('/high_def_optical_frame', '/base_link', self.tf_listener)
01515     #    laser_T_bl = tfu.transform('/laser_tilt_link', '/base_link', self.tf_listener)
01516     #    tstring = time.strftime('%A_%m_%d_%Y_%I:%M%p')
01517     #    prosilica_name = '%s_highres.png' % tstring
01518     #    left_name = '%s_left.png' % tstring
01519     #    right_name = '%s_right.png' % tstring
01520     #    rospy.loginfo('Saving images (basename %s)' % tstring)
01521     #    cv.SaveImage(prosilica_name, prosilica_image)
01522     #    cv.SaveImage(left_name, left_image)
01523     #    cv.SaveImage(right_name, right_image)
01524 
01525     #    rospy.loginfo('Saving pickles')
01526     #    pickle_fname = '%s_interest_point_dataset.pkl' % tstring   
01527 
01528     #    data_pkl = {'touch_point': point_touched_bl,
01529     #                'points_laser': points,
01530     #                'laser_T_bl': laser_T_bl, 
01531     #                'pro_T_bl': pro_T_bl,
01532 
01533     #                'high_res': prosilica_name,
01534     #                'prosilica_cal': self.prosilica_cal, 
01535 
01536     #                'left_image': left_name,
01537     #                'left_cal': self.left_cal,
01538 
01539     #                'right_image': right_name,
01540     #                'right_cal': self.right_cal}
01541     #                #'point_touched': point_touched_bl}
01542     #                
01543 
01544     #    ut.save_pickle(data_pkl, pickle_fname)
01545     #    print 'Recorded to', pickle_fname
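# The timestamped-pickle pattern used above, as a small standalone helper;
# ut.save_pickle in these scraps is assumed to wrap something like this.
import pickle
import time

def save_interest_point_dataset(data_pkl, tstring=None):
    tstring = tstring or time.strftime('%A_%m_%d_%Y_%I:%M%p')
    fname = '%s_interest_point_dataset.pkl' % tstring
    with open(fname, 'wb') as f:
        pickle.dump(data_pkl, f)
    return fname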
01546 
01547 
01548 
01549             #if mode == 'autonomous learn':
01550             #    def light_behavior(point):
01551             #        point_offset = np.matrix([0, 0, 0.03]).T
01552             #        success, _ = self.light_switch1(point, point_offset=point_offset, \
01553             #                        press_contact_pressure=300, move_back_distance=np.matrix([-.0075,0,0]).T,\
01554             #                        press_pressure=3500, press_distance=np.matrix([0,0,-.15]).T, \
01555             #                        visual_change_thres=.03)
01556             #        if success:
01557             #            return 1.0
01558             #        else:
01559             #            return 0.0
01560 
01561             #    self.untuck()
01562             #    self.behaviors.movement.move_absolute(self.start_location, stop='pressure')
01563             #    self.behaviors.movement.pressure_listener.rezero()
01564             #    self.autonomous_learn(point_bl, light_behavior, 'light_switch')
01565 
01566             #if mode == 'location activated':
01567             #    self.location_activated_behaviors(point_bl)
01568 
01569         #elif mode == 'location activated':
01570         #    all_locs = self.locations_man.list_all()
01571         #    for i, pair in enumerate(all_locs):
01572         #        key, task = pair
01573         #        print i, task, key
01574 
01575         #    rospy.loginfo('Select location to execute action')
01576         #    selected = int(raw_input())
01577 
01578 
01579             #if mode == 'practice':
01580             #    self.add_to_practice_points_map(point_bl)
01581 
01582 
01583                 ##If that location is new:
01584                 #map_T_base_link = tfu.transform('map', 'base_link', self.tf_listener)
01585                 #point_map = tfu.transform_points(map_T_base_link, point_bl)
01586                 #close_by_locs = self.locations_man.list_close_by(point_map)
01587 
01588                 #if len(close_by_locs) <= 0:
01589                 #    #initialize new location
01590                 #    rospy.loginfo('Select task type:')
01591                 #    for i, ttype in enumerate(self.locations_man.task_types):
01592                 #        print i, ttype
                #    task_type = self.locations_man.task_types[int(raw_input())]
01594                 #    task_id = self.locations_man.create_new_location(task_type, point_map)
01595 
                #    rospy.loginfo('If a dataset already exists, enter its filename (e.g. friday_730_light_switch2.pkl)')
01598                 #    filename = raw_input()
01599                 #    if len(filename) > 0:
01600                 #        dataset = ut.load_pickle(filename)
01601                 #        self.locations_man.data[task_id]['dataset'] = dataset
01602                 #        self.locations_man.train(dataset, task_id)
01603                 #    else:
01604                 #        self.last_ditch_execution(
01605 
01606                 #elif len(close_by_locs) == 1:
01607                 #    task_id, task = close_by_locs[0]
                #    rospy.loginfo('Executing task %s with id %s', task, task_id)
01609                 #    self.execute_behavior(task_id, point_bl)
01610 
01611                 #elif len(close_by_locs) > 1:
01612                 #    #TODO: implement this case
01613                 #    rospy.logerr('ERROR: unimplemented')
01614                 #    pdb.set_trace()
01615                 #    self.execute_behavior(task_id, point_bl)
01616                 #else:
01617                 #    rospy.logerr('ERROR: we shouldn\'t have reached here')
01618                 #    pdb.set_trace()
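# The three-way dispatch in the block above, condensed into a sketch: an
# unseen location is initialized, a single match is executed, and multiple
# matches stay unimplemented (per the TODO). locations_man and the callbacks
# stand in for the interfaces used by these scraps.
def dispatch_close_by_location(locations_man, point_map, execute, initialize):
    close_by_locs = locations_man.list_close_by(point_map)
    if len(close_by_locs) == 0:
        return initialize(point_map)
    elif len(close_by_locs) == 1:
        task_id, task = close_by_locs[0]
        return execute(task_id)
    else:
        raise NotImplementedError('multiple close-by locations')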
01619 


trf_learn
Author(s): Hai Nguyen (hai@gatech.edu). Advisor: Prof. Charlie Kemp. Lab: Healthcare Robotics Lab at Georgia Tech.
autogenerated on Wed Nov 27 2013 11:47:18