import numpy as np, math
import scipy.stats as stats
from threading import RLock

import roslib; roslib.load_manifest('hrl_pr2_lib')
import rospy

from hrl_lib.util import save_pickle, load_pickle
from hrl_lib.msg import FloatArray
from hrl_lib.rutils import GenericListener, ros_to_dict, RateCaller
from hrl_lib.data_process import signal_smooth, signal_variance, signal_list_variance
from tf.transformations import *

from std_msgs.msg import Float64MultiArray
from pr2_msgs.msg import AccelerometerState, PressureState
from sensor_msgs.msg import JointState

import threading
import functools as ft
import Queue

import time, string

node_name = "arm_perception_monitor"

def log(msg):
    rospy.loginfo(node_name + ": " + msg)


def accel_state_processor(msg):
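    """Average the accelerometer samples in an AccelerometerState message.

    Returns (timestamp_nsec, np.array([x, y, z])), or None if the message
    carries no samples.
    """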
    xyz = np.array([0.]*3)
    if msg.samples is None or len(msg.samples) == 0:
        return None
    for samp in msg.samples:
        xyz[0] += samp.x
        xyz[1] += samp.y
        xyz[2] += samp.z
    xyz[0] /= len(msg.samples)
    xyz[1] /= len(msg.samples)
    xyz[2] /= len(msg.samples)
    return (msg.header.stamp.to_nsec(), xyz)

r_jt_idx_list = [17, 18, 16, 20, 19, 21, 22]
l_jt_idx_list = [31, 32, 30, 34, 33, 35, 36]
joint_nm_list = ['shoulder_pan', 'shoulder_lift', 'upper_arm_roll',
                 'elbow_flex', 'forearm_roll', 'wrist_flex',
                 'wrist_roll']


def joints_state_processor(msg, right_arm=True, angles_velocities_efforts=0):
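    """Extract the seven arm joint values from a JointState message.

    angles_velocities_efforts selects the field: 0 = position, 1 = velocity,
    2 = effort.  Returns (timestamp_nsec, np.array of 7 values).
    """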
    ret = np.array([0.] * 7)
    for i, nm in enumerate(joint_nm_list):
        if right_arm:
            idx = r_jt_idx_list[i]
            prefix = 'r_'
        else:
            idx = l_jt_idx_list[i]
            prefix = 'l_'
        if msg.name[idx] != prefix + nm + '_joint':
            raise RuntimeError('joint angle name does not match. Expected: %s, Actual: %s i: %d' % (prefix + nm + '_joint', msg.name[idx], i))
        if angles_velocities_efforts == 1:
            ret[i] = msg.velocity[idx]
        elif angles_velocities_efforts == 2:
            ret[i] = msg.effort[idx]
        else:
            ret[i] = msg.position[idx]
    return (msg.header.stamp.to_nsec(), ret)

def pressure_state_processor(msg, right_finger_tip=True, indicies=None):
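    """Pull selected pressure-sensor cells from a PressureState message.

    'indicies' lists the cell indices to read (defaults to every cell of the
    chosen fingertip).  Returns (timestamp_nsec, np.array of readings).
    """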
    if indicies is None:
        indicies = range(len(msg.r_finger_tip))
    ret = np.array([0.] * len(indicies))
    for i, ind in enumerate(indicies):
        if right_finger_tip:
            ret[i] = msg.r_finger_tip[ind]
        else:
            ret[i] = msg.l_finger_tip[ind]
    return (msg.header.stamp.to_nsec(), ret)


class PeriodicLogger():
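    """Calls a callback at a fixed rate on a timer thread and collects the
    returned values until stop() is called or max_calls is reached.
    """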

    def __init__(self, callback, rate=0.01, args=None):
        self.ret = []
        self.cb = callback
        self.rate = rate
        self.args = args
        self.is_running = False
        self.max_calls = None
        self.num_calls = 0
        self.beg_time = 0.
        self.thread = None

    def start(self, max_calls=None):
        if self.is_running:
            return
        self.max_calls = max_calls
        self.is_running = True
        self.num_calls = 0
        self.beg_time = rospy.Time.now().to_sec()
        self.thread = threading.Timer(self.rate, self._run)
        self.thread.start()

    def _run(self):
        if not self.is_running:
            return

        # schedule the next call relative to the start time to avoid drift
        act_time = self.beg_time + self.rate * (self.num_calls + 2)
        interval = act_time - rospy.Time.now().to_sec()
        self.thread = threading.Timer(interval, self._run)
        self.thread.start()

        if self.args is None:
            retval = self.cb()
        else:
            retval = self.cb(*self.args)
        self.ret += [retval]

        self.num_calls += 1

        if self.max_calls is not None:
            if self.num_calls == self.max_calls:
                self.is_running = False
                return

    def stop(self):
        if self.thread is not None:
            self.thread.cancel()
        if not self.is_running:
            return None
        self.is_running = False
        return self.ret

    def get_ret_vals(self):
        if self.is_running:
            return None
        return self.ret


class PeriodicMonitor():
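    """Like PeriodicLogger, but compares each callback reading against a
    mean/variance model and fires a contingency callback when a coordinate
    deviates by more than std_devs standard deviations.
    """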

    def __init__(self, callback, rate=0.01, args=None):
        self.ret = []
        self.cb = callback
        self.rate = rate
        self.args = args
        self.is_running = False
        self.num_calls = 0
        self.beg_time = 0.
        self.thread = None
        self.mean_model = None
        self.variance_model = None
        self.std_devs = 0.
        self.failure = False

    def start(self, mean_model, variance_model, std_devs=2.5, max_calls=None,
              contingency=None, contingency_args=None):
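        """Begin monitoring.  mean_model and variance_model are sequences of
        per-coordinate means and variances, one entry per expected callback
        reading; contingency is called (with contingency_args) on failure.
        """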
        if len(mean_model) != len(variance_model):
            log("Models must be of same length")
            return
        if self.is_running:
            return
        self.is_running = True
        self.mean_model = mean_model
        self.variance_model = variance_model
        self.std_devs = std_devs
        self.max_calls = max_calls
        self.contingency = contingency
        self.contingency_args = contingency_args
        self.model_index = 0
        self.failure = False

        self.num_calls = 0
        self.beg_time = rospy.Time.now().to_sec()
        self.thread = threading.Timer(self.rate, self._run)
        self.thread.start()

    def _run(self):
        if not self.is_running:
            return

        # schedule the next call relative to the start time to avoid drift
        act_time = self.beg_time + self.rate * (self.num_calls + 2)
        interval = act_time - rospy.Time.now().to_sec()
        self.thread = threading.Timer(interval, self._run)
        self.thread.start()

        if self.args is None:
            retval = self.cb()
        else:
            retval = self.cb(*self.args)

        # compare every coordinate of the reading against the current model point
        for coord_i in range(len(retval[1])):
            diff = abs(retval[1][coord_i] - self.mean_model[self.model_index][coord_i])
            deviation = np.sqrt(self.variance_model[self.model_index][coord_i])
            if diff > self.std_devs * deviation:
                self.failure = True
                self.is_running = False

                if self.contingency is not None:
                    if self.contingency_args is None:
                        self.contingency()
                    else:
                        self.contingency(*self.contingency_args)
                return

        self.ret += [retval]
        self.model_index += 1
        if self.model_index == len(self.mean_model):
            self.is_running = False
            return

        if self.max_calls is not None:
            self.max_calls -= 1
            if self.max_calls == 0:
                self.is_running = False
                return

    def stop(self):
        if self.thread is not None:
            self.thread.cancel()
        if not self.is_running:
            return None
        self.is_running = False
        return self.ret

    def get_ret_vals(self):
        if self.is_running:
            return None
        return self.ret

    def has_failed(self):
        return self.failure

    def wait_for_completion(self, rate=0.01):
        while self.is_running and not rospy.is_shutdown():
            rospy.sleep(rate)
        return not self.failure


class ArmPerceptionMonitor():
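    """Monitors perception channels on one PR2 arm (accelerometer, joint
    states, fingertip pressure, gripper pose), builds mean/variance models
    from training runs, and detects collisions online by comparing new
    readings against those models.
    """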

    def __init__(self, arm, tf_listener=None, rate=0.001,
                 percept_mon_list=None, model_zeros=None):
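        """arm: 0 for the right arm, anything else for the left arm.
        percept_mon_list restricts which perception channels are monitored;
        by default all of self.perception_names are used.
        """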
00312 log("Initializing arm perception listeners")
00313
00314 self.rate = rate
00315
00316 if arm == 0:
00317 armc = "r"
00318 is_right_arm = True
00319 else:
00320 armc = "l"
00321 is_right_arm = False
00322
00323 self.model_zeros = model_zeros
00324
00325 self.perceptions = {}
00326
00327 self.perception_names = [ "accelerometer",
00328 "joint_angles",
00329 "joint_velocities",
00330 "joint_efforts",
00331 "r_finger_periph_pressure",
00332 "r_finger_pad_pressure",
00333 "l_finger_periph_pressure",
00334 "l_finger_pad_pressure",
00335 "gripper_pose"]
00336
00337 if percept_mon_list is None:
00338 percept_mon_list = self.perception_names
00339
00340 if "accelerometer" in percept_mon_list:
00341 accel_listener = GenericListener("accel_mon_node", AccelerometerState,
00342 "accelerometer/" + armc + "_gripper_motor",
00343 rate, accel_state_processor)
00344 self.perceptions["accelerometer"] = accel_listener.read

        if "joint_angles" in percept_mon_list:
            joint_angles_processor = ft.partial(joints_state_processor,
                                                right_arm=is_right_arm,
                                                angles_velocities_efforts=0)
            joint_angles_listener = GenericListener("joint_angles_mon_node", JointState,
                                                    "joint_states", rate, joint_angles_processor)
            self.perceptions["joint_angles"] = joint_angles_listener.read

        if "joint_velocities" in percept_mon_list:
            joint_velocities_processor = ft.partial(joints_state_processor,
                                                    right_arm=is_right_arm,
                                                    angles_velocities_efforts=1)
            joint_velocities_listener = GenericListener("joint_velocities_mon_node", JointState,
                                                        "joint_states", rate, joint_velocities_processor)
            self.perceptions["joint_velocities"] = joint_velocities_listener.read

        if "joint_efforts" in percept_mon_list:
            joint_efforts_processor = ft.partial(joints_state_processor,
                                                 right_arm=is_right_arm,
                                                 angles_velocities_efforts=2)
            joint_efforts_listener = GenericListener("joint_efforts_mon_node", JointState,
                                                     "joint_states", rate, joint_efforts_processor)
            self.perceptions["joint_efforts"] = joint_efforts_listener.read

        if "r_finger_periph_pressure" in percept_mon_list:
            r_finger_periph_pressure_processor = ft.partial(pressure_state_processor,
                                                            right_finger_tip=True, indicies=range(1, 7))
            r_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState,
                                                                "pressure/" + armc + "_gripper_motor", rate,
                                                                r_finger_periph_pressure_processor)
            self.perceptions["r_finger_periph_pressure"] = r_finger_periph_pressure_listener.read

        if "r_finger_pad_pressure" in percept_mon_list:
            r_finger_pad_pressure_processor = ft.partial(pressure_state_processor,
                                                         right_finger_tip=True, indicies=range(7, 22))
            r_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState,
                                                             "pressure/" + armc + "_gripper_motor", rate,
                                                             r_finger_pad_pressure_processor)
            self.perceptions["r_finger_pad_pressure"] = r_finger_pad_pressure_listener.read

        if "l_finger_periph_pressure" in percept_mon_list:
            l_finger_periph_pressure_processor = ft.partial(pressure_state_processor,
                                                            right_finger_tip=False, indicies=range(1, 7))
            l_finger_periph_pressure_listener = GenericListener("pressure_mon_node", PressureState,
                                                                "pressure/" + armc + "_gripper_motor", rate,
                                                                l_finger_periph_pressure_processor)
            self.perceptions["l_finger_periph_pressure"] = l_finger_periph_pressure_listener.read

        if "l_finger_pad_pressure" in percept_mon_list:
            l_finger_pad_pressure_processor = ft.partial(pressure_state_processor,
                                                         right_finger_tip=False, indicies=range(7, 22))
            l_finger_pad_pressure_listener = GenericListener("pressure_mon_node", PressureState,
                                                             "pressure/" + armc + "_gripper_motor", rate,
                                                             l_finger_pad_pressure_processor)
            self.perceptions["l_finger_pad_pressure"] = l_finger_pad_pressure_listener.read

        for k in self.perceptions:
            self.perceptions[k] = ft.partial(self.perceptions[k], willing_to_wait=False,
                                             quiet=True,
                                             warn=False, allow_duplication=True)

        if "gripper_pose" in percept_mon_list:
            def gripper_pose_lookup():
                lct = tf_listener.getLatestCommonTime("/torso_lift_link", "/" + armc + "_wrist_roll_link")
                pos, quat = tf_listener.lookupTransform("/torso_lift_link", "/" + armc + "_wrist_roll_link", lct)
                return (lct.to_nsec(), np.array(pos + quat))

            self.perceptions["gripper_pose"] = gripper_pose_lookup

        self.clear_vars()

        self.logger = RateCaller(self._gather_perception, self.rate)

        log("Finished initialization")

    def _gather_perception(self):
        t1 = rospy.Time.now().to_sec()
        for k in self.perceptions:
            self.datasets[k][-1] += [self.perceptions[k]()]
        t2 = rospy.Time.now().to_sec()
        # occasionally print how long one gather pass took (debugging aid)
        import random
        if random.randint(0, 100) == 0:
            print "Time:", t2 - t1

    def clear_vars(self):
        self.datasets = {}
        self.models = {}
        for k in self.perceptions:
            self.datasets[k] = []
            self.models[k] = {"mean": None, "variance": None}

        self.active = False

    def start_training(self, duration=None):
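        """Begin recording a new training trajectory on every perception
        channel.  If duration is given, training stops automatically after
        that many seconds.
        """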
        if self.active:
            log("Perception already active.")
            return
        self.active = True

        for k in self.perceptions:
            self.datasets[k] += [[]]
        self.logger.run()

        if duration is not None:
            threading.Timer(duration, self.stop_training).start()

    def stop_training(self):
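        """Stop recording and return the number of samples captured in the
        run that just ended.
        """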
        if not self.active:
            log("Nothing to stop.")
            return

        self.logger.stop()

        self.active = False
        return len(self.datasets[self.datasets.keys()[0]][-1])

    def save(self, filename):
        save_pickle((self.datasets, self.models), filename)

    def load(self, filename):
        self.datasets, self.models = load_pickle(filename)

    def get_zeros(self, time=4.):
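        """Sample every perception channel for 'time' seconds and return a
        dict of per-channel average readings to use as zero offsets.
        """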
        self._zeros = {}
        for k in self.perceptions:
            self._zeros[k] = None
        self._n = 0

        monitor = RateCaller(self._sum_values, self.rate)
        monitor.run()
        rospy.sleep(time)
        monitor.stop()

        for k in self.perceptions:
            self._zeros[k] /= self._n
        return self._zeros

    def _sum_values(self):
        for k in self.perceptions:
            add_data = np.array(self.perceptions[k]()[1])
            if self._zeros[k] is None:
                self._zeros[k] = np.copy(add_data)
            else:
                self._zeros[k] += add_data
        self._n += 1

    def setup_monitoring(self, models,
                         std_dev_dict=None, noise_dev_dict=None, duration=None,
                         std_dev_default=2.0, noise_dev_default=0.25,
                         tol_thresh_dict=None,
                         contingency=None, window_size=70, current_zeros=None,
                         sampling_rate=1,
                         transform_dict=None, verbose=True, collide=True):
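        """Prepare the internal buffers and per-channel thresholds used by
        begin_monitoring; does not start any timers itself.
        """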
        self.std_dev_default = std_dev_default
        self.noise_dev_default = noise_dev_default

        self.models = models
        self.current_data = {}
        self.std_dev_dict = std_dev_dict
        self.noise_dev_dict = noise_dev_dict
        self.contingency = contingency
        self.window_size = window_size
        self.current_zeros = {}
        self.transform_dict = transform_dict
        self.tol_thresh_dict = tol_thresh_dict
        self.sampling_rate = sampling_rate
        self.verbose = verbose
        self.collide = collide
        self.sum_data = {}
        self.cur_pt = 0
        self.failure = False
        self.dur_timer = None
        self.avg_list = {}
        self.cum_avg = {}
        self.c = {}
        self.locks = {}

        for k in self.perceptions:
            self.current_data[k] = [None] * window_size
            self.sum_data[k] = None
            self.avg_list[k] = []
            self.c[k] = None
            self.locks[k] = threading.Lock()

        self.create_max_min()

    def create_max_min(self):
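        """Compute, for every model channel, the allowed deviation band
        ("dev") and the corresponding "max"/"min" envelopes around the mean.
        """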
        for k in self.models:
            deviation = self.models[k]["variance"]

            if self.std_dev_dict is not None and k in self.std_dev_dict:
                std_dev = np.array(self.std_dev_dict[k])
            else:
                std_dev = self.std_dev_default

            if self.tol_thresh_dict is not None and k in self.tol_thresh_dict:
                tol_thresh = np.array(self.tol_thresh_dict[k])
            else:
                tol_thresh = 0.

            self.models[k]["dev"] = (std_dev * deviation + tol_thresh)

            self.models[k]["max"] = self.models[k]["mean"] + self.models[k]["dev"]
            self.models[k]["min"] = self.models[k]["mean"] - self.models[k]["dev"]

    def begin_monitoring(self, models=None, model_zeros=None,
                         std_dev_dict=None, noise_dev_dict=None, duration=None,
                         std_dev_default=2.0, noise_dev_default=0.25,
                         tol_thresh_dict=None,
                         contingency=None, window_size=70, current_zeros=None,
                         sampling_rate=1,
                         only_pressure=False,
                         transform_dict=None, verbose=True, collide=True):
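        """Start online collision monitoring against the given models.  When a
        collision is detected, self.failure is set and the contingency
        callback is invoked; if duration is given, monitoring ends after that
        many seconds.
        """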
        if self.active:
            log("Perception already active.")
            return
        self.active = True

        self.setup_monitoring(models,
                              std_dev_dict=std_dev_dict, noise_dev_dict=noise_dev_dict,
                              duration=duration, std_dev_default=std_dev_default,
                              noise_dev_default=noise_dev_default,
                              tol_thresh_dict=tol_thresh_dict,
                              contingency=contingency, window_size=window_size,
                              sampling_rate=sampling_rate,
                              current_zeros=current_zeros, transform_dict=transform_dict,
                              verbose=verbose, collide=collide)

        if model_zeros is not None:
            self.model_zeros = model_zeros
        self.cur_pt = 0
        self.collision_times = {}
        self.collision_sums = {}
        self.cur_col_time = 0
        self.current_zeros = {}
        self.z_sum = {}
        self.z_rsum = {}
        self.z_list = {}
        self.min_prob = 100000.0
        self.cur_mals = {}
        self.mals = None
        self.only_pressure = only_pressure

        self.monitor = RateCaller(self._monitor_data, self.rate * self.sampling_rate)
        self.monitor.run()

        if duration is not None:
            self.dur_timer = threading.Timer(duration, self.end_monitoring)
            self.dur_timer.start()

    def _stable_summer(self, percept, data):
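        """Add 'data' into the running window sum for 'percept' using
        compensated (Kahan-style) summation to limit floating-point error.
        """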
        k = percept

        if self.c[k] is None:
            self.c[k] = np.array([0.] * len(data))
        if self.sum_data[k] is None:
            self.sum_data[k] = np.copy(data)
        else:
            y = data - self.c[k]
            t = self.sum_data[k] + y
            self.c[k] = (t - self.sum_data[k]) - y
            self.sum_data[k] = t

    def _monitor_data(self):
        avg_dict = {}
        sttime = rospy.Time.now().to_sec()
        for k in self.perceptions:
            # grab the current reading for this channel
            add_data = self.perceptions[k]()[1]

            self._stable_summer(k, add_data)

            # once the ring buffer is full, compare the windowed average
            if self.cur_pt >= self.window_size:
                avg_dict[k] = self.sum_data[k] / self.window_size
                if k not in self.current_zeros:
                    if not self.only_pressure:
                        mean = np.array(self.models[k]["mean"][self.cur_pt * self.sampling_rate])
                    else:
                        mean = np.array(self.models[k]["mean"][0])
                    self.current_zeros[k] = mean - avg_dict[k]

                avg_dict[k] += self.current_zeros[k]
                self.avg_list[k] += [avg_dict[k]] * self.sampling_rate

                # drop the oldest sample from the running window sum
                rem_data = self.current_data[k][self.cur_pt % self.window_size]
                self._stable_summer(k, -rem_data)

            self.current_data[k][self.cur_pt % self.window_size] = add_data

        if self.cur_pt >= self.window_size:
            collision_detected = self.collision_detect(avg_dict)
            if self.collide and collision_detected:
                print "collision reported"
                self.failure = True
                if self.contingency is not None:
                    self.contingency()
                self.monitor.stop()
                return

        self.cur_pt += 1

        if not self.only_pressure:
            if self.cur_pt * self.sampling_rate >= 1.00 * len(self.models[self.models.keys()[0]]["mean"]):
                print "ending early:", self.cur_pt * self.sampling_rate
                self.end_monitoring()
        endtime = rospy.Time.now().to_sec()

    def end_monitoring(self):
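        """Stop the monitor timer, print the accumulated z-score averages and
        the minimum probability seen, and return the per-channel averaged
        signals collected during monitoring.
        """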
        if not self.active:
            log("Nothing to stop.")
            return self.avg_list

        print "-------------------------"
        print "z averages:"
        self.z_avg = {}
        for k in self.z_sum:
            self.z_avg[k] = self.z_sum[k] / self.cur_pt
            print k, ":", self.z_avg[k]
        print "-------------------------"
        print "MINIMUM PROB"
        print self.min_prob
        print "-------------------------"

        self.monitor.stop()
        if self.dur_timer is not None:
            self.dur_timer.cancel()
            self.dur_timer = None

        self.active = False
        return self.avg_list

    def collision_detect(self, avg_dict):
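        """Compute per-channel z-scores of the current windowed averages
        against the model and pass them to collision_filtering; returns True
        when a collision is reported.
        """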
        z = {}
        mal_sum = 0.
        for k in avg_dict:
            if not self.only_pressure:
                mean = self.models[k]["mean"][self.cur_pt * self.sampling_rate]
                dev = self.models[k]["dev"][self.cur_pt * self.sampling_rate]
            else:
                mean = self.models[k]["mean"][0]
                dev = self.models[k]["dev"][0]

            cur_mal = ((avg_dict[k] - mean) / dev) ** 2
            self.cur_mals[k] = cur_mal
            mal_sum += np.add.reduce(((avg_dict[k] - mean) / dev) ** 2)
            z[k] = np.fabs(avg_dict[k] - mean) / dev

            if k not in self.z_sum:
                self.z_sum[k] = np.copy(z[k])
            else:
                self.z_sum[k] += z[k]

        collision_detected = self.collision_filtering(z)

        return collision_detected

    def collision_filtering_mal(self, mal):
        prob = 1.
        TIME_WINDOW = 4
        if self.mals is None:
            self.mals = np.array([0.] * TIME_WINDOW)
        self.mals[self.cur_pt % TIME_WINDOW] = mal

        if self.cur_pt < TIME_WINDOW:
            return False
        mal_avg = np.add.reduce(self.mals / TIME_WINDOW)

        MAL_THRESH = 8.0
        if mal_avg > MAL_THRESH:
            print "Collision with mal dist:", mal_avg
            for k in self.cur_mals:
                print k, self.cur_mals[k]
            return True
        return False

    def collision_filtering(self, z):
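        """Maintain a short sliding window of z-scores per channel, convert
        the windowed averages into a joint tail probability with the Gaussian
        CDF, and report a collision when that probability drops below a
        confidence threshold.
        """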
        prob = 1.
        TIME_WINDOW = 4
        for k in z:
            if k not in self.z_rsum:
                self.z_rsum[k] = np.copy(z[k])
                self.z_list[k] = [np.array([0.] * len(z[k]))] * TIME_WINDOW
            else:
                self.z_rsum[k] += z[k]

            self.z_rsum[k] -= self.z_list[k][self.cur_pt % TIME_WINDOW]
            self.z_list[k][self.cur_pt % TIME_WINDOW] = z[k]

            prob *= np.multiply.reduce(2 * stats.norm.cdf(-self.z_rsum[k] / TIME_WINDOW))

        if self.cur_pt < TIME_WINDOW:
            return False

        if prob < self.min_prob:
            self.min_prob = prob

        if not self.only_pressure:
            CONFIDENCE = 2.0e-6
        else:
            CONFIDENCE = 1.e-5
        if self.cur_pt % 20 == 0:
            print "Current pt:", self.cur_pt, ", probability:", prob
        if prob < CONFIDENCE:
            print "probability:", prob
            print "collision returned"
            return True

        return False

    def simulate_monitoring(self, monitor_data, models=None, model_zeros=None,
                            std_dev_dict=None, noise_dev_dict=None,
                            duration=None,
                            std_dev_default=2.0, noise_dev_default=0.25,
                            tol_thresh_dict=None,
                            contingency=None, window_size=70, current_zeros=None,
                            transform_dict=None, verbose=True, collide=True):
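        """Replay previously recorded per-timestep averaged readings
        (monitor_data) through the collision detector offline, without
        touching the live perception streams; returns (timestep index,
        readings) for the last detected collision, or None.
        """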
        if models is None:
            models = self.models
        self.setup_monitoring(models,
                              std_dev_dict=std_dev_dict, noise_dev_dict=noise_dev_dict,
                              duration=duration, std_dev_default=std_dev_default,
                              tol_thresh_dict=tol_thresh_dict,
                              noise_dev_default=noise_dev_default, contingency=contingency,
                              window_size=window_size,
                              current_zeros=current_zeros, transform_dict=transform_dict,
                              verbose=verbose, collide=collide)
        if model_zeros is not None:
            self.model_zeros = model_zeros
        self.cur_pt = self.window_size
        collision = None

        # detector state normally initialized by begin_monitoring
        self.z_sum = {}
        self.z_rsum = {}
        self.z_list = {}
        self.min_prob = 100000.0
        self.cur_mals = {}
        self.mals = None
        self.only_pressure = False

        # feed each recorded timestep through collision_detect as one avg_dict
        for i in range(len(monitor_data[monitor_data.keys()[0]])):
            avg_dict = {}
            for k in monitor_data:
                avg_dict[k] = monitor_data[k][i]
            if self.collision_detect(avg_dict):
                collision = (i, avg_dict)
            self.cur_pt += 1
        return collision

    def resample_and_thin_data(self, monitor_data, sample_rate):
        ret_data = {}
        for k in monitor_data:
            ret_data[k] = {}
            ret_data[k]["mean"] = monitor_data[k]["mean"][::sample_rate]
            ret_data[k]["variance"] = monitor_data[k]["variance"][::sample_rate]
        return ret_data

    # NOTE: the default window sizes below are placeholders; only
    # smooth_wind_default=234 is taken from generate_mean_grasp().
    def split_signals(self, datasets, smooth_wind_dict=None, smooth_wind_default=234,
                      var_wind_dict=None, var_wind_default=30, var_smooth_wind=30):
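        """Build per-perception mean/variance models from recorded training
        runs.  For each coordinate of each perception, the training streams
        are smoothed and averaged into a mean model, and a variance model is
        computed around that mean; results are stored in self.models and
        returned.
        """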
        for perception in datasets:
            data_list = datasets[perception]

            # training streams may have slightly different lengths
            lens = [len(m) for m in data_list]
            max_len = np.max(lens)

            if smooth_wind_dict is None or smooth_wind_dict.get(perception) is None:
                smooth_wind = smooth_wind_default
            else:
                smooth_wind = smooth_wind_dict[perception]

            ret_means, ret_vars, ret_mean_models, ret_noise_vars = [], [], [], []
            ret_times, noise_vars = [], []

            num_coords = len(data_list[0][0][1])
            for coord in range(num_coords):
                mean_models, variance_models = [], []
                times = None
                for stream in data_list:
                    # pull this coordinate's values out of the (time, array) tuples
                    stream_coord = np.array(zip(*zip(*stream)[1])[coord])
                    cur_mean_model = signal_smooth(stream_coord, smooth_wind)
                    mean_models += [cur_mean_model]

                    # residual variance of the raw stream around its smoothed version
                    noise_vars += [(sum([(x - y) ** 2 for x, y in zip(cur_mean_model, stream_coord)]) /
                                    len(cur_mean_model))]

                # average the smoothed streams point-by-point, skipping streams
                # that have already ended
                avg_means_model = np.array([0.] * max_len)
                for i in range(max_len):
                    n = 0
                    for j in range(len(mean_models)):
                        if i < len(mean_models[j]):
                            avg_means_model[i] += mean_models[j][i]
                            n += 1
                    avg_means_model[i] /= n

                if var_wind_dict is None or var_wind_dict.get(perception) is None:
                    var_wind = var_wind_default
                else:
                    var_wind = var_wind_dict[perception]

                vars_model = signal_list_variance(mean_models, avg_means_model, var_wind)
                vars_model = signal_smooth(vars_model, var_smooth_wind)
                vars_model = signal_smooth(vars_model, var_smooth_wind + 23)

                ret_times += [times]
                ret_means += [avg_means_model]
                ret_vars += [vars_model]
                ret_mean_models += [mean_models]
                ret_noise_vars += [np.average(noise_vars)]

            self.models[perception]["mean"] = np.array(zip(*ret_means))
            self.models[perception]["variance"] = np.array(zip(*ret_vars))
            # regroup the smoothed signals as stream -> time -> coordinate arrays
            a = ret_mean_models
            b = []
            for stream in range(len(a[0])):
                t1 = []
                for val in range(len(a[0][0])):
                    t2 = []
                    for coord in range(len(a)):
                        if val < len(a[coord][stream]):
                            t2 += [a[coord][stream][val]]
                    t1 += [np.array(t2)]
                b += [t1]

            self.models[perception]["smoothed_signals"] = b
            self.models[perception]["noise_variance"] = np.array(ret_noise_vars)

        return self.models


def generate_mean_grasp(datasets):
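    """Average the recorded training streams of each perception channel into
    a single smoothed mean trajectory per channel.
    """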
    smooth_wind_default = 234
    means = {}
    for perception in datasets:
        data_list = datasets[perception]

        # training streams may have slightly different lengths
        lens = [len(m) for m in data_list]
        max_len = np.max(lens)

        ret_means = []

        num_coords = len(data_list[0][0][1])
        for coord in range(num_coords):
            mean_models, variance_models = [], []
            for stream in data_list:
                stream_coord = np.array(zip(*zip(*stream)[1])[coord])
                cur_mean_model = signal_smooth(stream_coord, smooth_wind_default)
                mean_models += [cur_mean_model]

            avg_means_model = np.array([0.] * max_len)
            for i in range(max_len):
                n = 0
                for j in range(len(mean_models)):
                    if i < len(mean_models[j]):
                        avg_means_model[i] += mean_models[j][i]
                        n += 1
                avg_means_model[i] /= n

            ret_means += [avg_means_model]

        means[perception] = np.array(zip(*ret_means))

    return means

if __name__ == '__main__':
    rospy.init_node(node_name, anonymous=True)

    apm = ArmPerceptionMonitor(0, rate=0.001)
    for x in range(3):
        raw_input("Begin arm")
        apm.start_training()
        rospy.sleep(1.)
        apm.stop_training()
        rospy.sleep(0.1)
    # build mean/variance models from the recorded training runs and save them
    models = apm.split_signals(apm.datasets)
    apm.save("pickles/apmtest.pickle")
    means = models["accelerometer"]["mean"]
    variances = models["accelerometer"]["variance"]

    xm, ym, zm = zip(*means)
    xv, yv, zv = zip(*variances)
    std_dev = 2.5
    xmmax = [m + np.sqrt(v) * std_dev for m, v in zip(xm, xv)]
    ymmax = [m + np.sqrt(v) * std_dev for m, v in zip(ym, yv)]
    zmmax = [m + np.sqrt(v) * std_dev for m, v in zip(zm, zv)]
    xmmin = [m - np.sqrt(v) * std_dev for m, v in zip(xm, xv)]
    ymmin = [m - np.sqrt(v) * std_dev for m, v in zip(ym, yv)]
    zmmin = [m - np.sqrt(v) * std_dev for m, v in zip(zm, zv)]

    import matplotlib.pyplot as plt
    plt.subplot(321)
    plt.plot(xm)
    plt.plot(xmmax)
    plt.plot(xmmin)
    plt.title("X mean")
    plt.subplot(323)
    plt.plot(ym)
    plt.plot(ymmax)
    plt.plot(ymmin)
    plt.title("Y mean")
    plt.subplot(325)
    plt.plot(zm)
    plt.plot(zmmax)
    plt.plot(zmmin)
    plt.title("Z mean")

    plt.subplot(322)
    plt.plot(xv)
    plt.title("X variance")
    plt.subplot(324)
    plt.plot(yv)
    plt.title("Y variance")
    plt.subplot(326)
    plt.plot(zv)
    plt.title("Z variance")

    plt.show()