toyexample.py
Go to the documentation of this file.
#!/usr/bin/python
00002 
00003 import numpy as np
00004 
class Transformation:
  """A 1-D shift transformation.

  Stores an integer shift and can apply it either to a raw image
  (a list of cell values) or to a named object inside a Scene.
  """

  def __init__(self, value):
    # Number of cells content is shifted to the right
    # (negative values shift left).
    self.value = value

  def apply(self, image, value=None):
    """Apply transformation to a image, i.e. shift the entire image.

    Cells shifted in from the border are filled with None; cells
    shifted past either border are discarded.  `value` overrides the
    stored shift when given.
    """
    # Fixed: compare against the None singleton with `is`, not `==`
    # (PEP 8; `==` can misfire for values with custom __eq__).
    if value is None:
      value = self.value

    image_new = [None] * len(image)
    for i, val in enumerate(image):
      idx = i + value
      if idx >= len(image_new):
        break      # everything further right falls off the image
      if idx < 0:
        continue   # fell off the left border
      image_new[idx] = val

    return image_new

  def applyInverse(self, image):
    """Apply the inverse transformation, i.e. shift by -value."""
    return self.apply(image, value=-self.value)

  def applyToScene(self, scene, obj_id):
    """Apply transformation to a scene, i.e. move one object."""
    scene.moveObject(obj_id, self.value)
class Scene:
  """A 1-D scene of depth-valued, colored objects rendered onto an image line."""

  def __init__(self, image_size):
    self.image_size = image_size
    # obj_id -> [offset, object_pattern, color_pattern].
    # Order of adding objects matters: later objects overwrite earlier
    # ones where they overlap during rendering.
    self.objects = {}
    #self.z_current = 0

  def addObject(self, id, object_pattern, color_pattern, offset):
    """Register an object.

    `object_pattern` is the list of depth values, `color_pattern` the
    matching list of color labels, `offset` the left-most cell index.
    """
    # order of adding object matters
    #z = self.z_current
    #self.z_current += 1
    self.objects[id] = [offset, object_pattern, color_pattern]

  def moveObject(self, obj_id, value):
    """Shift the stored offset of one object by `value` cells."""
    assert obj_id in self.objects
    self.objects[obj_id][0] += value

  def generateImage(self, shift=0):
    """Render and return (depth_image, color_image).

    `shift` moves every object for this rendering only; stored offsets
    are not modified.  Background is depth 0 / color '-'.
    """
    image = [0] * self.image_size
    color_image = ['-'] * self.image_size
    for obj_id, (offset, pattern, color_pattern) in self.objects.items():
      offset += shift  # local copy; the stored offset is untouched
      rlim = min(self.image_size, offset + len(pattern))
      # Fixed: clip at the left border too.  With a negative offset the
      # original loop indexed image[i] with i < 0, wrapping the object
      # around to the right edge of the image.
      for i in range(max(0, offset), rlim):
        if pattern[i - offset] == image[i]:
          # NOTE(review): warns only when the *same* depth value is drawn
          # twice at a cell; verify whether `image[i] != 0` (cell already
          # occupied) was intended instead.
          print("Warn: obj %s collides" % obj_id)
        image[i] = pattern[i - offset]
        color_image[i] = color_pattern[i - offset]
    return image, color_image

  def transformAndGenerateImage(self, value):
    """Render the scene as if globally shifted by `value` (non-destructive)."""
    return self.generateImage(value)

  def display(self):
    """Return printable space-joined strings of the depth and color images."""
    image, color_image = self.generateImage()
    return " ".join(map(str, image)), " ".join(map(str, color_image))
00073     
class Segmenter:
  """Incrementally builds a model of one moving object from a sequence
  of 1-D depth images, their color images, and the known per-frame
  transformation T of the tracked object.

  NOTE(review): this class is Python 2 code.  It relies on print
  statements and on Python 2's ordering of None against numbers
  (see step() and mergeMotionMap()); it will not run under Python 3
  unmodified.
  """

  def __init__(self, image_size):
    # Width of the 1-D images this segmenter processes.
    self.image_size = image_size

    # model of the object to be tracked in current frame
    self.model_object = [None]*image_size
    # model of the object in color; only depth info is used to compute this
    self.model_color = [None]*image_size
    # motion detected in current frame; 
    # >0 means has occluded, <0 has revealed, 0 no change
    self.model_motion_map = [None]*image_size

    # these are candidates that could be considered as part
    # of the model by using other priors
    self.model_candidates = [None]*image_size
    self.model_color_candidates = [None]*image_size

    # Previous frame (depth and color); None until the first step() call.
    self.image_last = None
    self.color_image_last = None
    self.T_last = None

  #def projectModelToImage(self):
    #image = [None] * image_size
    #for i in enumerate(range(offset, image_size)):

  def accumulate(self, vals_to_add, vals_to_add_color):
    """Merge every non-None entry of the given depth/color lists into
    the object model at the same index."""
    for i, (val, cval) in enumerate(zip(vals_to_add, vals_to_add_color)):
      if val != None:
        self.model_object[i] = val
        self.model_color[i] = cval

  def step(self, image, color_image, T=None):
    """Process one frame.

    The first call only stores the frame (T must be None).  Later calls
    require T, the transformation the tracked object underwent since the
    previous frame, and update model_object / model_color /
    model_motion_map / model_candidates.
    """
    if self.image_last == None:
      assert (T==None)
      self.image_last = image
      self.color_image_last = color_image
      return

    assert (T!=None)

    # clean-up model by removing points that are inconsistent in the model
    # (and which are not occluded)
    # caveat: with sensor noise we might actually remove too many points
    # NOTE(review): when m_i is None, Python 2 evaluates None > number as
    # False, so empty model cells are skipped; under Python 3 this
    # comparison would raise TypeError.
    for idx, (m_i, i_i) in enumerate(zip(T.apply(self.model_object), image)):
      if m_i > i_i:
        shifted_idx = idx - T.value # hacky
        self.model_object[shifted_idx] = None
        self.model_color[shifted_idx] = None

    # Move the model along with the tracked object.
    self.model_object = T.apply(self.model_object)
    self.model_color = T.apply(self.model_color)

    # detect motion and update motion map
    motion_map = self.detectMotion(self.image_last, image)
    print "raw motion map"
    print motion_map
    self.model_motion_map = self.mergeMotionMap(motion_map)

    # detect color changes: 1 where the color differs from last frame.
    color_change_map = []
    for i,j in zip(self.color_image_last, color_image):
      if i != j:
        color_change_map.append (1)
      else:
        color_change_map.append (0)
    print "color_change_map"
    print color_change_map

    # Candidate points: depth unchanged and consistent with T, but color
    # changed -- could be added to the model given additional priors.
    self.model_candidates = []
    self.model_color_candidates = []
    for img_last, climg_last, img_cur, climg_cur, mm, cm in zip(T.apply(self.image_last), T.apply(self.color_image_last), \
      image, color_image, T.apply(self.model_motion_map), T.apply(color_change_map)):
      if mm == 0 and img_last == img_cur and cm != 0:  # no change in depth, T-consistent, but color changed
        self.model_candidates.append(img_cur)
        self.model_color_candidates.append(climg_cur)
      else:
        self.model_candidates.append(None)
        self.model_color_candidates.append(None)

    # forward prediction and comparison
    # compute T*image_last 
    # keep all points with -1 motion that match in current
    model_add = []
    model_add_color = []
    for img_last, climg_last, img_cur, climg_cur, mm in zip(T.apply(self.image_last), T.apply(self.color_image_last), \
      image, color_image, T.apply(self.model_motion_map)):
      # mm == None is tested first so the mm >= 0 comparison only ever
      # sees numbers.
      if mm == None or mm >= 0:
        model_add.append(None)
        model_add_color.append(None)
        continue
      # mm < 0:
      if img_last == img_cur:
        model_add.append(img_cur)
        model_add_color.append(climg_cur)
      else:
        model_add.append(None)
        model_add_color.append(None)

    #model_add = T.applyInverse(model_add)
    #print "object -"
    #print model_add
    self.accumulate(model_add, model_add_color)

    # backward prediction and comparison
    # compute T^-1*image
    # keep all points with + motion that match in last
    model_add = []
    model_add_color = []
    #print "T.applyInverse(image)"
    #print self.image_last
    #print T.applyInverse(image)
    #print self.model_motion_map
    for img_last, climg_last, img_cur, climg_cur, mm in zip(self.image_last, self.color_image_last, \
      T.applyInverse(image), T.applyInverse(color_image), T.applyInverse(self.model_motion_map)):
      if mm == None or mm <= 0:
        model_add.append(None)
        model_add_color.append(None)
        continue
      # mm > 0:
      if img_last == img_cur:
        model_add.append(img_cur)
        model_add_color.append(climg_cur)
      else:
        model_add.append(None)
        model_add_color.append(None)

    #print "object +"
    #print model_add
    # Shift the backward-predicted additions into the current frame
    # before merging them into the model.
    model_add = T.apply(model_add)
    model_add_color = T.apply(model_add_color)

    self.accumulate(model_add, model_add_color)

    # log: current frame becomes the reference for the next step().
    self.image_last = image
    self.color_image_last = color_image

  def detectMotion(self, ot1, ot2):
    """Return the per-cell depth difference ot2 - ot1 of two equal-length
    images (positive: occluded, negative: revealed, 0: unchanged)."""
    assert len(ot1)==len(ot2)

    res = []
    for i,j in zip(ot1, ot2):
      res.append (j - i)

    return res

  def mergeMotionMap(self, motion_map):
    """Merge a new motion map into the stored one and return the result.

    NOTE(review): several branches rely on Python 2 comparing None with
    numbers (None sorts below every number): with i == None, `i < 0` is
    True and `max([i,j])` returns j, so a None in the old map is simply
    replaced by the new value.  Under Python 3 these comparisons raise
    TypeError.
    """
    res = []
    for i, j in zip(self.model_motion_map, motion_map):
      if i != None and j != None and i*j < 0:
        res.append(0)
      elif j > 0: # always take new map if map is + 
        res.append(j)
      elif i < 0: # always take new map if old map is -
        res.append(j)
      else: # otherwise take bigger value
        res.append (max ([i,j]))
    return res
00232     
if __name__ == "__main__":
  # Demo: track the "main" object through a sequence of known shifts.
  # Each object is a (depth_pattern, color_pattern) pair.
  # Alternative configuration kept for reference:
  #obj_main = [ 3, 2, 3 ], [ 'a', 'a', 'b' ]
  #obj_behind = [5,5], ['a', 'b']
  #obj_infront = [4],  ['b']

  obj_main = [3, 3, 3], ['a', 'b', 'c']
  obj_behind = [5, 5], ['a', 'b']
  obj_infront = [4], ['b']

  image_size = 15

  scene = Scene(image_size)
  scene.addObject("main", obj_main[0], obj_main[1], 6)
  #scene.addObject("behind", obj_behind[0], obj_behind[1], 2)
  #scene.addObject("infront", obj_infront[0], obj_infront[1], 9)

  segmenter = Segmenter(image_size)
  # First call only records the frame; no tracking happens yet.
  segmenter.step(*scene.generateImage())

  print("=========================")

  print(scene.display())

  # Per-timestep shifts applied to the "main" (and optionally "behind") object.
  main_transforms = [1, 2, -4, 3, 2]
  behind_transforms = [0, 0, 1, 3, 0]
  #behind_transforms = [0]*5

  for i, (Tm_val, Tb_val) in enumerate(zip(main_transforms, behind_transforms)):
    print("==================================================")
    print("t=%d; T=%d" % (i + 1, Tm_val))
    Tm = Transformation(Tm_val)
    Tm.applyToScene(scene, "main")

    #Tb = Transformation(Tb_val)
    #Tb.applyToScene(scene, "behind")

    print(scene.display())
    print("--------------------------------------------------")

    # Feed the new frame plus the ground-truth transformation of the
    # tracked object to the segmenter.
    segmenter.step(*scene.generateImage(), T=Tm)
    print("motion map")
    print(segmenter.model_motion_map)
    print("object")
    print("%s %s" % (segmenter.model_object, segmenter.model_color))
    print("candidates")
    print("%s %s" % (segmenter.model_candidates, segmenter.model_color_candidates))


shape_reconstruction
Author(s): Roberto Martín-Martín
autogenerated on Sat Jun 8 2019 18:37:26