example4 - train Namespace Reference

Variables

int all_cropped_num = len(os.listdir(train_cropped_images_path))//3
 
 callbacks
 
int channels = 2
 
 col
 
 col_end = col+cropped_w
 
 compiled_model = model
 
list config_list = [(noisy_images, False), (pure_images, False), (ir_images, True)]
 
 conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
 
 conv10 = Conv2D(channels, 1, activation='sigmoid')(conv9)
 
 conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
 
 conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
 
 conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
 
 conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
 
 conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
 
 conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
 
 conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
 
 conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
 
 crop = img.crop((col_i, row_i, col_i + w, row_i + h))
 
 cropped_h
 
list cropped_image_offsets = []
 
list cropped_images_list = [(cropped_noisy_images, "noisy"), (cropped_pure_images, "pure")]
 
list cropped_ir_images = [f for f in glob.glob(train_cropped_images_path + "**/left*" + IMAGE_EXTENSION, recursive=True)]
 
list cropped_noisy_images = [f for f in glob.glob(train_cropped_images_path + "**/res*" + IMAGE_EXTENSION, recursive=True)]
 convert cropped images to arrays
 
list cropped_pure_images = [f for f in glob.glob(train_cropped_images_path + "**/gt*" + IMAGE_EXTENSION, recursive=True)]
 
 cropped_w
 
 curr_cropped_images
 
 denoised_col = cropped_w
 
string denoised_dir = images_path+r"/denoised"
 
 denoised_image = model.predict(sample)
 
 denoised_name = os.path.basename(directory.split('/')[-1])
 
 denoised_row = cropped_h
 
 drop4 = Dropout(0.5)(conv4)
 
 drop5 = Dropout(0.5)(conv5)
 
 epochs
 
 file_path = os.path.join(train_cropped_images_path, filename)
 
 filelist
 
 first_image = i*images_num_to_process
 
int frame_num = 0
 
 gpus = tf.config.experimental.list_physical_devices('GPU')
 
 gray_image = cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY)
 
 h
 
 height
 
 ii = cv2.imread(file)
 
 im_and_ir = images_plt
 
 im_files = [f for f in glob.glob(directory + "**/res*" , recursive=True)]
 
string IMAGE_EXTENSION = '.png'
 
int images_num_to_process = 1000
 
string images_path = root+r"/images"
 
list images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in im_files if f.endswith(IMAGE_EXTENSION)]
 
 images_type
 
 img = Image.fromarray(np.array(gray_image).astype("uint16"))
 
 img_height
 
 img_width
 
tuple input_size = (img_width, img_height, channels)
 
 inputs = Input(input_size)
 
tuple ir_config = (ir_images, ir_total_cropped_images, True, {})
 SPLIT IMAGES.
 
string ir_cropped_images_file = test_cropped_images_path+r'/'
 
 ir_im_files = [f for f in glob.glob(ir_cropped_images_file + "**/left*" , recursive=True)]
 
list ir_images = [f for f in glob.glob(train_images + "**/left*" + IMAGE_EXTENSION, recursive=True)]
 
list ir_images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in ir_im_files if f.endswith(IMAGE_EXTENSION)]
 
list ir_total_cropped_images = [0]*len(ir_images)
 
 is_ir
 
 iterations = all_cropped_num//images_num_to_process
 
 limit = first_image+images_num_to_process
 
 log_file = open(name, "w")
 
 logical_gpus = tf.config.experimental.list_logical_devices('GPU')
 
string logs_path = root+r"/logs"
 
 loss
 
 merge6 = concatenate([drop4, up6], axis=3)
 
 merge7 = concatenate([conv3, up7], axis=3)
 
 merge8 = concatenate([conv2, up8], axis=3)
 
 merge9 = concatenate([conv1, up9], axis=3)
 
 metrics
 
 model = Model(inputs=inputs, outputs=conv10)
 
 model_checkpoint = ModelCheckpoint(models_path + r"/unet_membrane.hdf5", monitor='loss', verbose=1, save_best_only=True)
 
string model_name = 'DEPTH_'
 
string models_path = root+r"/models"
 
string name = logs_path+r'/loss_output_'
 
string new_test_cropped_images_path = test_cropped_images_path+r'/'
 
tuple noisy_config = (noisy_images, total_cropped_images, False, origin_files_index_size_path_test)
 
list noisy_images = [f for f in glob.glob(train_images + "**/res*" + IMAGE_EXTENSION, recursive=True)]
 
 noisy_input_train = img
 
 old_stdout = sys.stdout
 
 optimizer
 
 origin_file_name
 
 origin_files_index_size_path
 
dictionary origin_files_index_size_path_test = {}
 
string outfile = denoised_dir+'/'
 
 path = os.path.join(train_cropped_images_path, curr_cropped_images[i])
 
list paths = [root, images_path, models_path, logs_path, train_images, train_cropped_images_path]
 
 pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
 
 pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
 
 pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
 
 pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
 
list pure_images = [f for f in glob.glob(train_images + "**/gt*" + IMAGE_EXTENSION, recursive=True)]
 
 pure_input_train = img
 
int rolling_frame_num = 0
 
string root = r"./unet_flow"
 
 row
 
 row_end = row+cropped_h
 
 sample = samples[i:i+1]
 
 samples = img
 
string save_model_name = models_path+'/'
 
 save_to
 
 stdout
 
 steps_per_epoch = len(cropped_noisy_images)//unet_epochs
 
 t1 = time.perf_counter()
 
 t2 = time.perf_counter()
 
string test_cropped_images_path = images_path+r"/test_cropped"
 
string test_images = images_path+r"/test"
 
 test_img_height
 
 test_img_width
 
 test_model_name = save_model_name
 
 timestr = time.strftime("%Y%m%d-%H%M%S")
 
list total_cropped_images = [0]*len(noisy_images)
 
string train_cropped_images_path = images_path+r"/train_cropped"
 
string train_images = images_path+r"/train"
 
int unet_epochs = 1
 
 up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
 
 up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
 
 up8
 
 up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
 
 w
 
 whole_image = np.zeros((height, width, channels), dtype="float32")
 
 width
 

Variable Documentation

int example4 - train.all_cropped_num = len(os.listdir(train_cropped_images_path))//3

Definition at line 168 of file example4 - train.py.

example4 - train.callbacks

Definition at line 247 of file example4 - train.py.

int example4 - train.channels = 2

Definition at line 34 of file example4 - train.py.

example4 - train.col

Definition at line 403 of file example4 - train.py.

int example4 - train.col_end = col+cropped_w

Definition at line 407 of file example4 - train.py.

example4 - train.compiled_model = model

Definition at line 113 of file example4 - train.py.

list example4 - train.config_list = [(noisy_images, False), (pure_images, False), (ir_images, True)]

Definition at line 136 of file example4 - train.py.
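
A minimal sketch of how these three file lists could be collected and paired with their IR flags before being bundled into config_list; the res*/gt*/left* prefixes and the directory names follow the variables documented on this page, everything else is illustrative.

import glob

# Assumed layout (see the path variables on this page): noisy depth frames are
# named "res*", ground-truth frames "gt*", and left IR frames "left*".
IMAGE_EXTENSION = '.png'
train_images = "./unet_flow/images/train"

noisy_images = glob.glob(train_images + "/**/res*" + IMAGE_EXTENSION, recursive=True)
pure_images = glob.glob(train_images + "/**/gt*" + IMAGE_EXTENSION, recursive=True)
ir_images = glob.glob(train_images + "/**/left*" + IMAGE_EXTENSION, recursive=True)

# Each entry pairs a file list with an is_ir flag, matching config_list above.
config_list = [(noisy_images, False), (pure_images, False), (ir_images, True)]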

example4 - train.conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)

Definition at line 69 of file example4 - train.py.

example4 - train.conv10 = Conv2D(channels, 1, activation='sigmoid')(conv9)

Definition at line 108 of file example4 - train.py.

example4 - train.conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)

Definition at line 72 of file example4 - train.py.

example4 - train.conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)

Definition at line 75 of file example4 - train.py.

example4 - train.conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)

Definition at line 78 of file example4 - train.py.

example4 - train.conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)

Definition at line 83 of file example4 - train.py.

example4 - train.conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)

Definition at line 89 of file example4 - train.py.

example4 - train.conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)

Definition at line 94 of file example4 - train.py.

example4 - train.conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)

Definition at line 100 of file example4 - train.py.

example4 - train.conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)

Definition at line 105 of file example4 - train.py.

example4 - train.crop = img.crop((col_i, row_i, col_i + w, row_i + h))

Definition at line 156 of file example4 - train.py.

example4 - train.cropped_h

Definition at line 131 of file example4 - train.py.

list example4 - train.cropped_image_offsets = []

Definition at line 359 of file example4 - train.py.

list example4 - train.cropped_images_list = [(cropped_noisy_images, "noisy"), (cropped_pure_images, "pure")]

Definition at line 189 of file example4 - train.py.

list example4 - train.cropped_ir_images = [f for f in glob.glob(train_cropped_images_path + "**/left*" + IMAGE_EXTENSION, recursive=True)]

Definition at line 187 of file example4 - train.py.

list example4 - train.cropped_noisy_images = [f for f in glob.glob(train_cropped_images_path + "**/res*" + IMAGE_EXTENSION, recursive=True)]

convert cropped images to arrays

IMAGE TO ARRAY.

Definition at line 185 of file example4 - train.py.
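
A hedged sketch of the image-to-array step these lists feed: the cropped tiles are read back from disk and stacked into a float32 array for Keras. The glob pattern mirrors cropped_noisy_images above; the uint16 scale factor is an assumption.

import glob
import cv2
import numpy as np

IMAGE_EXTENSION = '.png'
train_cropped_images_path = "./unet_flow/images/train_cropped"

cropped_noisy_images = glob.glob(
    train_cropped_images_path + "/**/res*" + IMAGE_EXTENSION, recursive=True)

# Read every 16-bit tile unchanged and stack into one (N, H, W) float32 array.
# Dividing by 65535 assumes uint16 inputs; the script may normalize differently.
tiles = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in cropped_noisy_images]
noisy_input_train = np.stack(tiles).astype("float32") / 65535.0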

list example4 - train.cropped_pure_images = [f for f in glob.glob(train_cropped_images_path + "**/gt*" + IMAGE_EXTENSION, recursive=True)]

Definition at line 186 of file example4 - train.py.

example4 - train.cropped_w

Definition at line 131 of file example4 - train.py.

example4 - train.curr_cropped_images

Definition at line 192 of file example4 - train.py.

example4 - train.denoised_col = cropped_w

Definition at line 409 of file example4 - train.py.

string example4 - train.denoised_dir = images_path+r"/denoised"

Definition at line 266 of file example4 - train.py.

example4 - train.denoised_image = model.predict(sample)

Definition at line 405 of file example4 - train.py.
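
The variables around this call (sample, whole_image, row/col, row_end/col_end, denoised_row/denoised_col) suggest a tile-by-tile prediction that is pasted back into a full-size frame. A sketch of that reassembly with hypothetical sizes; the exact clamping in the script may differ.

import numpy as np

# Hypothetical frame and tile sizes; the real values come from the input images.
height, width, channels = 480, 640, 2
cropped_h, cropped_w = 120, 160

whole_image = np.zeros((height, width, channels), dtype="float32")

def paste_tile(whole_image, denoised_image, row, col):
    """Paste one predicted tile of shape (1, cropped_h, cropped_w, channels)
    back into the full-frame buffer at offset (row, col)."""
    row_end = min(row + cropped_h, height)
    col_end = min(col + cropped_w, width)
    denoised_row = row_end - row          # rows actually used from the tile
    denoised_col = col_end - col          # columns actually used from the tile
    whole_image[row:row_end, col:col_end, :] = \
        denoised_image[0, :denoised_row, :denoised_col, :]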

example4 - train.denoised_name = os.path.basename(directory.split('/')[-1])

Definition at line 420 of file example4 - train.py.

example4 - train.denoised_row = cropped_h

Definition at line 408 of file example4 - train.py.

example4 - train.drop4 = Dropout(0.5)(conv4)

Definition at line 80 of file example4 - train.py.

example4 - train.drop5 = Dropout(0.5)(conv5)

Definition at line 85 of file example4 - train.py.

example4 - train.epochs

Definition at line 246 of file example4 - train.py.

example4 - train.file_path = os.path.join(train_cropped_images_path, filename)

Definition at line 122 of file example4 - train.py.

example4 - train.filelist

Definition at line 140 of file example4 - train.py.

example4 - train.first_image = i*images_num_to_process

Definition at line 180 of file example4 - train.py.

int example4 - train.frame_num = 0

Definition at line 153 of file example4 - train.py.

example4 - train.gpus = tf.config.experimental.list_physical_devices('GPU')

Definition at line 42 of file example4 - train.py.
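
gpus and logical_gpus follow TensorFlow's standard memory-growth setup. A sketch of that documented pattern:

import tensorflow as tf

# Enable memory growth so TensorFlow does not reserve all GPU memory up front.
# This must run before any GPU has been initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before the GPUs are initialized.
        print(e)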

example4 - train.gray_image = cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY)

Definition at line 148 of file example4 - train.py.

example4 - train.h

Definition at line 141 of file example4 - train.py.

example4 - train.height

Definition at line 152 of file example4 - train.py.

example4 - train.ii = cv2.imread(file)

Definition at line 147 of file example4 - train.py.

example4 - train.im_and_ir = images_plt

Definition at line 224 of file example4 - train.py.

list example4 - train.im_files = [f for f in glob.glob(directory + "**/res*" , recursive=True)]

Definition at line 193 of file example4 - train.py.

string example4 - train.IMAGE_EXTENSION = '.png'

Definition at line 39 of file example4 - train.py.

example4 - train.images_num_to_process = 1000

Definition at line 167 of file example4 - train.py.

string example4 - train.images_path = root+r"/images"

Definition at line 19 of file example4 - train.py.

example4 - train.images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in im_files if f.endswith(IMAGE_EXTENSION)]

Definition at line 217 of file example4 - train.py.

example4 - train.images_type

Definition at line 192 of file example4 - train.py.

example4 - train.img = Image.fromarray(np.array(gray_image).astype("uint16"))

Definition at line 149 of file example4 - train.py.
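
ii, gray_image, and img together form the read / grayscale / 16-bit conversion used before cropping. A self-contained sketch; the input path is hypothetical.

import cv2
import numpy as np
from PIL import Image

file = "example_depth.png"  # hypothetical input frame

# Read with OpenCV, collapse to one channel, then wrap as a 16-bit PIL image
# so the later crop-and-save step keeps the full depth range.
ii = cv2.imread(file)
gray_image = cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY)
img = Image.fromarray(np.array(gray_image).astype("uint16"))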

example4 - train.img_height

Definition at line 35 of file example4 - train.py.

example4 - train.img_width

Definition at line 35 of file example4 - train.py.

tuple example4 - train.input_size = (img_width, img_height, channels)

Definition at line 67 of file example4 - train.py.

example4 - train.inputs = Input(input_size)

Definition at line 68 of file example4 - train.py.

tuple example4 - train.ir_config = (ir_images, ir_total_cropped_images, True, {})

SPLIT IMAGES.

Definition at line 312 of file example4 - train.py.

string example4 - train.ir_cropped_images_file = test_cropped_images_path+r'/'

Definition at line 360 of file example4 - train.py.

list example4 - train.ir_im_files = [f for f in glob.glob(ir_cropped_images_file + "**/left*" , recursive=True)]

Definition at line 193 of file example4 - train.py.

list example4 - train.ir_images = [f for f in glob.glob(train_images + "**/left*" + IMAGE_EXTENSION, recursive=True)]

Definition at line 135 of file example4 - train.py.

example4 - train.ir_images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in ir_im_files if f.endswith(IMAGE_EXTENSION)]

Definition at line 218 of file example4 - train.py.

list example4 - train.ir_total_cropped_images = [0]*len(ir_images)

Definition at line 307 of file example4 - train.py.

example4 - train.is_ir

Definition at line 140 of file example4 - train.py.

example4 - train.iterations = all_cropped_num//images_num_to_process

Definition at line 169 of file example4 - train.py.

example4 - train.limit = first_image+images_num_to_process

Definition at line 196 of file example4 - train.py.
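
all_cropped_num, images_num_to_process, iterations, first_image, and limit describe chunked processing of the cropped tiles. A sketch of that loop; the //3 appears to reflect the three tile types (noisy, pure, IR) sharing one directory, as implied by the lists above.

import os

train_cropped_images_path = "./unet_flow/images/train_cropped"
images_num_to_process = 1000

# Three tiles (res*, gt*, left*) are stored per cropped frame, hence the //3.
all_cropped_num = len(os.listdir(train_cropped_images_path)) // 3
iterations = all_cropped_num // images_num_to_process

for i in range(iterations):
    first_image = i * images_num_to_process
    limit = first_image + images_num_to_process
    # this chunk covers cropped frames [first_image, limit)
    chunk_indices = range(first_image, limit)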

example4 - train.log_file = open(name, "w")

Definition at line 62 of file example4 - train.py.

example4 - train.logical_gpus = tf.config.experimental.list_logical_devices('GPU')

Definition at line 49 of file example4 - train.py.

string example4 - train.logs_path = root+r"/logs"

Definition at line 21 of file example4 - train.py.

example4 - train.loss

Definition at line 111 of file example4 - train.py.

example4 - train.merge6 = concatenate([drop4, up6], axis=3)

Definition at line 88 of file example4 - train.py.

example4 - train.merge7 = concatenate([conv3, up7], axis=3)

Definition at line 93 of file example4 - train.py.

example4 - train.merge8 = concatenate([conv2, up8], axis=3)

Definition at line 99 of file example4 - train.py.

example4 - train.merge9 = concatenate([conv1, up9], axis=3)

Definition at line 104 of file example4 - train.py.

example4 - train.metrics

Definition at line 111 of file example4 - train.py.

example4 - train.model = Model(inputs=inputs, outputs=conv10)

Definition at line 110 of file example4 - train.py.
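
The conv/pool/drop/up/merge tensors listed alphabetically on this page assemble into a U-Net. A condensed sketch of that wiring in execution order, with one Conv2D per level where the script stacks two, and hypothetical input dimensions:

from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Dropout,
                                     UpSampling2D, concatenate)
from tensorflow.keras.models import Model

img_width, img_height, channels = 128, 128, 2   # hypothetical sizes
input_size = (img_width, img_height, channels)

# Encoder: each level halves the spatial resolution and doubles the filters.
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

# Bottleneck
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
drop5 = Dropout(0.5)(conv5)

# Decoder: upsample, concatenate with the matching encoder tensor, convolve.
up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)

# 1x1 convolution back to the input channel count, sigmoid for [0, 1] output.
conv10 = Conv2D(channels, 1, activation='sigmoid')(conv9)
model = Model(inputs=inputs, outputs=conv10)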

example4 - train.model_checkpoint = ModelCheckpoint(models_path + r"/unet_membrane.hdf5", monitor='loss', verbose=1, save_best_only=True)

Definition at line 242 of file example4 - train.py.

string example4 - train.model_name = 'DEPTH_'

Definition at line 60 of file example4 - train.py.

string example4 - train.models_path = root+r"/models"

Definition at line 20 of file example4 - train.py.

example4 - train.name = logs_path+r'/loss_output_'

Definition at line 61 of file example4 - train.py.

string example4 - train.new_test_cropped_images_path = test_cropped_images_path+r'/'

Definition at line 326 of file example4 - train.py.

tuple example4 - train.noisy_config = (noisy_images, total_cropped_images, False, origin_files_index_size_path_test)

Definition at line 313 of file example4 - train.py.

list example4 - train.noisy_images = [f for f in glob.glob(train_images + "**/res*" + IMAGE_EXTENSION, recursive=True)]

Definition at line 133 of file example4 - train.py.

example4 - train.noisy_input_train = img

Definition at line 239 of file example4 - train.py.

example4 - train.old_stdout = sys.stdout

Definition at line 58 of file example4 - train.py.
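
old_stdout, log_file, name, and timestr point at the usual redirect-stdout-to-a-log-file idiom for capturing training loss output. A sketch, with an assumed final filename:

import os
import sys
import time

logs_path = "./unet_flow/logs"
os.makedirs(logs_path, exist_ok=True)
name = logs_path + r'/loss_output_'
timestr = time.strftime("%Y%m%d-%H%M%S")

# Send print() output (e.g. per-epoch loss) to a timestamped log file, then
# restore the original stream afterwards. The exact filename is an assumption.
old_stdout = sys.stdout
log_file = open(name + timestr + ".log", "w")
sys.stdout = log_file
try:
    print("training started")
finally:
    sys.stdout = old_stdout
    log_file.close()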

example4 - train.optimizer

Definition at line 111 of file example4 - train.py.

example4 - train.origin_file_name

Definition at line 395 of file example4 - train.py.

example4 - train.origin_files_index_size_path

Definition at line 317 of file example4 - train.py.

dictionary example4 - train.origin_files_index_size_path_test = {}

Definition at line 259 of file example4 - train.py.

string example4 - train.outfile = denoised_dir+'/'

Definition at line 421 of file example4 - train.py.

example4 - train.path = os.path.join(train_cropped_images_path, curr_cropped_images[i])

Definition at line 201 of file example4 - train.py.

list example4 - train.paths = [root, images_path, models_path, logs_path, train_images, train_cropped_images_path]

Definition at line 24 of file example4 - train.py.

example4 - train.pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

Definition at line 71 of file example4 - train.py.

example4 - train.pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

Definition at line 74 of file example4 - train.py.

example4 - train.pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

Definition at line 77 of file example4 - train.py.

example4 - train.pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

Definition at line 81 of file example4 - train.py.

list example4 - train.pure_images = [f for f in glob.glob(train_images + "**/gt*" + IMAGE_EXTENSION, recursive=True)]

Definition at line 134 of file example4 - train.py.

example4 - train.pure_input_train = img

Definition at line 237 of file example4 - train.py.

example4 - train.rolling_frame_num = 0

Definition at line 142 of file example4 - train.py.

string example4 - train.root = r"./unet_flow"

Definition at line 18 of file example4 - train.py.

example4 - train.row

Definition at line 403 of file example4 - train.py.

int example4 - train.row_end = row+cropped_h

Definition at line 406 of file example4 - train.py.

example4 - train.sample = samples[i:i+1]

Definition at line 402 of file example4 - train.py.

example4 - train.samples = img

Definition at line 393 of file example4 - train.py.

string example4 - train.save_model_name = models_path+'/'

Definition at line 166 of file example4 - train.py.

example4 - train.save_to
Initial value:
= os.path.join(train_cropped_images_path,
               name + '_{:03}' + '_row_' + str(row_i) + '_col_' + str(col_i) + '_width' + str(w) + '_height' + str(h) + IMAGE_EXTENSION)

Definition at line 157 of file example4 - train.py.
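
This initializer is the filename template for saved tiles. A sketch of the cropping loop it implies, built from crop, cropped_w/cropped_h, and the template above; the frame, its base name, and the tile size are hypothetical.

import os
import numpy as np
from PIL import Image

train_cropped_images_path = "./unet_flow/images/train_cropped"
IMAGE_EXTENSION = '.png'
cropped_w, cropped_h = 160, 120                 # hypothetical tile size
name = "res_frame"                              # hypothetical per-frame base name

os.makedirs(train_cropped_images_path, exist_ok=True)
img = Image.fromarray(np.zeros((480, 640), dtype="uint16"))  # stand-in 16-bit frame
w, h = cropped_w, cropped_h

count = 0
for row_i in range(0, img.height, cropped_h):
    for col_i in range(0, img.width, cropped_w):
        crop = img.crop((col_i, row_i, col_i + w, row_i + h))
        save_to = os.path.join(
            train_cropped_images_path,
            name + '_{:03}' + '_row_' + str(row_i) + '_col_' + str(col_i) +
            '_width' + str(w) + '_height' + str(h) + IMAGE_EXTENSION)
        crop.save(save_to.format(count))
        count += 1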

example4 - train.stdout

Definition at line 63 of file example4 - train.py.

example4 - train.steps_per_epoch = len(cropped_noisy_images)//unet_epochs

Definition at line 243 of file example4 - train.py.
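
A sketch of how compiled_model, model_checkpoint, steps_per_epoch, and unet_epochs could come together for training, assuming the model and the noisy_input_train / pure_input_train arrays built in the sketches above already exist; the optimizer, loss, batch size, tf.data wrapping, and exact fit() arguments are assumptions, not the script's.

import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint

models_path = "./unet_flow/models"
unet_epochs = 1
batch_size = 8   # hypothetical

# `model`, `noisy_input_train`, `pure_input_train`, and `cropped_noisy_images`
# are assumed to exist from the earlier steps documented on this page.
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
compiled_model = model

model_checkpoint = ModelCheckpoint(models_path + r"/unet_membrane.hdf5",
                                   monitor='loss', verbose=1, save_best_only=True)

# steps_per_epoch as documented above; feed the arrays through a repeating
# tf.data pipeline so the step count is well defined.
steps_per_epoch = len(cropped_noisy_images) // unet_epochs
dataset = (tf.data.Dataset.from_tensor_slices((noisy_input_train, pure_input_train))
           .batch(batch_size).repeat())
compiled_model.fit(dataset,
                   epochs=unet_epochs,
                   steps_per_epoch=steps_per_epoch,
                   callbacks=[model_checkpoint])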

example4 - train.t1 = time.perf_counter()

Definition at line 399 of file example4 - train.py.

example4 - train.t2 = time.perf_counter()

Definition at line 418 of file example4 - train.py.

string example4 - train.test_cropped_images_path = images_path+r"/test_cropped"

Definition at line 265 of file example4 - train.py.

string example4 - train.test_images = images_path+r"/test"

Definition at line 264 of file example4 - train.py.

example4 - train.test_img_height

Definition at line 260 of file example4 - train.py.

example4 - train.test_img_width

Definition at line 260 of file example4 - train.py.

example4 - train.test_model_name = save_model_name

Definition at line 262 of file example4 - train.py.

example4 - train.timestr = time.strftime("%Y%m%d-%H%M%S")

Definition at line 59 of file example4 - train.py.

example4 - train.total_cropped_images = [0]*len(noisy_images)

Definition at line 306 of file example4 - train.py.

string example4 - train.train_cropped_images_path = images_path+r"/train_cropped"

Definition at line 23 of file example4 - train.py.

string example4 - train.train_images = images_path+r"/train"

Definition at line 22 of file example4 - train.py.

int example4 - train.unet_epochs = 1

Definition at line 37 of file example4 - train.py.

example4 - train.up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))

Definition at line 87 of file example4 - train.py.

example4 - train.up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))

Definition at line 92 of file example4 - train.py.

example4 - train.up8
Initial value:
= Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))

Definition at line 97 of file example4 - train.py.

example4 - train.up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))

Definition at line 103 of file example4 - train.py.

example4 - train.w

Definition at line 141 of file example4 - train.py.

example4 - train.whole_image = np.zeros((height, width, channels), dtype="float32")

Definition at line 397 of file example4 - train.py.

example4 - train.width

Definition at line 152 of file example4 - train.py.



librealsense2
Author(s): Sergey Dorodnicov, Doron Hirshberg, Mark Horn, Reagan Lopez, Itay Carpis
autogenerated on Mon May 3 2021 02:50:36