example4 - train.py File Reference

Go to the source code of this file.

Namespaces

 example4 - train
 

Variables

int example4 - train.all_cropped_num = len(os.listdir(train_cropped_images_path))//3
 
 example4 - train.callbacks
 
int example4 - train.channels = 2
 
 example4 - train.col
 
 example4 - train.col_end = col+cropped_w
 
 example4 - train.compiled_model = model
 
list example4 - train.config_list = [(noisy_images, False), (pure_images, False), (ir_images, True)]
 
 example4 - train.conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
 
 example4 - train.conv10 = Conv2D(channels, 1, activation='sigmoid')(conv9)
 
 example4 - train.conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
 
 example4 - train.conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
 
 example4 - train.conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
 
 example4 - train.conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
 
 example4 - train.conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
 
 example4 - train.conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
 
 example4 - train.conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
 
 example4 - train.conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
 
 example4 - train.crop = img.crop((col_i, row_i, col_i + w, row_i + h))
 
 example4 - train.cropped_h
 
list example4 - train.cropped_image_offsets = []
 
list example4 - train.cropped_images_list = [(cropped_noisy_images, "noisy"), (cropped_pure_images, "pure")]
 
list example4 - train.cropped_ir_images = [f for f in glob.glob(train_cropped_images_path + "**/left*" + IMAGE_EXTENSION, recursive=True)]
 
list example4 - train.cropped_noisy_images = [f for f in glob.glob(train_cropped_images_path + "**/res*" + IMAGE_EXTENSION, recursive=True)]
 Convert cropped images to arrays.
 
list example4 - train.cropped_pure_images = [f for f in glob.glob(train_cropped_images_path + "**/gt*" + IMAGE_EXTENSION, recursive=True)]
 
 example4 - train.cropped_w
 
 example4 - train.curr_cropped_images
 
 example4 - train.denoised_col = cropped_w
 
string example4 - train.denoised_dir = images_path+r"/denoised"
 
 example4 - train.denoised_image = model.predict(sample)
 
 example4 - train.denoised_name = os.path.basename(directory.split('/')[-1])
 
 example4 - train.denoised_row = cropped_h
 
 example4 - train.drop4 = Dropout(0.5)(conv4)
 
 example4 - train.drop5 = Dropout(0.5)(conv5)
 
 example4 - train.epochs
 
 example4 - train.file_path = os.path.join(train_cropped_images_path, filename)
 
 example4 - train.filelist
 
 example4 - train.first_image = i*images_num_to_process
 
int example4 - train.frame_num = 0
 
 example4 - train.gpus = tf.config.experimental.list_physical_devices('GPU')
 
 example4 - train.gray_image = cv2.cvtColor(ii, cv2.COLOR_BGR2GRAY)
 
 example4 - train.h
 
 example4 - train.height
 
 example4 - train.ii = cv2.imread(file)
 
 example4 - train.im_and_ir = images_plt
 
 example4 - train.im_files = [f for f in glob.glob(directory + "**/res*" , recursive=True)]
 
string example4 - train.IMAGE_EXTENSION = '.png'
 
int example4 - train.images_num_to_process = 1000
 
string example4 - train.images_path = root+r"/images"
 
list example4 - train.images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in im_files if f.endswith(IMAGE_EXTENSION)]
 
 example4 - train.images_type
 
 example4 - train.img = Image.fromarray(np.array(gray_image).astype("uint16"))
 
 example4 - train.img_height
 
 example4 - train.img_width
 
tuple example4 - train.input_size = (img_width, img_height, channels)
 
 example4 - train.inputs = Input(input_size)
 
tuple example4 - train.ir_config = (ir_images, ir_total_cropped_images, True, {})
 SPLIT IMAGES.
 
string example4 - train.ir_cropped_images_file = test_cropped_images_path+r'/'
 
 example4 - train.ir_im_files = [f for f in glob.glob(ir_cropped_images_file + "**/left*" , recursive=True)]
 
list example4 - train.ir_images = [f for f in glob.glob(train_images + "**/left*" + IMAGE_EXTENSION, recursive=True)]
 
list example4 - train.ir_images_plt = [cv2.imread(f, cv2.IMREAD_UNCHANGED) for f in ir_im_files if f.endswith(IMAGE_EXTENSION)]
 
list example4 - train.ir_total_cropped_images = [0]*len(ir_images)
 
 example4 - train.is_ir
 
 example4 - train.iterations = all_cropped_num//images_num_to_process
 
 example4 - train.limit = first_image+images_num_to_process
 
 example4 - train.log_file = open(name, "w")
 
 example4 - train.logical_gpus = tf.config.experimental.list_logical_devices('GPU')
 
string example4 - train.logs_path = root+r"/logs"
 
 example4 - train.loss
 
 example4 - train.merge6 = concatenate([drop4, up6], axis=3)
 
 example4 - train.merge7 = concatenate([conv3, up7], axis=3)
 
 example4 - train.merge8 = concatenate([conv2, up8], axis=3)
 
 example4 - train.merge9 = concatenate([conv1, up9], axis=3)
 
 example4 - train.metrics
 
 example4 - train.model = Model(inputs=inputs, outputs=conv10)
 
 example4 - train.model_checkpoint = ModelCheckpoint(models_path + r"/unet_membrane.hdf5", monitor='loss', verbose=1, save_best_only=True)
 
string example4 - train.model_name = 'DEPTH_'
 
string example4 - train.models_path = root+r"/models"
 
string example4 - train.name = logs_path+r'/loss_output_'
 
string example4 - train.new_test_cropped_images_path = test_cropped_images_path+r'/'
 
tuple example4 - train.noisy_config = (noisy_images, total_cropped_images, False, origin_files_index_size_path_test)
 
list example4 - train.noisy_images = [f for f in glob.glob(train_images + "**/res*" + IMAGE_EXTENSION, recursive=True)]
 
 example4 - train.noisy_input_train = img
 
 example4 - train.old_stdout = sys.stdout
 
 example4 - train.optimizer
 
 example4 - train.origin_file_name
 
 example4 - train.origin_files_index_size_path
 
dictionary example4 - train.origin_files_index_size_path_test = {}
 
string example4 - train.outfile = denoised_dir+'/'
 
 example4 - train.path = os.path.join(train_cropped_images_path, curr_cropped_images[i])
 
list example4 - train.paths = [root, images_path, models_path, logs_path, train_images, train_cropped_images_path]
 
 example4 - train.pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
 
 example4 - train.pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
 
 example4 - train.pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
 
 example4 - train.pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
 
list example4 - train.pure_images = [f for f in glob.glob(train_images + "**/gt*" + IMAGE_EXTENSION, recursive=True)]
 
 example4 - train.pure_input_train = img
 
int example4 - train.rolling_frame_num = 0
 
string example4 - train.root = r"./unet_flow"
 
 example4 - train.row
 
 example4 - train.row_end = row+cropped_h
 
 example4 - train.sample = samples[i:i+1]
 
 example4 - train.samples = img
 
string example4 - train.save_model_name = models_path+'/'
 
 example4 - train.save_to
 
 example4 - train.stdout
 
 example4 - train.steps_per_epoch = len(cropped_noisy_images)//unet_epochs
 
 example4 - train.t1 = time.perf_counter()
 
 example4 - train.t2 = time.perf_counter()
 
string example4 - train.test_cropped_images_path = images_path+r"/test_cropped"
 
string example4 - train.test_images = images_path+r"/test"
 
 example4 - train.test_img_height
 
 example4 - train.test_img_width
 
 example4 - train.test_model_name = save_model_name
 
 example4 - train.timestr = time.strftime("%Y%m%d-%H%M%S")
 
list example4 - train.total_cropped_images = [0]*len(noisy_images)
 
string example4 - train.train_cropped_images_path = images_path+r"/train_cropped"
 
string example4 - train.train_images = images_path+r"/train"
 
int example4 - train.unet_epochs = 1
 
 example4 - train.up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
 
 example4 - train.up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
 
 example4 - train.up8
 
 example4 - train.up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
 
 example4 - train.w
 
 example4 - train.whole_image = np.zeros((height, width, channels), dtype="float32")
 
 example4 - train.width
 
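The conv*, pool*, drop*, up* and merge* variables above describe a standard U-Net encoder/decoder built with tf.keras. A minimal sketch of how those layers plausibly fit together follows; the crop size (img_width, img_height) and the definition of up8, which has no initializer in this listing, are assumptions, and the real script may stack additional convolutions per block.

    # Minimal U-Net sketch assembled from the variables listed above.
    # Assumptions: tensorflow.keras imports, a 128x128 crop size, a single
    # Conv2D per block, and an up8 mirroring up7/up9 (not shown in the listing).
    from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Dropout,
                                         UpSampling2D, concatenate)
    from tensorflow.keras.models import Model

    channels = 2                                   # as in the listing
    img_width, img_height = 128, 128               # assumed crop size
    input_size = (img_width, img_height, channels)

    inputs = Input(input_size)

    # Encoder: convolution + max-pooling, doubling the filter count per level.
    conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    drop5 = Dropout(0.5)(conv5)

    # Decoder: upsample, concatenate with the matching encoder feature map, convolve.
    up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
    merge6 = concatenate([drop4, up6], axis=3)
    conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
    merge7 = concatenate([conv3, up7], axis=3)
    conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))  # assumed
    merge8 = concatenate([conv2, up8], axis=3)
    conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
    merge9 = concatenate([conv1, up9], axis=3)
    conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)

    # 1x1 convolution down to the output channels (sigmoid keeps values in [0, 1]).
    conv10 = Conv2D(channels, 1, activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)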

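The crop, col_end, row_end, cropped_w, cropped_h and cropped_image_offsets variables suggest that each training image is tiled into fixed-size crops before being fed to the network. A hypothetical illustration of that tiling step; variable names follow the listing where possible, while the loop structure and function boundary are assumptions.

    # Tile an image into cropped_w x cropped_h patches, recording each offset.
    from PIL import Image

    def crop_to_tiles(image_path, cropped_w=128, cropped_h=128):
        img = Image.open(image_path)
        w, h = img.size
        cropped_image_offsets = []
        tiles = []
        for row in range(0, h, cropped_h):
            for col in range(0, w, cropped_w):
                row_end = row + cropped_h
                col_end = col + cropped_w
                # PIL pads with zeros when the box extends past the image border.
                crop = img.crop((col, row, col_end, row_end))
                cropped_image_offsets.append((row, col))
                tiles.append(crop)
        return tiles, cropped_image_offsets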

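The compiled_model, callbacks, model_checkpoint, steps_per_epoch and unet_epochs variables cover the compile/fit stage. A sketch under stated assumptions: the optimizer, loss and metrics values are not shown in this listing, so placeholders are used; model is the U-Net assembled in the first sketch; and the zero-filled arrays stand in for the stacked cropped tiles held by noisy_input_train and pure_input_train.

    import os
    import numpy as np
    from tensorflow.keras.callbacks import ModelCheckpoint

    root = r"./unet_flow"                       # as in the listing
    models_path = root + r"/models"
    unet_epochs = 1
    os.makedirs(models_path, exist_ok=True)

    # Placeholder batches standing in for the cropped noisy / pure training tiles.
    noisy_input_train = np.zeros((8, 128, 128, 2), dtype="float32")
    pure_input_train = np.zeros((8, 128, 128, 2), dtype="float32")

    # Placeholder optimizer/loss/metrics; the real settings are not in this listing.
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model_checkpoint = ModelCheckpoint(models_path + r"/unet_membrane.hdf5",
                                       monitor='loss', verbose=1, save_best_only=True)
    model.fit(noisy_input_train, pure_input_train,
              epochs=unet_epochs,
              callbacks=[model_checkpoint])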
librealsense2
Author(s): Sergey Dorodnicov, Doron Hirshberg, Mark Horn, Reagan Lopez, Itay Carpis
autogenerated on Mon May 3 2021 02:50:25