drfcn_config.py
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong, Bin Xiao
# --------------------------------------------------------

import yaml
import numpy as np
from easydict import EasyDict as edict

config = edict()

config.MXNET_VERSION = ''
config.output_path = ''
config.symbol = ''
config.gpus = ''
config.CLASS_AGNOSTIC = True
config.SCALES = [(600, 1000)]  # first is scale (the shorter side); second is max size
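
# A minimal sketch (an assumption, not code used elsewhere in this file) of how
# SCALES is typically consumed: the shorter image side is resized to the target
# scale, and the factor is clipped so the longer side does not exceed max size.
def _example_im_scale(height, width, target_size=600, max_size=1000):
    # Illustrative helper only; the real resizing lives in the data loader.
    im_scale = float(target_size) / min(height, width)
    if round(im_scale * max(height, width)) > max_size:
        im_scale = float(max_size) / max(height, width)
    return im_scale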

# default training
config.default = edict()
config.default.frequent = 20
config.default.kvstore = 'device'

# network related params
config.network = edict()
config.network.pretrained = ''
config.network.pretrained_epoch = 0
config.network.PIXEL_MEANS = np.array([0, 0, 0])
config.network.IMAGE_STRIDE = 0
config.network.RPN_FEAT_STRIDE = 16
config.network.RCNN_FEAT_STRIDE = 16
config.network.FIXED_PARAMS = ['gamma', 'beta']
config.network.FIXED_PARAMS_SHARED = ['gamma', 'beta']
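# Presumably (following the upstream Deformable-ConvNets convention) these are
# name patterns of weights kept frozen during training; FIXED_PARAMS_SHARED
# additionally marks layers shared between RPN and R-FCN in alternate training.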
config.network.ANCHOR_SCALES = (8, 16, 32)
config.network.ANCHOR_RATIOS = (0.5, 1, 2)
config.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)
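# With the defaults above this is 3 scales x 3 ratios = 9 anchors per
# feature-map location, one location every RPN_FEAT_STRIDE (16) input pixels.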

# dataset related params
config.dataset = edict()
config.dataset.dataset = 'PascalVOC'
config.dataset.image_set = '2007_trainval'
config.dataset.test_image_set = '2007_test'
config.dataset.root_path = './data'
config.dataset.dataset_path = './data/VOCdevkit'
config.dataset.NUM_CLASSES = 21


config.TRAIN = edict()

config.TRAIN.lr = 0
config.TRAIN.lr_step = ''
config.TRAIN.lr_factor = 0.1
config.TRAIN.warmup = False
config.TRAIN.warmup_lr = 0
config.TRAIN.warmup_step = 0
config.TRAIN.momentum = 0.9
config.TRAIN.wd = 0.0005
config.TRAIN.begin_epoch = 0
config.TRAIN.end_epoch = 0
config.TRAIN.model_prefix = ''

config.TRAIN.ALTERNATE = edict()
config.TRAIN.ALTERNATE.RPN_BATCH_IMAGES = 0
config.TRAIN.ALTERNATE.RCNN_BATCH_IMAGES = 0
config.TRAIN.ALTERNATE.rpn1_lr = 0
config.TRAIN.ALTERNATE.rpn1_lr_step = ''    # recommend '2'
config.TRAIN.ALTERNATE.rpn1_epoch = 0       # recommend 3
config.TRAIN.ALTERNATE.rfcn1_lr = 0
config.TRAIN.ALTERNATE.rfcn1_lr_step = ''   # recommend '5'
config.TRAIN.ALTERNATE.rfcn1_epoch = 0      # recommend 8
config.TRAIN.ALTERNATE.rpn2_lr = 0
config.TRAIN.ALTERNATE.rpn2_lr_step = ''    # recommend '2'
config.TRAIN.ALTERNATE.rpn2_epoch = 0       # recommend 3
config.TRAIN.ALTERNATE.rfcn2_lr = 0
config.TRAIN.ALTERNATE.rfcn2_lr_step = ''   # recommend '5'
config.TRAIN.ALTERNATE.rfcn2_epoch = 0      # recommend 8
# optional
config.TRAIN.ALTERNATE.rpn3_lr = 0
config.TRAIN.ALTERNATE.rpn3_lr_step = ''    # recommend '2'
config.TRAIN.ALTERNATE.rpn3_epoch = 0       # recommend 3

# whether resume training
config.TRAIN.RESUME = False
# whether flip image
config.TRAIN.FLIP = True
# whether shuffle image
config.TRAIN.SHUFFLE = True
# whether use OHEM
config.TRAIN.ENABLE_OHEM = False
# size of images for each device, 2 for rcnn, 1 for rpn and e2e
config.TRAIN.BATCH_IMAGES = 2
# e2e changes behavior of anchor loader and metric
config.TRAIN.END2END = False
# group images with similar aspect ratio
config.TRAIN.ASPECT_GROUPING = True

# R-CNN
# rcnn rois batch size
config.TRAIN.BATCH_ROIS = 128
config.TRAIN.BATCH_ROIS_OHEM = 128
# rcnn rois sampling params
config.TRAIN.FG_FRACTION = 0.25
config.TRAIN.FG_THRESH = 0.5
config.TRAIN.BG_THRESH_HI = 0.5
config.TRAIN.BG_THRESH_LO = 0.0
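# Sampling sketch (the standard Fast R-CNN scheme, stated here as an
# assumption): per image, BATCH_ROIS proposals are sampled; about FG_FRACTION
# of them are foreground (IoU with a ground-truth box >= FG_THRESH) and the
# rest are background (IoU in [BG_THRESH_LO, BG_THRESH_HI)).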
# rcnn bounding box regression params
config.TRAIN.BBOX_REGRESSION_THRESH = 0.5
config.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])

# RPN anchor loader
# rpn anchors batch size
config.TRAIN.RPN_BATCH_SIZE = 256
# rpn anchors sampling params
config.TRAIN.RPN_FG_FRACTION = 0.5
config.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
config.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
config.TRAIN.RPN_CLOBBER_POSITIVES = False
# rpn bounding box regression params
config.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
config.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
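# Following the original Faster R-CNN anchor-target convention (noted here as
# an assumption): -1.0 means uniform example weighting, while a value p in
# (0, 1) would give positives a total weight of p and negatives 1 - p.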

# used for end2end training
# RPN proposal
config.TRAIN.CXX_PROPOSAL = True
config.TRAIN.RPN_NMS_THRESH = 0.7
config.TRAIN.RPN_PRE_NMS_TOP_N = 12000
config.TRAIN.RPN_POST_NMS_TOP_N = 2000
config.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE
# approximate bounding box regression
config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = False
config.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)
config.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)
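
# A minimal sketch (an assumption, not used by this file) of how BBOX_MEANS and
# BBOX_STDS are typically applied when BBOX_NORMALIZATION_PRECOMPUTED is True:
# regression targets (dx, dy, dw, dh) are whitened during training, and network
# outputs are un-whitened again before decoding boxes at test time. Four values
# suffice because CLASS_AGNOSTIC regression is enabled above.
def _example_normalize_bbox_targets(targets):
    # targets: ndarray of shape (N, 4)
    means = np.array(config.TRAIN.BBOX_MEANS)
    stds = np.array(config.TRAIN.BBOX_STDS)
    return (targets - means) / stds

def _example_unnormalize_bbox_preds(preds):
    # inverse transform applied to network outputs at test time
    means = np.array(config.TRAIN.BBOX_MEANS)
    stds = np.array(config.TRAIN.BBOX_STDS)
    return preds * stds + means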

config.TEST = edict()

# R-CNN testing
# use rpn to generate proposal
config.TEST.HAS_RPN = False
# size of images for each device
config.TEST.BATCH_IMAGES = 1

# RPN proposal
config.TEST.CXX_PROPOSAL = True
config.TEST.RPN_NMS_THRESH = 0.7
config.TEST.RPN_PRE_NMS_TOP_N = 6000
config.TEST.RPN_POST_NMS_TOP_N = 300
config.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE

# RPN generate proposal
config.TEST.PROPOSAL_NMS_THRESH = 0.7
config.TEST.PROPOSAL_PRE_NMS_TOP_N = 20000
config.TEST.PROPOSAL_POST_NMS_TOP_N = 2000
config.TEST.PROPOSAL_MIN_SIZE = config.network.RPN_FEAT_STRIDE

# RCNN nms
config.TEST.NMS = 0.3

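# maximum number of detections kept per image after NMS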
config.TEST.max_per_image = 300

# Test Model Epoch
config.TEST.test_epoch = 0


def update_config(config_file):
    """Merge an experiment YAML file into the default config in place.

    Nested sections (e.g. TRAIN, network) are updated key by key; every key
    in the YAML file must already exist in the defaults above.
    """
    with open(config_file) as f:
        # the Loader is passed explicitly; newer PyYAML versions require one
        exp_config = edict(yaml.load(f, Loader=yaml.SafeLoader))
        for k, v in exp_config.items():
            if k in config:
                if isinstance(v, dict):
                    if k == 'TRAIN':
                        if 'BBOX_WEIGHTS' in v:
                            v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
                    elif k == 'network':
                        if 'PIXEL_MEANS' in v:
                            v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
                    for vk, vv in v.items():
                        config[k][vk] = vv
                else:
                    if k == 'SCALES':
                        config[k][0] = tuple(v)
                    else:
                        config[k] = v
            else:
                raise ValueError("key {} must exist in config.py".format(k))
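
# Example usage (the YAML path, module name and values below are purely
# illustrative; only keys defined above may appear in the file):
#
#   # experiments/rfcn_voc.yaml
#   MXNET_VERSION: '1.0.0'
#   output_path: './output/rfcn'
#   symbol: resnet_v1_101_rfcn
#   gpus: '0'
#   SCALES:
#   - 600
#   - 1000
#   TRAIN:
#     lr: 0.0005
#     lr_step: '4.83'
#
#   from drfcn_config import config, update_config
#   update_config('experiments/rfcn_voc.yaml')
#   print(config.TRAIN.lr)   # -> 0.0005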

