vgg16_batch_normalization.py
import chainer
import chainer.functions as F
import chainer.links as L


class VGG16BatchNormalization(chainer.Chain):
    """VGG16 network with batch normalization after every convolution."""

    def __init__(self, n_class=1000):
        super(VGG16BatchNormalization, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=1),
            bn1_1=L.BatchNormalization(64),
            conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
            bn1_2=L.BatchNormalization(64),

            conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
            bn2_1=L.BatchNormalization(128),
            conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
            bn2_2=L.BatchNormalization(128),

            conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
            bn3_1=L.BatchNormalization(256),
            conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            bn3_2=L.BatchNormalization(256),
            conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
            bn3_3=L.BatchNormalization(256),

            conv4_1=L.Convolution2D(256, 512, 3, stride=1, pad=1),
            bn4_1=L.BatchNormalization(512),
            conv4_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            bn4_2=L.BatchNormalization(512),
            conv4_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            bn4_3=L.BatchNormalization(512),

            conv5_1=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            bn5_1=L.BatchNormalization(512),
            conv5_2=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            bn5_2=L.BatchNormalization(512),
            conv5_3=L.Convolution2D(512, 512, 3, stride=1, pad=1),
            bn5_3=L.BatchNormalization(512),

            fc6=L.Linear(25088, 4096),  # 25088 = 512 * 7 * 7 for a 224x224 input
            fc7=L.Linear(4096, 4096),
            fc8=L.Linear(4096, n_class),
        )

    def __call__(self, x, t=None):
        # Five convolution blocks, each followed by 2x2 max pooling.
        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn4_1(self.conv4_1(h)))
        h = F.relu(self.bn4_2(self.conv4_2(h)))
        h = F.relu(self.bn4_3(self.conv4_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.bn5_1(self.conv5_1(h)))
        h = F.relu(self.bn5_2(self.conv5_2(h)))
        h = F.relu(self.bn5_3(self.conv5_3(h)))
        h = F.max_pooling_2d(h, 2, stride=2)

        # Classifier: two dropout-regularized fully connected layers
        # and the final n_class-way layer.
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
        h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
        h = self.fc8(h)

        self.pred = F.softmax(h)

        if t is None:
            # Inference mode: no labels given, so only self.pred is available.
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(h, t)
        self.acc = F.accuracy(self.pred, t)

        return self.loss
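
A minimal usage sketch, not part of the original file: assuming Chainer v2 or later, the model can be run in inference mode on a batch of 224x224 RGB images as follows. The dummy input and variable names are illustrative only.

import numpy as np
import chainer

model = VGG16BatchNormalization(n_class=1000)
x = np.zeros((1, 3, 224, 224), dtype=np.float32)  # dummy RGB batch
with chainer.using_config('train', False), chainer.no_backprop_mode():
    model(x)             # forward pass without labels
print(model.pred.shape)  # -> (1, 1000) softmax class probabilities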

