reorg_layer.c
#include "reorg_layer.h"
#include "cuda.h"
#include "blas.h"
#include <stdio.h>
#include <stdlib.h>


/* Builds a reorg (reshape/passthrough) layer. In the forward direction the
 * layer trades spatial resolution for channels; with reverse set it does the
 * opposite. The total number of elements is unchanged either way. */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse)
{
    layer l = {0};
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.h = h;
    l.w = w;
    l.c = c;
    if(reverse){
        /* reverse: expand spatially, shrink the channel count */
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        /* forward: shrink spatially, grow the channel count */
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;
    fprintf(stderr, "reorg              /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n",  stride, w, h, c, l.out_w, l.out_h, l.out_c);
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    int output_size = l.out_h * l.out_w * l.out_c * batch;
    l.output =  calloc(output_size, sizeof(float));
    l.delta =   calloc(output_size, sizeof(float));

    l.forward = forward_reorg_layer;
    l.backward = backward_reorg_layer;
#ifdef GPU
    l.forward_gpu = forward_reorg_layer_gpu;
    l.backward_gpu = backward_reorg_layer_gpu;

    l.output_gpu  = cuda_make_array(l.output, output_size);
    l.delta_gpu   = cuda_make_array(l.delta, output_size);
#endif
    return l;
}
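
/* Example (not part of the original file; values are illustrative): a stride-2
 * reorg applied to a 26 x 26 x 64 feature map, as in YOLOv2's passthrough
 * connection, yields a 13 x 13 x 256 output, since out_w = w/stride,
 * out_h = h/stride and out_c = c*stride*stride:
 *
 *   layer l = make_reorg_layer(1, 26, 26, 64, 2, 0);
 *   // l.out_w == 13, l.out_h == 13, l.out_c == 256
 *   // l.outputs == 13*13*256 == 26*26*64 == l.inputs
 */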

/* Recomputes the output geometry and reallocates the layer's buffers after
 * the network input size changes. */
void resize_reorg_layer(layer *l, int w, int h)
{
    int stride = l->stride;
    int c = l->c;

    l->h = h;
    l->w = w;

    if(l->reverse){
        l->out_w = w*stride;
        l->out_h = h*stride;
        l->out_c = c/(stride*stride);
    }else{
        l->out_w = w/stride;
        l->out_h = h/stride;
        l->out_c = c*(stride*stride);
    }

    l->outputs = l->out_h * l->out_w * l->out_c;
    l->inputs = l->outputs;   /* reorg preserves the element count */
    int output_size = l->outputs * l->batch;

    l->output = realloc(l->output, output_size * sizeof(float));
    l->delta = realloc(l->delta, output_size * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu  = cuda_make_array(l->output, output_size);
    l->delta_gpu   = cuda_make_array(l->delta,  output_size);
#endif
}
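
/* Example (illustrative only): resizing the stride-2 layer from the sketch
 * above for a feature map that grows from 26 x 26 to 32 x 32:
 *
 *   resize_reorg_layer(&l, 32, 32);
 *   // l.out_w == 16, l.out_h == 16, l.out_c == 256
 */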

/* Forward pass: rearrange the input into the output buffer. The reverse flag
 * selects the direction of the index mapping performed by reorg_cpu(). */
void forward_reorg_layer(const layer l, network_state state)
{
    if(l.reverse){
        reorg_cpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    }else {
        reorg_cpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    }
}

/* Backward pass: route the gradient back through the inverse rearrangement,
 * so the flag passed to reorg_cpu() is flipped relative to the forward pass. */
void backward_reorg_layer(const layer l, network_state state)
{
    if(l.reverse){
        reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 0, state.delta);
    }else{
        reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 1, state.delta);
    }
}
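
/* Sketch of the index mapping assumed for reorg_cpu() (based on darknet's
 * blas.c; shown only as a comment here, the real definition lives elsewhere).
 * For each input element at (b, k, j, i) in a w x h x c map, with
 * out_c = c/(stride*stride):
 *
 *   in_index  = i + w*(j + h*(k + c*b));
 *   c2        = k % out_c;
 *   offset    = k / out_c;
 *   w2        = i*stride + offset % stride;
 *   h2        = j*stride + offset / stride;
 *   out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
 *
 * With the flag set, out[out_index] = x[in_index]; otherwise the assignment
 * runs the other way, which is why the backward pass simply flips the flag.
 */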

#ifdef GPU
/* GPU forward pass: same rearrangement as the CPU path, using reorg_ongpu()
 * on the device buffers. */
void forward_reorg_layer_gpu(layer l, network_state state)
{
    if(l.reverse){
        reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
    }else {
        reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output_gpu);
    }
}

/* GPU backward pass: flips the direction flag, mirroring backward_reorg_layer(). */
void backward_reorg_layer_gpu(layer l, network_state state)
{
    if(l.reverse){
        reorg_ongpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, state.delta);
    }else{
        reorg_ongpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, state.delta);
    }
}
#endif

