setup_windows.py
Go to the documentation of this file.
00001 # --------------------------------------------------------
00002 # Deformable Convolutional Networks
00003 # Copyright (c) 2015 Microsoft
00004 # Licensed under The MIT License [see LICENSE for details]
00005 # Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
00006 # --------------------------------------------------------
00007 
00008 import numpy as np
00009 import os
00010 from os.path import join as pjoin
00011 #from distutils.core import setup
00012 from setuptools import setup
00013 from distutils.extension import Extension
00014 from Cython.Distutils import build_ext
00015 import subprocess
00016 
# change for windows, by MrX
# Name of the NVIDIA CUDA compiler binary on Windows (a Linux build would
# look for plain 'nvcc').
nvcc_bin = 'nvcc.exe'
# Sub-directory of the CUDA toolkit home that holds the 64-bit link libraries.
lib_dir = 'lib/x64'

import distutils.msvc9compiler
# Force distutils to use MSVC 14.0 (Visual Studio 2015) rather than whatever
# it would auto-detect.
# NOTE(review): monkey-patching a module-level constant of msvc9compiler —
# presumably the auto-detection picks the wrong toolchain for CUDA on this
# setup; confirm against the project's Windows build instructions.
distutils.msvc9compiler.VERSION = 14.0
00023 
00024 
def find_in_path(name, path):
    """Find a file in a search path.

    Adapted from
    http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/

    Parameters
    ----------
    name : str
        File name to look for (e.g. ``'nvcc.exe'``).
    path : str
        ``os.pathsep``-separated list of directories, searched in order.

    Returns
    -------
    str or None
        Absolute path of the first match, or ``None`` when *name* is not
        present in any of the directories.
    """
    # 'directory' rather than 'dir' so the builtin is not shadowed.
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
00034 
00035 
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDA_PATH env variable (set by the Windows
    CUDA installer).  If that is not set, everything is based on finding
    'nvcc' in the PATH.

    Raises
    ------
    EnvironmentError
        If nvcc cannot be located, or any of the expected toolkit
        directories does not exist on disk.
    """

    # first check if the CUDA_PATH env variable is in use
    if 'CUDA_PATH' in os.environ:
        home = os.environ['CUDA_PATH']
        print("home = %s\n" % home)
        nvcc = pjoin(home, 'bin', nvcc_bin)
    else:
        # otherwise, search the PATH (plus the conventional Unix install
        # location) for the nvcc binary
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDA_PATH')
        # nvcc lives in <home>/bin, so the toolkit home is two levels up
        home = os.path.dirname(os.path.dirname(nvcc))
        print("home = %s, nvcc = %s\n" % (home, nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, lib_dir)}
    # BUGFIX: dict.iteritems() exists only on Python 2 and raised
    # AttributeError under Python 3; items() works on both.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
CUDA = locate_cuda()
00071 
00072 
# Obtain the numpy include directory.  This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Very old numpy releases exposed get_numpy_include() instead.
    numpy_include = np.get_numpy_include()
00078 
00079 
def customize_compiler_for_nvcc(self):
    """Inject deep into distutils to customize how the dispatch
    to the host compiler / nvcc works.

    If you subclass the compiler class, it's not trivial to get your
    subclass injected in and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it.  So instead of
    going the OO route, we monkey-patch the ``compile`` method of the
    compiler *instance* passed in as ``self``.
    """

    # Save a reference to the unpatched method so the wrapper can delegate.
    # (Named 'default_compile' rather than 'super' so the builtin is not
    # shadowed.)
    default_compile = self.compile

    # Redefine the compile method.  distutils has no built-in way to pick a
    # compiler per source extension, so we add the dispatch ourselves.
    def compile(sources, output_dir=None, macros=None, include_dirs=None,
                debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        # The Extension objects in this project pass extra_compile_args as a
        # dict keyed by compiler ('gcc' / 'nvcc'); those entries are handed
        # to us 1-1 as extra_postargs.  Fall through gracefully when a plain
        # list (or None) arrives, so the patched compiler still builds
        # ordinary extensions instead of raising TypeError.
        if isinstance(extra_postargs, dict):
            postfix = os.path.splitext(sources[0])[1]
            if postfix == '.cu':
                # use the nvcc flag set for .cu files
                postargs = extra_postargs['nvcc']
            else:
                postargs = extra_postargs['gcc']
        else:
            postargs = extra_postargs

        return default_compile(sources, output_dir, macros, include_dirs,
                               debug, extra_preargs, postargs, depends)

    # inject our redefined compile method into the instance
    self.compile = compile
00121 
00122 
00123 # run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext variant that patches the compiler before building.

    Right before the extensions are compiled, the compiler instance is
    run through customize_compiler_for_nvcc so that per-source-extension
    flag dispatch is in place; the stock build machinery then runs
    unchanged.
    """

    def build_extensions(self):
        # Patch the already-customized compiler instance, then delegate to
        # the regular Cython/distutils build.
        compiler = self.compiler
        customize_compiler_for_nvcc(compiler)
        build_ext.build_extensions(self)
00128 
00129 
# Extension modules to build.  extra_compile_args is passed as a dict keyed
# by compiler ('gcc' / 'nvcc'); the compile method installed by
# customize_compiler_for_nvcc picks the matching flag list per source file.
ext_modules = [
    # unix _compile: obj, src, ext, cc_args, extra_postargs, pp_opts
    Extension(
        "cpu_nms",
        sources=["cpu_nms.pyx"],
        extra_compile_args={'gcc': []},
        include_dirs = [numpy_include],
    ),
]

setup(
    name='fast_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger: custom_build_ext patches the compiler
    # instance before any extension is compiled
    cmdclass={'build_ext': custom_build_ext},
)


rail_object_detector
Author(s):
autogenerated on Sat Jun 8 2019 20:26:30