setup_linux.py
Go to the documentation of this file.
00001 # --------------------------------------------------------
00002 # Deformable Convolutional Networks
00003 # Copyright (c) 2015 Microsoft
00004 # Licensed under The MIT License [see LICENSE for details]
00005 # Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
00006 # --------------------------------------------------------
00007 
00008 import os
00009 from os.path import join as pjoin
00010 from setuptools import setup
00011 from distutils.extension import Extension
00012 from Cython.Distutils import build_ext
00013 import numpy as np
00014 
00015 
def find_in_path(name, path):
    """Return the absolute path of *name* found on *path*, or None.

    *path* is an os.pathsep-separated list of directories, searched in
    order; the first existing match wins.
    """
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    # ('directory' instead of 'dir' avoids shadowing the builtin.)
    for directory in path.split(os.pathsep):
        candidate = pjoin(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
00025 
00026 
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found,
    everything is based on finding 'nvcc' in the PATH.

    Raises:
        EnvironmentError: if nvcc cannot be located or any expected
            CUDA directory does not exist.
    """
    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        # otherwise, search the PATH for NVCC
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        # nvcc lives in <home>/bin, so home is two levels up from the binary.
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib64')}
    # BUGFIX: dict.iteritems() is Python 2 only; .items() works on 2 and 3.
    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))

    return cudaconfig
00056 CUDA = locate_cuda()
00057 
00058 
# Obtain the numpy include directory.  This logic works across numpy
# versions: modern numpy exposes get_include(); very old releases only
# had get_numpy_include(), hence the AttributeError fallback.
try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()
00064 
00065 
def customize_compiler_for_nvcc(self):
    """Inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    If you subclass UnixCCompiler, it's not trivial to get your subclass
    injected in and still have the right customizations (i.e.
    distutils.sysconfig.customize_compiler) run on it. So instead of the
    OO route, this mutates the compiler instance in place — a kind of
    weird functional subclassing.

    After patching, '.cu' sources are accepted and extra_compile_args is
    expected to be a dict with 'gcc' and 'nvcc' keys; the matching flag
    list is chosen per source file.
    """
    # tell the compiler it can process .cu files
    self.src_extensions.append('.cu')

    # save references to the default compiler_so and _compile methods
    # (renamed from 'super' to avoid shadowing the builtin)
    default_compiler_so = self.compiler_so
    default_compile = self._compile

    # now redefine the _compile method. This gets executed for each
    # object file, but distutils doesn't have the ability to change
    # compilers based on source extension: we add it.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # use nvcc for .cu files
            self.set_executable('compiler_so', CUDA['nvcc'])
            # use only a subset of the extra_postargs, which are 1-1 translated
            # from the extra_compile_args in the Extension class
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']

        default_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # reset the default compiler_so, which we might have changed for cuda
        self.compiler_so = default_compiler_so

    # inject our redefined _compile method into the instance
    self._compile = _compile
00101 
00102 
# run the customize_compiler at build time
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler for nvcc dispatch.

    Defers customize_compiler_for_nvcc() until build_extensions(), when
    distutils has created the actual compiler object (self.compiler).
    """
    def build_extensions(self):
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
00108 
00109 
ext_modules = [
    # CPU-only NMS: plain Cython source compiled with gcc.
    Extension(
        "cpu_nms",
        ["cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs = [numpy_include]
    ),
    # GPU NMS: a CUDA kernel (.cu) plus its Cython wrapper (.pyx),
    # linked against the CUDA runtime.
    Extension('gpu_nms',
        ['nms_kernel.cu', 'gpu_nms.pyx'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        runtime_library_dirs=[CUDA['lib64']],
        # this syntax is specific to this build system: we're only going
        # to use certain compiler args with nvcc and not with gcc; the
        # implementation of this trick is in customize_compiler_for_nvcc()
        # above.
        # NOTE(review): -arch=sm_35 hard-codes the target compute
        # capability — confirm it matches the deployment GPUs.
        extra_compile_args={'gcc': ["-Wno-unused-function"],
                            'nvcc': ['-arch=sm_35',
                                     '--ptxas-options=-v',
                                     '-c',
                                     '--compiler-options',
                                     "'-fPIC'"]},
        include_dirs = [numpy_include, CUDA['include']]
    ),
]
00135 
# Build both extensions, swapping in the nvcc-aware build_ext command.
setup(
    name='nms',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)


rail_object_detector
Author(s):
autogenerated on Sat Jun 8 2019 20:26:30