run-unit-tests.py
Go to the documentation of this file.
1 #!python3
2 
3 # License: Apache 2.0. See LICENSE file in root directory.
4 # Copyright(c) 2021 Intel Corporation. All Rights Reserved.
5 
6 import sys, os, subprocess, locale, re, platform, getopt, time
7 from abc import ABC, abstractmethod
8 
# Remove Python's default list of places to look for modules!
# We want only modules in the directories we specifically provide to be found,
# otherwise pyrs other than what we compiled might be found...
sys.path = list()
sys.path.append( '' )  # directs Python to search modules in the current directory first
# Re-add the interpreter's own directories so the standard library still resolves
# (the 'DLLs' entry is presumably for Windows layouts; harmless elsewhere -- confirm)
sys.path.append( os.path.dirname( sys.executable ))
sys.path.append( os.path.join( os.path.dirname( sys.executable ), 'DLLs' ))
sys.path.append( os.path.join( os.path.dirname( sys.executable ), 'lib' ))
# Add our py/ module directory so the rspy helpers below can be imported
current_dir = os.path.dirname( os.path.abspath( __file__ ))
sys.path.append( current_dir + os.sep + "py" )

from rspy import log, file, repo
22 
def usage():
    """
    Print command-line help and exit with status 2 (conventional for usage errors).
    Never returns.
    """
    ourname = os.path.basename(sys.argv[0])
    print( 'Syntax: ' + ourname + ' [options] [dir]' )
    # BUG FIX: the parenthesis opened in this help line was never closed
    print( ' dir: the directory holding the executable tests to run (default to the build directory)' )
    print( 'Options:' )
    print( ' --debug Turn on debugging information' )
    print( ' -v, --verbose Errors will dump the log to stdout' )
    print( ' -q, --quiet Suppress output; rely on exit status (0=no failures)' )
    print( ' -r, --regex run all tests that fit the following regular expression' )
    print( ' -s, --stdout do not redirect stdout to logs' )
    print( ' -t, --tag run all tests with the following tag' )
    print( ' --list-tags print out all available tags. This option will not run any tests')
    print( ' --list-tests print out all available tests. This option will not run any tests')
    sys.exit(2)
37 
# get os and directories for future use
# NOTE: WSL will read as 'Linux' but the build is Windows-based!
system = platform.system()
# True only on a real Linux host: WSL reports 'Linux' but carries "microsoft" in its uname version
linux = ( system == 'Linux'  and  "microsoft" not in platform.uname()[3].lower() )
45 
# Parse command-line:
try:
    opts,args = getopt.getopt( sys.argv[1:], 'hvqr:st:',
        longopts = [ 'help', 'verbose', 'debug', 'quiet', 'regex=', 'stdout', 'tag=', 'list-tags', 'list-tests' ])
except getopt.GetoptError as err:
    log.e( err )  # something like "option -a not recognized"
    usage()
# Defaults for every option-controlled setting:
regex = None        # -r/--regex: only run tests whose name matches this pattern
to_stdout = False   # -s/--stdout: don't redirect each test's output to its own log file
tag = None          # -t/--tag: only run tests carrying this tag
list_tags = False   # --list-tags: print tags instead of running anything
list_tests = False  # --list-tests: print test names instead of running anything
# NOTE(review): '--debug' is accepted by getopt but has no branch below;
# presumably rspy.log picks it up on its own from sys.argv -- confirm
for opt,arg in opts:
    if opt in ('-h','--help'):
        usage()
    elif opt in ('-v','--verbose'):
        log.verbose_on()
    elif opt in ('-q','--quiet'):
        log.quiet_on()
    elif opt in ('-r', '--regex'):
        regex = arg
    elif opt in ('-s', '--stdout'):
        to_stdout = True
    elif opt in ('-t', '--tag'):
        tag = arg
    elif opt == '--list-tags':
        list_tags = True
    elif opt == '--list-tests':
        list_tests = True
75 
# At most one positional argument (the directory holding the test executables) is allowed
if len(args) > 1:
    usage()
target = None
if len(args) == 1:
    if os.path.isdir( args[0] ):
        target = args[0]
    else:
        usage()
# Trying to assume target directory from inside build directory. Only works if there is only one location with tests
if not target:
    build = repo.root + os.sep + 'build'
    for executable in file.find(build, '(^|/)test-.*'):
        if not file.is_executable(executable):
            continue
        dir_with_test = build + os.sep + os.path.dirname(executable)
        # A second distinct directory makes the default ambiguous -- bail out
        if target and target != dir_with_test:
            log.e("Found executable tests in 2 directories:", target, "and", dir_with_test, ". Can't default to directory")
            usage()
        target = dir_with_test

# Per-test logs go under <target>/unit-tests when we have a target...
if target:
    logdir = target + os.sep + 'unit-tests'
else: # no test executables were found. We put the logs directly in build directory
    logdir = os.path.join( repo.root, 'build', 'unit-tests' )
os.makedirs( logdir, exist_ok = True )
n_tests = 0  # incremented by test_wrapper() for every test invocation
102 
# Python scripts should be able to find the pyrealsense2 .pyd or else they won't work. We don't know
# if the user (Travis included) has pyrealsense2 installed but even if so, we want to use the one we compiled.
# we search the librealsense repository for the .pyd file (.so file in linux)
pyrs = ""
if linux:
    # FIX: raw strings -- '\.' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning; a SyntaxWarning as of Python 3.12)
    for so in file.find(repo.root, r'(^|/)pyrealsense2.*\.so$'):
        pyrs = so
else:
    for pyd in file.find(repo.root, r'(^|/)pyrealsense2.*\.pyd$'):
        pyrs = pyd
113 
if pyrs:
    # After use of find, pyrs contains the path from librealsense to the pyrealsense that was found
    # We append it to the librealsense path to get an absolute path to the file to add to PYTHONPATH so it can be found by the tests
    pyrs_path = repo.root + os.sep + pyrs
    # We need to add the directory not the file itself
    pyrs_path = os.path.dirname(pyrs_path)
    log.d( 'found pyrealsense pyd in:', pyrs_path )
    if not target:
        # No build directory was found earlier; the module's directory is our best guess
        target = pyrs_path
        log.d( 'assuming executable path same as pyd path' )

# Figure out which sys.path we want the tests to see, assuming we have Python tests
# PYTHONPATH is what Python will ADD to sys.path for the child processes
# (We can simply change `sys.path` but any child python scripts won't see it; we change the environment instead)
#
# We also need to add the path to the python packages that the tests use
os.environ["PYTHONPATH"] = current_dir + os.sep + "py"
#
if pyrs:
    os.environ["PYTHONPATH"] += os.pathsep + pyrs_path
134 
def subprocess_run(cmd, stdout = None, timeout = 200, append = False):
    """
    Wrapper around subprocess.run that can redirect the child's combined stdout/stderr to a file.
    If the child process times out or ends with a non-zero exit status an exception is raised!

    :param cmd: the command and argument for the child process, as a list
    :param stdout: path of file to direct the output of the process to (None to disable)
    :param timeout: number of seconds to give the process before forcefully ending it (None to disable)
    :param append: if True and stdout is not None, the log of the test will be appended to the file instead of
                   overwriting it
    :return: the output written by the child, as a list of lines, if stdout is None -- otherwise N/A
    """
    log.d( 'running:', cmd )
    handle = None
    start_time = time.time()
    try:
        log.debug_indent()
        if stdout and stdout != subprocess.PIPE:
            # Redirect into an actual file; when appending, a dashed separator marks the
            # boundary between runs (check_log_for_fails relies on this exact line)
            if append:
                handle = open(stdout, "a" )
                handle.write("\n---------------------------------------------------------------------------------\n\n")
                handle.flush()
            else:
                handle = open( stdout, "w" )
            stdout = handle
        rv = subprocess.run( cmd,
                             stdout = stdout,
                             stderr = subprocess.STDOUT,
                             universal_newlines = True,
                             timeout = timeout,
                             check = True )
        captured = rv.stdout
        # rv.stdout is None when redirected to a file; normalize to a list of lines
        return captured.split( '\n' ) if captured else []
    finally:
        if handle:
            handle.close()
        log.debug_unindent()
        run_time = time.time() - start_time
        log.d("test took", run_time, "seconds")
178 
179 
def configuration_str( configuration, prefix = '', suffix = '' ):
    """ Return a string repr (with a prefix and/or suffix) of the configuration or '' if it's None """
    if configuration is None:
        return ''
    body = ' '.join( configuration )
    return '{}[{}]{}'.format( prefix, body, suffix )
185 
186 
def check_log_for_fails( path_to_log, testname, configuration = None ):
    """
    Scan a test's log file for a Catch2-style failure summary and report it.

    :param path_to_log: path to the log file to scan (None means nothing to check)
    :param testname: the test's name, used for error reporting
    :param configuration: optional device configuration, used for error reporting only
    :return: True if the last run recorded in the log had failed test cases; False otherwise
    """
    # Normal logs are expected to have in last line:
    #     "All tests passed (11 assertions in 1 test case)"
    # Tests that have failures, however, will show:
    #     "test cases: 1 | 1 failed
    #      assertions: 9 | 6 passed | 3 failed"
    # We make sure we look at the log written by the last run of the test by ignoring anything before the last
    # line with "----...---" that separate between 2 separate runs of the test
    if path_to_log is None:
        return False
    results = None
    for ctx in file.grep( r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)|^-+$', path_to_log ):
        m = ctx['match']
        # This exact dashed line is what subprocess_run() writes between appended runs;
        # seeing it means earlier matches belong to a previous run, so start over
        if m.string == "---------------------------------------------------------------------------------":
            results = None
        else:
            results = m

    if not results:
        return False

    total = int(results.group(1))
    passed = int(results.group(2))
    if results.group(3) == 'failed':
        # "test cases: 1 | 1 failed" -- group(2) counted failures, so derive the passed count
        passed = total - passed
    if passed < total:
        if total == 1 or passed == 0:
            desc = 'failed'
        else:
            desc = str(total - passed) + ' of ' + str(total) + ' failed'

        # Verbose mode dumps the whole log inline; otherwise just point at the file
        if log.is_verbose_on():
            log.e( log.red + testname + log.reset + ': ' + configuration_str( configuration, suffix = ' ' ) + desc )
            log.i( 'Log: >>>' )
            log.out()
            file.cat( path_to_log )
            log.out( '<<<' )
        else:
            log.e( log.red + testname + log.reset + ': ' + configuration_str( configuration, suffix = ' ' ) + desc + '; see ' + path_to_log )
        return True
    return False
229 
230 
class TestConfig(ABC):  # Abstract Base Class
    """
    Configuration for a test, encompassing any metadata needed to control its run, like retries etc.
    """
    def __init__(self):
        # Defaults; subclasses (e.g. text-based parsers) overwrite these protected fields
        self._configurations = []
        self._priority = 1000
        self._tags = set()
        self._flags = set()
        self._timeout = 200

    def debug_dump(self):
        """ Log (at debug level) every setting that differs from its default """
        if self._priority != 1000:
            log.d( 'priority:', self._priority )
        if self._timeout != 200:
            log.d( 'timeout:', self._timeout)
        if self._tags:
            log.d( 'tags:', self._tags )
        if self._flags:
            log.d( 'flags:', self._flags )
        if len(self._configurations) > 1:
            log.d( len( self._configurations ), 'configurations' )
            # don't show them... they are output separately

    @property
    def priority( self ):
        """ Scheduling priority; lower runs earlier (default 1000) """
        return self._priority

    @property
    def timeout( self ):
        """ Seconds allowed per run (default 200) """
        return self._timeout

    @property
    def configurations( self ):
        """ List of device-spec lists from 'device' directives """
        return self._configurations

    @property
    def tags( self ):
        """ Set of tags from 'tag' directives """
        return self._tags

    @property
    def flags( self ):
        """ Set of flags from 'flag' directives (e.g. 'custom-args') """
        return self._flags
274 
275 
277  """
278  Configuration for a test -- from any text-based syntax with a given prefix, e.g. for python:
279  #test:usb2
280  #test:device L500* D400*
281  #test:retries 3
282  #test:priority 0
283  And, for C++ the prefix could be:
284  //#test:...
285  """
    def __init__( self, source, line_prefix ):
        """
        :param source: The path to the text file
        :param line_prefix: A regex to denote a directive (must be first thing in a line), which will
            be immediately followed by the directive itself and optional arguments
        """
        TestConfig.__init__(self)

        # Parse the python
        # group(1) = directive name; group(2) = whitespace-separated arguments (non-greedy);
        # group(3) = optional trailing '#...' comment
        regex = r'^' + line_prefix + r'(\S+)((?:\s+\S+)*?)\s*(?:#\s*(.*))?$'
        for context in file.grep( regex, source ):
            match = context['match']
            directive = match.group(1)
            text_params = match.group(2).strip()
            params = [s for s in text_params.split()]
            comment = match.group(3)  # currently unused; kept for future use
            if directive == 'device':
                #log.d( ' configuration:', params )
                if not params:
                    log.e( source + '+' + str(context['index']) + ': device directive with no devices listed' )
                elif 'each' in text_params.lower() and len(params) > 1:
                    # each(...) must be the sole spec on the line
                    log.e( source + '+' + str(context['index']) + ': each() cannot be used in combination with other specs', params )
                elif 'each' in text_params.lower() and not re.fullmatch( r'each\(.+\)', text_params, re.IGNORECASE ):
                    log.e( source + '+' + str(context['index']) + ': invalid \'each\' syntax:', params )
                else:
                    self._configurations.append( params )
            elif directive == 'priority':
                if len(params) == 1 and params[0].isdigit():
                    self._priority = int( params[0] )
                else:
                    log.e( source + '+' + str(context['index']) + ': priority directive with invalid parameters:', params )
            elif directive == 'timeout':
                if len(params) == 1 and params[0].isdigit():
                    self._timeout = int( params[0] )
                else:
                    log.e( source + '+' + str(context['index']) + ': timeout directive with invalid parameters:', params )
            elif directive == 'tag':
                self._tags.update(params)
            elif directive == 'flag':
                self._flags.update( params )
            else:
                # Unknown directives are reported but do not abort parsing
                log.e( source + '+' + str(context['index']) + ': invalid directive "' + directive + '"; ignoring' )
328 
329 
class Test(ABC):  # Abstract Base Class
    """
    Abstract class for a test. Holds the name of the test
    """
    def __init__(self, testname):
        #log.d( 'found', testname )
        self._name = testname
        self._config = None   # filled in by subclasses from the test's directives
        self._ran = False     # flipped after the first run attempt

    @abstractmethod
    def run_test( self, configuration = None, log_path = None ):
        pass

    def debug_dump( self ):
        """ Log this test's configuration, if any, at debug level """
        if self._config:
            self._config.debug_dump()

    @property
    def config( self ):
        return self._config

    @property
    def name( self ):
        return self._name

    @property
    def ran( self ):
        return self._ran

    def get_log( self ):
        """ Return the per-test log file path, or None when -s/--stdout was given """
        global to_stdout
        return None if to_stdout else logdir + os.sep + self.name + ".log"

    def is_live( self ):
        """
        Returns True if the test configurations specify devices (test has a 'device' directive)
        """
        return self._config and len(self._config.configurations) > 0
373 
374 
class PyTest(Test):
    """
    Class for python tests. Hold the path to the script of the test
    """
    def __init__(self, testname, path_to_test):
        """
        :param testname: name of the test
        :param path_to_test: the relative path from the current directory to the path
        """
        global current_dir
        Test.__init__(self, testname)
        self.path_to_script = current_dir + os.sep + path_to_test
        # Python directives look like: #test:...
        self._config = TestConfigFromText( self.path_to_script, r'#\s*test:' )

    def debug_dump(self):
        log.d( 'script:', self.path_to_script )
        Test.debug_dump(self)

    @property
    def command(self):
        """ The command line (as a list) with which to invoke this script """
        # The unit-tests should only find module we've specifically added -- but Python may have site packages
        # that are automatically made available. We want to avoid those:
        #     -S : don't imply 'import site' on initialization
        # NOTE: exit() is defined in site.py and works only if the site module is imported!
        cmd = [sys.executable, '-S']
        if sys.flags.verbose:
            cmd.append( "-v" )
        cmd.append( self.path_to_script )
        if 'custom-args' not in self.config.flags:
            if log.is_debug_on():
                cmd.append( '--debug' )
            if log.is_color_on():
                cmd.append( '--color' )
        return cmd

    def run_test( self, configuration = None, log_path = None ):
        try:
            subprocess_run( self.command, stdout=log_path, append=self.ran, timeout=self.config.timeout )
        finally:
            # Mark the test as run even on failure, so a retry appends to the same log
            self._ran = True
416 
417 
class ExeTest(Test):
    """
    Class for c/cpp tests. Hold the path to the executable for the test
    """
    def __init__( self, testname, exe ):
        """
        :param testname: name of the test
        :param exe: full path to executable
        """
        global current_dir
        Test.__init__(self, testname)
        self.exe = exe

        # Finding the c/cpp file of the test to get the configuration
        # TODO: this is limited to a structure in which .cpp files and directories do not share names
        # For example:
        #     unit-tests/
        #         func/
        #             ...
        #         test-func.cpp
        # test-func.cpp will not be found!
        split_testname = testname.split( '-' )
        cpp_path = current_dir
        found_test_dir = False

        # Walk down matching sub-directories: "test-log-internal-all" may live in log/internal/
        # as test-all.cpp. Each pass consumes the name parts that matched a directory.
        while not found_test_dir:
            # index 0 should be 'test' as tests always start with it
            found_test_dir = True
            for i in range(2, len(split_testname) ): # Checking if the next part of the test name is a sub-directory
                sub_dir_path = cpp_path + os.sep + '-'.join(split_testname[1:i]) # The next sub-directory could have several words
                if os.path.isdir(sub_dir_path):
                    cpp_path = sub_dir_path
                    del split_testname[1:i]
                    found_test_dir = False
                    break

        cpp_path += os.sep + '-'.join( split_testname )
        if os.path.isfile( cpp_path + ".cpp" ):
            cpp_path += ".cpp"
            # C++ directives look like: //#test:...
            self._config = TestConfigFromText(cpp_path, r'//#\s*test:')
        else:
            # NOTE(review): in this branch self._config stays None, so .command and
            # .run_test below would fail on self.config.flags -- confirm intended
            log.w( log.red + testname + log.reset + ':', 'No matching .cpp file was found; no configuration will be used!' )

    @property
    def command(self):
        """ The command line (as a list) with which to invoke this executable """
        cmd = [self.exe]
        if 'custom-args' not in self.config.flags:
            # Assume we're a Catch2 exe, so:
            #if sys.flags.verbose:
            #    cmd +=
            if log.is_debug_on():
                cmd += ['-d', 'yes'] # show durations for each test-case
                #cmd += ['--success'] # show successful assertions in output
            #if log.is_color_on():
            #    cmd += ['--use-colour', 'yes']
        return cmd

    def run_test( self, configuration = None, log_path = None ):
        try:
            subprocess_run( self.command, stdout=log_path, append=self.ran, timeout=self.config.timeout )
        finally:
            # Mark the test as run even on failure, so a retry appends to the same log
            self._ran = True
480 
481 
def get_tests():
    """
    Generator yielding every test (ExeTest and PyTest) that fits the --regex filter, if any.

    Executable (C/C++) tests are enumerated from the CMake "manifest" left by the last build;
    Python test scripts are found alongside this script.
    """
    global regex, target, pyrs, current_dir, linux
    if regex:
        pattern = re.compile(regex)
    if target:
        # In Linux, the build targets are located elsewhere than on Windows
        # Go over all the tests from a "manifest" we take from the result of the last CMake
        # run (rather than, for example, looking for test-* in the build-directory):
        if linux:
            manifestfile = target + '/CMakeFiles/TargetDirectories.txt'
        else:
            manifestfile = target + '/../CMakeFiles/TargetDirectories.txt'
        #log.d( manifestfile )
        # FIX: raw string -- '\S' in a non-raw literal is an invalid escape sequence
        # (DeprecationWarning; a SyntaxWarning as of Python 3.12)
        for manifest_ctx in file.grep(r'(?<=unit-tests/build/)\S+(?=/CMakeFiles/test-\S+.dir$)', manifestfile):
            # We need to first create the test name so we can see if it fits the regex
            testdir = manifest_ctx['match'].group(0)  # "log/internal/test-all"
            #log.d( testdir )
            testparent = os.path.dirname(testdir)  # "log/internal"
            if testparent:
                testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(testdir)[5:]  # "test-log-internal-all"
            else:
                testname = testdir  # no parent folder so we get "test-all"

            if regex and not pattern.search( testname ):
                continue

            if linux:
                exe = target + '/unit-tests/build/' + testdir + '/' + testname
            else:
                exe = target + '/' + testname + '.exe'

            yield ExeTest(testname, exe)

    # Python unit-test scripts are in the same directory as us... we want to consider running them
    # (we may not if they're live and we have no pyrealsense2.pyd):
    for py_test in file.find(current_dir, r'(^|/)test-.*\.py'):
        testparent = os.path.dirname(py_test)  # "log/internal" <- "log/internal/test-all.py"
        if testparent:
            testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(py_test)[5:-3]  # remove .py
        else:
            testname = os.path.basename(py_test)[:-3]

        if regex and not pattern.search( testname ):
            continue

        yield PyTest(testname, py_test)
528 
529 
def prioritize_tests( tests ):
    """ Return the tests sorted by their configured priority (lower runs first; stable order) """
    def by_priority( test ):
        return test.config.priority
    return sorted( tests, key = by_priority )
532 
534  """
535  Yield <configuration,serial-numbers> pairs for each valid configuration under which the
536  test should run.
537 
538  The <configuration> is a list of ('test:device') designations, e.g. ['L500*', 'D415'].
539  The <serial-numbers> is a set of device serial-numbers that fit this configuration.
540 
541  :param test: The test (of class type Test) we're interested in
542  """
543  for configuration in test.config.configurations:
544  try:
545  for serial_numbers in devices.by_configuration( configuration ):
546  yield configuration, serial_numbers
547  except RuntimeError as e:
548  if devices.acroname:
549  log.e( log.red + test.name + log.reset + ': ' + str(e) )
550  else:
551  log.w( log.yellow + test.name + log.reset + ': ' + str(e) )
552  continue
553 
554 
log.i( 'Logs in:', logdir )
def test_wrapper( test, configuration = None ):
    """
    Run a single test, count it, and report any failure uniformly.

    :param test: the Test (PyTest/ExeTest) to run
    :param configuration: optional device configuration under which it is being run
    """
    global n_tests
    n_tests += 1
    #
    if not log.is_debug_on() or log.is_color_on():
        log.progress( configuration_str( configuration, suffix = ' ' ) + test.name, '...' )
    #
    log_path = test.get_log()
    try:
        test.run_test( configuration = configuration, log_path = log_path )
    except FileNotFoundError as e:
        # The executable/script itself is missing
        log.e( log.red + test.name + log.reset + ':', str(e) + configuration_str( configuration, prefix = ' ' ) )
    except subprocess.TimeoutExpired:
        log.e(log.red + test.name + log.reset + ':', configuration_str(configuration, suffix=' ') + 'timed out')
    except subprocess.CalledProcessError as cpe:
        # Non-zero exit: the log usually explains it; only complain here if it doesn't
        if not check_log_for_fails( log_path, test.name, configuration ):
            # An unexpected error occurred
            log.e( log.red + test.name + log.reset + ':', configuration_str( configuration, suffix = ' ' ) + 'exited with non-zero value (' + str(cpe.returncode) + ')' )
574 
575 
# Run all tests
list_only = list_tags or list_tests
if not list_only:
    if pyrs:
        sys.path.append( pyrs_path )
    # Deferred import: rspy.devices needs pyrealsense2 on sys.path, added just above
    from rspy import devices
    devices.query()
    #
    # Under Travis, we'll have no devices and no acroname
    skip_live_tests = len(devices.all()) == 0 and not devices.acroname
586 #
log.reset_errors()
tags = set()   # union of all tags seen, for --list-tags
tests = []     # names of all matching tests, for --list-tests
for test in prioritize_tests( get_tests() ):
    #
    log.d( 'found', test.name, '...' )
    try:
        log.debug_indent()
        test.debug_dump()
        #
        # Skip tests that don't carry the requested --tag
        if tag and tag not in test.config.tags:
            log.d( 'does not fit --tag:', test.config.tags )
            continue
        #
        tags.update( test.config.tags )
        tests.append( test.name )
        if list_only:
            # Count it (so the "no tests found" check works) but don't run anything
            n_tests += 1
            continue
        #
        # Non-live tests need no camera; run once with no configuration
        if not test.is_live():
            test_wrapper( test )
            continue
        #
        if skip_live_tests:
            log.w( test.name + ':', 'is live and there are no cameras; skipping' )
            continue
        #
        # Live tests: run once per valid device configuration, enabling only the
        # matching devices (recycle=True presumably power-cycles them -- confirm)
        for configuration, serial_numbers in devices_by_test_config( test ):
            try:
                log.d( 'configuration:', configuration )
                log.debug_indent()
                devices.enable_only( serial_numbers, recycle = True )
            except RuntimeError as e:
                log.w( log.red + test.name + log.reset + ': ' + str(e) )
            else:
                test_wrapper( test, configuration )
            finally:
                log.debug_unindent()
        #
    finally:
        log.debug_unindent()
629 
630 
log.progress()  # clear any pending progress line
#
# Finding nothing at all is itself an error
if not n_tests:
    log.e( 'No unit-tests found!' )
    sys.exit(1)
#
if list_only:
    if list_tags:
        print( "Available tags:" )
        for t in sorted( list( tags )):
            print( t )
    #
    if list_tests:
        print( "Available tests:" )
        for t in sorted( tests ):
            print( t )
#
else:
    # Exit status reflects failures: non-zero if any test logged an error
    n_errors = log.n_errors()
    if n_errors:
        log.out( log.red + str(n_errors) + log.reset, 'of', n_tests, 'test(s)', log.red + 'failed!' + log.reset + log.clear_eos )
        sys.exit(1)
    #
    log.out( str(n_tests) + ' unit-test(s) completed successfully' + log.clear_eos )
#
sys.exit(0)
def run_test(self, configuration=None, log_path=None)
def configurations(self)
std::string join(const std::string &base, const std::string &path)
Definition: filesystem.h:113
def prioritize_tests(tests)
def timeout(self)
def __init__(self, testname, path_to_test)
def __init__(self)
def priority(self)
def config(self)
def __init__(self, testname, exe)
def run_test(self, configuration=None, log_path=None)
def check_log_for_fails(path_to_log, testname, configuration=None)
def __init__(self, source, line_prefix)
def devices_by_test_config(test)
def run_test(self, configuration=None, log_path=None)
def get_log(self)
static std::string print(const transformation &tf)
def test_wrapper(test, configuration=None)
def configuration_str(configuration, prefix='', suffix='')
def subprocess_run(cmd, stdout=None, timeout=200, append=False)
def debug_dump(self)
def is_live(self)


librealsense2
Author(s): Sergey Dorodnicov , Doron Hirshberg , Mark Horn , Reagan Lopez , Itay Carpis
autogenerated on Mon May 3 2021 02:47:41