run_tests.py
1 #!/usr/bin/env python3
2 # Copyright 2015 gRPC authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
16 
17 from __future__ import print_function
18 
19 import argparse
20 import ast
21 import collections
22 import glob
23 import itertools
24 import json
25 import logging
26 import multiprocessing
27 import os
28 import os.path
29 import pipes
30 import platform
31 import random
32 import re
33 import socket
34 import subprocess
35 import sys
36 import tempfile
37 import time
38 import traceback
39 import uuid
40 
41 import six
42 from six.moves import urllib
43 
44 import python_utils.jobset as jobset
45 import python_utils.report_utils as report_utils
46 import python_utils.start_port_server as start_port_server
47 import python_utils.watch_dirs as watch_dirs
48 
49 try:
50  from python_utils.upload_test_results import upload_results_to_bq
51 except (ImportError):
52  pass # It's ok to not import because this is only necessary to upload results to BQ.
53 
54 gcp_utils_dir = os.path.abspath(
55  os.path.join(os.path.dirname(__file__), '../gcp/utils'))
56 sys.path.append(gcp_utils_dir)
57 
58 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59 os.chdir(_ROOT)
60 
61 _FORCE_ENVIRON_FOR_WRAPPERS = {
62  'GRPC_VERBOSITY': 'DEBUG',
63 }
64 
65 _POLLING_STRATEGIES = {
66  'linux': ['epoll1', 'poll'],
67  'mac': ['poll'],
68 }
69 
70 
71 def platform_string():
72  return jobset.platform_string()
73 
74 
75 _DEFAULT_TIMEOUT_SECONDS = 5 * 60
76 _PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
77 
78 
79 def run_shell_command(cmd, env=None, cwd=None):
80  try:
81  subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
82  except subprocess.CalledProcessError as e:
83  logging.exception(
84  "Error while running command '%s'. Exit status %d. Output:\n%s",
85  e.cmd, e.returncode, e.output)
86  raise
87 
88 
89 def max_parallel_tests_for_current_platform():
90  # Too much test parallelization has only been seen to be a problem
91  # so far on windows.
92  if jobset.platform_string() == 'windows':
93  return 64
94  return 1024
95 
96 
97 def _print_debug_info_epilogue(dockerfile_dir=None):
98  """Use to print useful info for debug/repro just before exiting."""
99  print('')
100  print('=== run_tests.py DEBUG INFO ===')
101  print('command: \"%s\"' % ' '.join(sys.argv))
102  if dockerfile_dir:
103  print('dockerfile: %s' % dockerfile_dir)
104  kokoro_job_name = os.getenv('KOKORO_JOB_NAME')
105  if kokoro_job_name:
106  print('kokoro job name: %s' % kokoro_job_name)
107  print('===============================')
108 
109 
110 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
111 class Config(object):
112 
113  def __init__(self,
114  config,
115  environ=None,
116  timeout_multiplier=1,
117  tool_prefix=[],
118  iomgr_platform='native'):
119  if environ is None:
120  environ = {}
121  self.build_config = config
122  self.environ = environ
123  self.environ['CONFIG'] = config
124  self.tool_prefix = tool_prefix
125  self.timeout_multiplier = timeout_multiplier
126  self.iomgr_platform = iomgr_platform
127 
128  def job_spec(self,
129  cmdline,
130  timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
131  shortname=None,
132  environ={},
133  cpu_cost=1.0,
134  flaky=False):
135  """Construct a jobset.JobSpec for a test under this config
136 
137  Args:
138  cmdline: a list of strings specifying the command line the test
139  would like to run
140  """
141  actual_environ = self.environ.copy()
142  for k, v in environ.items():
143  actual_environ[k] = v
144  if not flaky and shortname and shortname in flaky_tests:
145  flaky = True
146  if shortname in shortname_to_cpu:
147  cpu_cost = shortname_to_cpu[shortname]
148  return jobset.JobSpec(
149  cmdline=self.tool_prefix + cmdline,
150  shortname=shortname,
151  environ=actual_environ,
152  cpu_cost=cpu_cost,
153  timeout_seconds=(self.timeout_multiplier *
154  timeout_seconds if timeout_seconds else None),
155  flake_retries=4 if flaky or args.allow_flakes else 0,
156  timeout_retries=1 if flaky or args.allow_flakes else 0)
157 
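# Example: with timeout_multiplier=3 and the default _DEFAULT_TIMEOUT_SECONDS
# of 300, job_spec() produces a JobSpec whose timeout_seconds is 900. Tests
# listed in flaky_tests (or any test when --allow_flakes is passed) are
# retried: flake_retries=4, timeout_retries=1.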
158 
159 def get_c_tests(travis, test_lang):
160  out = []
161  platforms_str = 'ci_platforms' if travis else 'platforms'
162  with open('tools/run_tests/generated/tests.json') as f:
163  js = json.load(f)
164  return [
165  tgt for tgt in js
166  if tgt['language'] == test_lang and platform_string() in
167  tgt[platforms_str] and not (travis and tgt['flaky'])
168  ]
169 
170 
171 def _check_compiler(compiler, supported_compilers):
172  if compiler not in supported_compilers:
173  raise Exception('Compiler %s not supported (on this platform).' %
174  compiler)
175 
176 
177 def _check_arch(arch, supported_archs):
178  if arch not in supported_archs:
179  raise Exception('Architecture %s not supported.' % arch)
180 
181 
182 def _is_use_docker_child():
183  """Returns True if running as a --use_docker child."""
184  return True if os.getenv('DOCKER_RUN_SCRIPT_COMMAND') else False
185 
186 
187 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
188  'shell',
189  'builder',
190  'builder_prefix_arguments',
191  'venv_relative_python',
192  'toolchain',
193  'runner',
194 ])
195 
196 
197 def _python_config_generator(name, major, minor, bits, config_vars):
198  build = (config_vars.shell + config_vars.builder +
199  config_vars.builder_prefix_arguments +
200  [_python_pattern_function(major=major, minor=minor, bits=bits)] +
201  [name] + config_vars.venv_relative_python + config_vars.toolchain)
202  run = (config_vars.shell + config_vars.runner + [
203  os.path.join(name, config_vars.venv_relative_python[0]),
204  ])
205  return PythonConfig(name, build, run)
206 
207 
208 def _pypy_config_generator(name, major, config_vars):
209  return PythonConfig(
210  name, config_vars.shell + config_vars.builder +
211  config_vars.builder_prefix_arguments +
212  [_pypy_pattern_function(major=major)] + [name] +
213  config_vars.venv_relative_python + config_vars.toolchain,
214  config_vars.shell + config_vars.runner +
215  [os.path.join(name, config_vars.venv_relative_python[0])])
216 
217 
218 def _python_pattern_function(major, minor, bits):
219  # Bit-ness is handled by the test machine's environment
220  if os.name == "nt":
221  if bits == "64":
222  return '/c/Python{major}{minor}/python.exe'.format(major=major,
223  minor=minor,
224  bits=bits)
225  else:
226  return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
227  major=major, minor=minor, bits=bits)
228  else:
229  return 'python{major}.{minor}'.format(major=major, minor=minor)
230 
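# Example: on Windows ("nt") with bits == "64", major='3' and minor='8' yield
# '/c/Python38/python.exe'; with bits == "32" they yield
# '/c/Python38_32bits/python.exe'; on other platforms the result is 'python3.8'.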
231 
232 def _pypy_pattern_function(major):
233  if major == '2':
234  return 'pypy'
235  elif major == '3':
236  return 'pypy3'
237  else:
238  raise ValueError("Unknown PyPy major version")
239 
240 
241 class CLanguage(object):
242 
243  def __init__(self, lang_suffix, test_lang):
244  self.lang_suffix = lang_suffix
245  self.platform = platform_string()
246  self.test_lang = test_lang
247 
248  def configure(self, config, args):
249  self.config = config
250  self.args = args
251  if self.platform == 'windows':
252  _check_compiler(self.args.compiler, [
253  'default',
254  'cmake',
255  'cmake_ninja_vs2017',
256  'cmake_vs2017',
257  'cmake_vs2019',
258  ])
259  _check_arch(self.args.arch, ['default', 'x64', 'x86'])
260 
261  activate_vs_tools = ''
262  if self.args.compiler == 'cmake_ninja_vs2017' or self.args.compiler == 'cmake' or self.args.compiler == 'default':
263  # cmake + ninja build is the default because it is faster and supports boringssl assembly optimizations
264  # the compiler used is exactly the same as for cmake_vs2017
265  cmake_generator = 'Ninja'
266  activate_vs_tools = '2017'
267  elif self.args.compiler == 'cmake_vs2017':
268  cmake_generator = 'Visual Studio 15 2017'
269  elif self.args.compiler == 'cmake_vs2019':
270  cmake_generator = 'Visual Studio 16 2019'
271  else:
272  print('should never reach here.')
273  sys.exit(1)
274 
275  self._cmake_configure_extra_args = []
276  self._cmake_generator_windows = cmake_generator
277  # required to pass as cmake "-A" configuration for VS builds (but not for Ninja)
278  self._cmake_architecture_windows = 'x64' if self.args.arch == 'x64' else 'Win32'
279  # when building with Ninja, the VS common tools need to be activated first
280  self._activate_vs_tools_windows = activate_vs_tools
281  self._vs_tools_architecture_windows = 'x64' if self.args.arch == 'x64' else 'x86'
282 
283  else:
284  if self.platform == 'linux':
285  # Allow all the known architectures. _check_arch_option has already checked that we're not doing
286  # something illegal when not running under docker.
287  _check_arch(self.args.arch, ['default', 'x64', 'x86', 'arm64'])
288  else:
289  _check_arch(self.args.arch, ['default'])
290 
291  self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
292  self.args.use_docker, self.args.compiler)
293 
294  if self.args.arch == 'x86':
295  # disable boringssl asm optimizations when on x86
296  # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
297  self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
298 
299  def test_specs(self):
300  out = []
301  binaries = get_c_tests(self.args.travis, self.test_lang)
302  for target in binaries:
303  if target.get('boringssl', False):
304  # cmake doesn't build boringssl tests
305  continue
306  auto_timeout_scaling = target.get('auto_timeout_scaling', True)
307  polling_strategies = (_POLLING_STRATEGIES.get(
308  self.platform, ['all']) if target.get('uses_polling', True) else
309  ['none'])
310  for polling_strategy in polling_strategies:
311  env = {
312  'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
313  _ROOT + '/src/core/tsi/test_creds/ca.pem',
314  'GRPC_POLL_STRATEGY':
315  polling_strategy,
316  'GRPC_VERBOSITY':
317  'DEBUG'
318  }
319  resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
320  if resolver:
321  env['GRPC_DNS_RESOLVER'] = resolver
322  shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
323  if polling_strategy in target.get('excluded_poll_engines', []):
324  continue
325 
326  timeout_scaling = 1
327  if auto_timeout_scaling:
328  config = self.args.config
329  if ('asan' in config or config == 'msan' or
330  config == 'tsan' or config == 'ubsan' or
331  config == 'helgrind' or config == 'memcheck'):
332  # Scale overall test timeout if running under various sanitizers.
333  # scaling value is based on historical data analysis
334  timeout_scaling *= 3
335 
336  if self.config.build_config in target['exclude_configs']:
337  continue
338  if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
339  continue
340  if self.platform == 'windows':
341  binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
342  self.config.build_config], target['name'])
343  else:
344  binary = 'cmake/build/%s' % target['name']
345 
346  cpu_cost = target['cpu_cost']
347  if cpu_cost == 'capacity':
348  cpu_cost = multiprocessing.cpu_count()
349  if os.path.isfile(binary):
350  list_test_command = None
351  filter_test_command = None
352 
353  # these are the flags defined by the gtest and benchmark frameworks to list
354  # and filter test runs. We use them to split each individual test
355  # into its own JobSpec, and thus into its own process.
356  if 'benchmark' in target and target['benchmark']:
357  with open(os.devnull, 'w') as fnull:
358  tests = subprocess.check_output(
359  [binary, '--benchmark_list_tests'],
360  stderr=fnull)
361  for line in tests.decode().split('\n'):
362  test = line.strip()
363  if not test:
364  continue
365  cmdline = [binary,
366  '--benchmark_filter=%s$' % test
367  ] + target['args']
368  out.append(
369  self.config.job_spec(
370  cmdline,
371  shortname='%s %s' %
372  (' '.join(cmdline), shortname_ext),
373  cpu_cost=cpu_cost,
374  timeout_seconds=target.get(
375  'timeout_seconds',
376  _DEFAULT_TIMEOUT_SECONDS) *
377  timeout_scaling,
378  environ=env))
379  elif 'gtest' in target and target['gtest']:
380  # here we parse the output of --gtest_list_tests to build up a complete
381  # list of the tests contained in a binary. For each test, we then
382  # add a job to run, filtering for just that test.
383  with open(os.devnull, 'w') as fnull:
384  tests = subprocess.check_output(
385  [binary, '--gtest_list_tests'], stderr=fnull)
386  base = None
387  for line in tests.decode().split('\n'):
388  i = line.find('#')
389  if i >= 0:
390  line = line[:i]
391  if not line:
392  continue
393  if line[0] != ' ':
394  base = line.strip()
395  else:
396  assert base is not None
397  assert line[1] == ' '
398  test = base + line.strip()
399  cmdline = [binary,
400  '--gtest_filter=%s' % test
401  ] + target['args']
402  out.append(
403  self.config.job_spec(
404  cmdline,
405  shortname='%s %s' %
406  (' '.join(cmdline), shortname_ext),
407  cpu_cost=cpu_cost,
408  timeout_seconds=target.get(
409  'timeout_seconds',
410  _DEFAULT_TIMEOUT_SECONDS) *
411  timeout_scaling,
412  environ=env))
413  else:
414  cmdline = [binary] + target['args']
415  shortname = target.get(
416  'shortname',
417  ' '.join(pipes.quote(arg) for arg in cmdline))
418  shortname += shortname_ext
419  out.append(
420  self.config.job_spec(
421  cmdline,
422  shortname=shortname,
423  cpu_cost=cpu_cost,
424  flaky=target.get('flaky', False),
425  timeout_seconds=target.get(
426  'timeout_seconds',
427  _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
428  environ=env))
429  elif self.args.regex == '.*' or self.platform == 'windows':
430  print('\nWARNING: binary not found, skipping', binary)
431  return sorted(out)
432 
433  def pre_build_steps(self):
434  return []
435 
436  def build_steps(self):
437  if self.platform == 'windows':
438  return [[
439  'tools\\run_tests\\helper_scripts\\build_cxx.bat',
440  '-DgRPC_BUILD_MSVC_MP_COUNT=%d' % self.args.jobs
441  ] + self._cmake_configure_extra_args]
442  else:
443  return [['tools/run_tests/helper_scripts/build_cxx.sh'] +
444  self._cmake_configure_extra_args]
445 
446  def build_steps_environ(self):
447  """Extra environment variables set for pre_build_steps and build_steps jobs."""
448  environ = {'GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX': self.lang_suffix}
449  if self.platform == 'windows':
450  environ['GRPC_CMAKE_GENERATOR'] = self._cmake_generator_windows
451  environ[
452  'GRPC_CMAKE_ARCHITECTURE'] = self._cmake_architecture_windows
453  environ[
454  'GRPC_BUILD_ACTIVATE_VS_TOOLS'] = self._activate_vs_tools_windows
455  environ[
456  'GRPC_BUILD_VS_TOOLS_ARCHITECTURE'] = self._vs_tools_architecture_windows
457  return environ
458 
459  def post_tests_steps(self):
460  if self.platform == 'windows':
461  return []
462  else:
463  return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
464 
465  def _clang_cmake_configure_extra_args(self, version_suffix=''):
466  return [
467  '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
468  '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
469  ]
470 
471  def _compiler_options(self, use_docker, compiler):
472  """Returns docker distro and cmake configure args to use for given compiler."""
473  if not use_docker and not _is_use_docker_child():
474  # if not running under docker, we cannot ensure the right compiler version will be used,
475  # so we only allow the non-specific choices.
476  _check_compiler(compiler, ['default', 'cmake'])
477 
478  if compiler == 'default' or compiler == 'cmake':
479  return ('debian11', [])
480  elif compiler == 'gcc6':
481  return ('gcc_6', [])
482  elif compiler == 'gcc10.2':
483  return ('debian11', [])
484  elif compiler == 'gcc10.2_openssl102':
485  return ('debian11_openssl102', [
486  "-DgRPC_SSL_PROVIDER=package",
487  ])
488  elif compiler == 'gcc11':
489  return ('gcc_11', [])
490  elif compiler == 'gcc_musl':
491  return ('alpine', [])
492  elif compiler == 'clang6':
493  return ('clang_6', self._clang_cmake_configure_extra_args())
494  elif compiler == 'clang13':
495  return ('clang_13', self._clang_cmake_configure_extra_args())
496  else:
497  raise Exception('Compiler %s not supported.' % compiler)
498 
499  def dockerfile_dir(self):
500  return 'tools/dockerfile/test/cxx_%s_%s' % (
501  self._docker_distro, _docker_arch_suffix(self.args.arch))
502 
503  def __str__(self):
504  return self.lang_suffix
505 
506 
507 # This tests Node on grpc/grpc-node and will become the standard for Node testing
508 class RemoteNodeLanguage(object):
509 
510  def __init__(self):
511  self.platform = platform_string()
512 
513  def configure(self, config, args):
514  self.config = config
515  self.args = args
516  # Note: electron ABI only depends on major and minor version, so that's all
517  # we should specify in the compiler argument
518  _check_compiler(self.args.compiler, [
519  'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
520  'electron1.3', 'electron1.6'
521  ])
522  if self.args.compiler == 'default':
523  self.runtime = 'node'
524  self.node_version = '8'
525  else:
526  if self.args.compiler.startswith('electron'):
527  self.runtime = 'electron'
528  self.node_version = self.args.compiler[8:]
529  else:
530  self.runtime = 'node'
531  # Take off the word "node"
532  self.node_version = self.args.compiler[4:]
533 
534  # TODO: update with Windows/electron scripts when available for grpc/grpc-node
535  def test_specs(self):
536  if self.platform == 'windows':
537  return [
538  self.config.job_spec(
539  ['tools\\run_tests\\helper_scripts\\run_node.bat'])
540  ]
541  else:
542  return [
543  self.config.job_spec(
544  ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
545  None,
546  environ=_FORCE_ENVIRON_FOR_WRAPPERS)
547  ]
548 
549  def pre_build_steps(self):
550  return []
551 
552  def build_steps(self):
553  return []
554 
555  def build_steps_environ(self):
556  """Extra environment variables set for pre_build_steps and build_steps jobs."""
557  return {}
558 
559  def post_tests_steps(self):
560  return []
561 
562  def dockerfile_dir(self):
563  return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
564  self.args.arch)
565 
566  def __str__(self):
567  return 'grpc-node'
568 
569 
570 class Php7Language(object):
571 
572  def configure(self, config, args):
573  self.config = config
574  self.args = args
575  _check_compiler(self.args.compiler, ['default'])
576 
577  def test_specs(self):
578  return [
579  self.config.job_spec(['src/php/bin/run_tests.sh'],
580  environ=_FORCE_ENVIRON_FOR_WRAPPERS)
581  ]
582 
583  def pre_build_steps(self):
584  return []
585 
586  def build_steps(self):
587  return [['tools/run_tests/helper_scripts/build_php.sh']]
588 
589  def build_steps_environ(self):
590  """Extra environment variables set for pre_build_steps and build_steps jobs."""
591  return {}
592 
593  def post_tests_steps(self):
594  return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
595 
596  def dockerfile_dir(self):
597  return 'tools/dockerfile/test/php7_debian11_%s' % _docker_arch_suffix(
598  self.args.arch)
599 
600  def __str__(self):
601  return 'php7'
602 
603 
604 class PythonConfig(
605  collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
606  """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
607 
608 
609 class PythonLanguage(object):
610 
611  _TEST_SPECS_FILE = {
612  'native': ['src/python/grpcio_tests/tests/tests.json'],
613  'gevent': [
614  'src/python/grpcio_tests/tests/tests.json',
615  'src/python/grpcio_tests/tests_gevent/tests.json',
616  ],
617  'asyncio': ['src/python/grpcio_tests/tests_aio/tests.json'],
618  }
619 
620  _TEST_COMMAND = {
621  'native': 'test_lite',
622  'gevent': 'test_gevent',
623  'asyncio': 'test_aio',
624  }
625 
626  def configure(self, config, args):
627  self.config = config
628  self.args = args
629  self.pythons = self._get_pythons(self.args)
630 
631  def test_specs(self):
632  # load list of known test suites
633  jobs = []
634  for io_platform in self._TEST_SPECS_FILE:
635  test_cases = []
636  for tests_json_file_name in self._TEST_SPECS_FILE[io_platform]:
637  with open(tests_json_file_name) as tests_json_file:
638  test_cases.extend(json.load(tests_json_file))
639 
640  environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
641  # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
642  # designed for non-native IO manager. It has a side-effect that
643  # overrides threading settings in C-Core.
644  if io_platform != 'native':
645  environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
646  for python_config in self.pythons:
647  # TODO(https://github.com/grpc/grpc/issues/23784) allow gevent
648  # to run on later version once issue solved.
649  if io_platform == 'gevent' and python_config.name != 'py36':
650  continue
651  jobs.extend([
652  self.config.job_spec(
653  python_config.run + [self._TEST_COMMAND[io_platform]],
654  timeout_seconds=8 * 60,
655  environ=dict(
656  GRPC_PYTHON_TESTRUNNER_FILTER=str(test_case),
657  **environment),
658  shortname='%s.%s.%s' %
659  (python_config.name, io_platform, test_case),
660  ) for test_case in test_cases
661  ])
662  return jobs
663 
664  def pre_build_steps(self):
665  return []
666 
667  def build_steps(self):
668  return [config.build for config in self.pythons]
669 
670  def build_steps_environ(self):
671  """Extra environment variables set for pre_build_steps and build_steps jobs."""
672  return {}
673 
674  def post_tests_steps(self):
675  if self.config.build_config != 'gcov':
676  return []
677  else:
678  return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
679 
680  def dockerfile_dir(self):
681  return 'tools/dockerfile/test/python_%s_%s' % (
682  self._python_docker_distro_name(),
683  _docker_arch_suffix(self.args.arch))
684 
685  def _python_docker_distro_name(self):
686  """Choose the docker image to use based on python version."""
687  if self.args.compiler == 'python_alpine':
688  return 'alpine'
689  else:
690  return 'debian11_default'
691 
692  def _get_pythons(self, args):
693  """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
694  if args.iomgr_platform != 'native':
695  raise ValueError(
696  'Python builds no longer differentiate IO Manager platforms, please use "native"'
697  )
698 
699  if args.arch == 'x86':
700  bits = '32'
701  else:
702  bits = '64'
703 
704  if os.name == 'nt':
705  shell = ['bash']
706  builder = [
707  os.path.abspath(
708  'tools/run_tests/helper_scripts/build_python_msys2.sh')
709  ]
710  builder_prefix_arguments = ['MINGW{}'.format(bits)]
711  venv_relative_python = ['Scripts/python.exe']
712  toolchain = ['mingw32']
713  else:
714  shell = []
715  builder = [
716  os.path.abspath(
717  'tools/run_tests/helper_scripts/build_python.sh')
718  ]
719  builder_prefix_arguments = []
720  venv_relative_python = ['bin/python']
721  toolchain = ['unix']
722 
723  runner = [
724  os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
725  ]
726 
727  config_vars = _PythonConfigVars(shell, builder,
728  builder_prefix_arguments,
729  venv_relative_python, toolchain, runner)
730  python36_config = _python_config_generator(name='py36',
731  major='3',
732  minor='6',
733  bits=bits,
734  config_vars=config_vars)
735  python37_config = _python_config_generator(name='py37',
736  major='3',
737  minor='7',
738  bits=bits,
739  config_vars=config_vars)
740  python38_config = _python_config_generator(name='py38',
741  major='3',
742  minor='8',
743  bits=bits,
744  config_vars=config_vars)
745  python39_config = _python_config_generator(name='py39',
746  major='3',
747  minor='9',
748  bits=bits,
749  config_vars=config_vars)
750  python310_config = _python_config_generator(name='py310',
751  major='3',
752  minor='10',
753  bits=bits,
754  config_vars=config_vars)
755  pypy27_config = _pypy_config_generator(name='pypy',
756  major='2',
757  config_vars=config_vars)
758  pypy32_config = _pypy_config_generator(name='pypy3',
759  major='3',
760  config_vars=config_vars)
761 
762  if args.compiler == 'default':
763  if os.name == 'nt':
764  return (python38_config,)
765  elif os.uname()[0] == 'Darwin':
766  # NOTE(rbellevi): Testing takes significantly longer on
767  # MacOS, so we restrict the number of interpreter versions
768  # tested.
769  return (python38_config,)
770  elif platform.machine() == 'aarch64':
771  # Currently the python_debian11_default_arm64 docker image
772  # only has python3.9 installed (and that seems sufficient
773  # for arm64 testing)
774  return (python39_config,)
775  else:
776  return (
777  python36_config,
778  python38_config,
779  )
780  elif args.compiler == 'python3.6':
781  return (python36_config,)
782  elif args.compiler == 'python3.7':
783  return (python37_config,)
784  elif args.compiler == 'python3.8':
785  return (python38_config,)
786  elif args.compiler == 'python3.9':
787  return (python39_config,)
788  elif args.compiler == 'python3.10':
789  return (python310_config,)
790  elif args.compiler == 'pypy':
791  return (pypy27_config,)
792  elif args.compiler == 'pypy3':
793  return (pypy32_config,)
794  elif args.compiler == 'python_alpine':
795  return (python38_config,)
796  elif args.compiler == 'all_the_cpythons':
797  return (
798  python36_config,
799  python37_config,
800  python38_config,
801  python39_config,
802  python310_config,
803  )
804  else:
805  raise Exception('Compiler %s not supported.' % args.compiler)
806 
807  def __str__(self):
808  return 'python'
809 
810 
811 class RubyLanguage(object):
812 
813  def configure(self, config, args):
814  self.config = config
815  self.args = args
816  _check_compiler(self.args.compiler, ['default'])
817 
818  def test_specs(self):
819  tests = [
820  self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
821  timeout_seconds=10 * 60,
822  environ=_FORCE_ENVIRON_FOR_WRAPPERS)
823  ]
824  for test in [
825  'src/ruby/end2end/sig_handling_test.rb',
826  'src/ruby/end2end/channel_state_test.rb',
827  'src/ruby/end2end/channel_closing_test.rb',
828  'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
829  'src/ruby/end2end/killed_client_thread_test.rb',
830  'src/ruby/end2end/forking_client_test.rb',
831  'src/ruby/end2end/grpc_class_init_test.rb',
832  'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
833  'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
834  'src/ruby/end2end/client_memory_usage_test.rb',
835  'src/ruby/end2end/package_with_underscore_test.rb',
836  'src/ruby/end2end/graceful_sig_handling_test.rb',
837  'src/ruby/end2end/graceful_sig_stop_test.rb',
838  'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
839  'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
840  'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
841  'src/ruby/end2end/call_credentials_timeout_test.rb',
842  'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
843  ]:
844  tests.append(
845  self.config.job_spec(['ruby', test],
846  shortname=test,
847  timeout_seconds=20 * 60,
848  environ=_FORCE_ENVIRON_FOR_WRAPPERS))
849  return tests
850 
851  def pre_build_steps(self):
852  return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
853 
854  def build_steps(self):
855  return [['tools/run_tests/helper_scripts/build_ruby.sh']]
856 
857  def build_steps_environ(self):
858  """Extra environment variables set for pre_build_steps and build_steps jobs."""
859  return {}
860 
861  def post_tests_steps(self):
862  return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
863 
864  def dockerfile_dir(self):
865  return 'tools/dockerfile/test/ruby_debian11_%s' % _docker_arch_suffix(
866  self.args.arch)
867 
868  def __str__(self):
869  return 'ruby'
870 
871 
872 class CSharpLanguage(object):
873 
874  def __init__(self):
875  self.platform = platform_string()
876 
877  def configure(self, config, args):
878  self.config = config
879  self.args = args
880  _check_compiler(self.args.compiler, ['default', 'coreclr', 'mono'])
881  if self.args.compiler == 'default':
882  # test both runtimes by default
883  self.test_runtimes = ['coreclr', 'mono']
884  else:
885  # only test the specified runtime
886  self.test_runtimes = [self.args.compiler]
887 
888  if self.platform == 'windows':
889  _check_arch(self.args.arch, ['default'])
890  self._cmake_arch_option = 'x64'
891  else:
892  self._docker_distro = 'debian11'
893 
894  def test_specs(self):
895  with open('src/csharp/tests.json') as f:
896  tests_by_assembly = json.load(f)
897 
898  msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
899  nunit_args = ['--labels=All', '--noresult', '--workers=1']
900 
901  specs = []
902  for test_runtime in self.test_runtimes:
903  if test_runtime == 'coreclr':
904  assembly_extension = '.dll'
905  assembly_subdir = 'bin/%s/netcoreapp3.1' % msbuild_config
906  runtime_cmd = ['dotnet', 'exec']
907  elif test_runtime == 'mono':
908  assembly_extension = '.exe'
909  assembly_subdir = 'bin/%s/net45' % msbuild_config
910  if self.platform == 'windows':
911  runtime_cmd = []
912  elif self.platform == 'mac':
913  # mono before version 5.2 on MacOS defaults to 32bit runtime
914  runtime_cmd = ['mono', '--arch=64']
915  else:
916  runtime_cmd = ['mono']
917  else:
918  raise Exception('Illegal runtime "%s" was specified.' % test_runtime)
919 
920  for assembly in six.iterkeys(tests_by_assembly):
921  assembly_file = 'src/csharp/%s/%s/%s%s' % (
922  assembly, assembly_subdir, assembly, assembly_extension)
923 
924  # normally, run each test as a separate process
925  for test in tests_by_assembly[assembly]:
926  cmdline = runtime_cmd + [assembly_file,
927  '--test=%s' % test] + nunit_args
928  specs.append(
929  self.config.job_spec(
930  cmdline,
931  shortname='csharp.%s.%s' % (test_runtime, test),
932  environ=_FORCE_ENVIRON_FOR_WRAPPERS))
933  return specs
934 
935  def pre_build_steps(self):
936  if self.platform == 'windows':
937  return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
938  else:
939  return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
940 
941  def build_steps(self):
942  if self.platform == 'windows':
943  return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
944  else:
945  return [['tools/run_tests/helper_scripts/build_csharp.sh']]
946 
947  def build_steps_environ(self):
948  """Extra environment variables set for pre_build_steps and build_steps jobs."""
949  if self.platform == 'windows':
950  return {'ARCHITECTURE': self._cmake_arch_option}
951  else:
952  return {}
953 
954  def post_tests_steps(self):
955  if self.platform == 'windows':
956  return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
957  else:
958  return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
959 
960  def dockerfile_dir(self):
961  return 'tools/dockerfile/test/csharp_%s_%s' % (
962  self._docker_distro, _docker_arch_suffix(self.args.arch))
963 
964  def __str__(self):
965  return 'csharp'
966 
967 
968 class ObjCLanguage(object):
969 
970  def configure(self, config, args):
971  self.config = config
972  self.args = args
973  _check_compiler(self.args.compiler, ['default'])
974 
975  def test_specs(self):
976  out = []
977  # Currently not supporting compiling as frameworks in Bazel
978  # TODO(jtattermusch): verify the above claim is still accurate.
979  out.append(
980  self.config.job_spec(
981  ['src/objective-c/tests/build_one_example.sh'],
982  timeout_seconds=20 * 60,
983  shortname='ios-buildtest-example-sample-frameworks',
984  cpu_cost=1e6,
985  environ={
986  'SCHEME': 'Sample',
987  'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
988  'FRAMEWORKS': 'YES'
989  }))
990  # TODO(jtattermusch): Create bazel target for the sample and remove the test task from here.
991  out.append(
992  self.config.job_spec(
993  ['src/objective-c/tests/build_one_example.sh'],
994  timeout_seconds=20 * 60,
995  shortname='ios-buildtest-example-switftsample',
996  cpu_cost=1e6,
997  environ={
998  'SCHEME': 'SwiftSample',
999  'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
1000  }))
1001  # Disabled due to #20258
1002  # TODO (mxyan): Reenable this test when #20258 is resolved.
1003  # out.append(
1004  # self.config.job_spec(
1005  # ['src/objective-c/tests/build_one_example_bazel.sh'],
1006  # timeout_seconds=20 * 60,
1007  # shortname='ios-buildtest-example-watchOS-sample',
1008  # cpu_cost=1e6,
1009  # environ={
1010  # 'SCHEME': 'watchOS-sample-WatchKit-App',
1011  # 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
1012  # 'FRAMEWORKS': 'NO'
1013  # }))
1014 
1015  # TODO(jtattermusch): move the test out of the test/core/iomgr/CFStreamTests directory?
1016  # How does one add the cfstream dependency in bazel?
1017  out.append(
1018  self.config.job_spec(
1019  ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
1020  timeout_seconds=60 * 60,
1021  shortname='ios-test-cfstream-tests',
1022  cpu_cost=1e6,
1023  environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1024  # TODO(jtattermusch): Create bazel target for the test and remove the test from here
1025  # (how does one add the cronet dependency in bazel?)
1026  out.append(
1027  self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1028  timeout_seconds=60 * 60,
1029  shortname='ios-test-cronettests',
1030  cpu_cost=1e6,
1031  environ={'SCHEME': 'CronetTests'}))
1032  # TODO(jtattermusch): Create bazel target for the test and remove the test from here.
1033  out.append(
1034  self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1035  timeout_seconds=30 * 60,
1036  shortname='ios-perf-test',
1037  cpu_cost=1e6,
1038  environ={'SCHEME': 'PerfTests'}))
1039  # TODO(jtattermusch): Clarify what's the difference between PerfTests and PerfTestsPosix
1040  # TODO(jtattermusch): Create bazel target for the test and remove the test from here.
1041  out.append(
1042  self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1043  timeout_seconds=30 * 60,
1044  shortname='ios-perf-test-posix',
1045  cpu_cost=1e6,
1046  environ={'SCHEME': 'PerfTestsPosix'}))
1047  # TODO(jtattermusch): Create bazel target for the test (how does one add the cronet dependency in bazel?)
1048  # TODO(jtattermusch): move the test out of the test/cpp/ios directory?
1049  out.append(
1050  self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
1051  timeout_seconds=60 * 60,
1052  shortname='ios-cpp-test-cronet',
1053  cpu_cost=1e6,
1054  environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1055  # TODO(jtattermusch): Make sure the //src/objective-c/tests:TvTests bazel test passes and remove the test from here.
1056  out.append(
1057  self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1058  timeout_seconds=30 * 60,
1059  shortname='tvos-test-basictests',
1060  cpu_cost=1e6,
1061  environ={
1062  'SCHEME': 'TvTests',
1063  'PLATFORM': 'tvos'
1064  }))
1065 
1066  return sorted(out)
1067 
1068  def pre_build_steps(self):
1069  return []
1070 
1071  def build_steps(self):
1072  return []
1073 
1074  def build_steps_environ(self):
1075  """Extra environment variables set for pre_build_steps and build_steps jobs."""
1076  return {}
1077 
1078  def post_tests_steps(self):
1079  return []
1080 
1081  def dockerfile_dir(self):
1082  return None
1083 
1084  def __str__(self):
1085  return 'objc'
1086 
1087 
1088 class Sanity(object):
1089 
1090  def configure(self, config, args):
1091  self.config = config
1092  self.args = args
1093  _check_compiler(self.args.compiler, ['default'])
1094 
1095  def test_specs(self):
1096  import yaml
1097  with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
1098  environ = {'TEST': 'true'}
1099  if _is_use_docker_child():
1100  environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
1101  environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
1102  environ['IWYU_SKIP_DOCKER'] = 'true'
1103  # sanity tests run tools/bazel wrapper concurrently
1104  # and that can result in a download/run race in the wrapper.
1105  # under docker we already have the right version of bazel
1106  # so we can just disable the wrapper.
1107  environ['DISABLE_BAZEL_WRAPPER'] = 'true'
1108  return [
1109  self.config.job_spec(cmd['script'].split(),
1110  timeout_seconds=30 * 60,
1111  environ=environ,
1112  cpu_cost=cmd.get('cpu_cost', 1))
1113  for cmd in yaml.load(f)
1114  ]
1115 
1116  def pre_build_steps(self):
1117  return []
1118 
1119  def build_steps(self):
1120  return []
1121 
1122  def build_steps_environ(self):
1123  """Extra environment variables set for pre_build_steps and build_steps jobs."""
1124  return {}
1125 
1126  def post_tests_steps(self):
1127  return []
1128 
1129  def dockerfile_dir(self):
1130  return 'tools/dockerfile/test/sanity'
1131 
1132  def __str__(self):
1133  return 'sanity'
1134 
1135 
1136 # different configurations we can run under
1137 with open('tools/run_tests/generated/configs.json') as f:
1138  _CONFIGS = dict(
1139  (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
1140 
1141 _LANGUAGES = {
1142  'c++': CLanguage('cxx', 'c++'),
1143  'c': CLanguage('c', 'c'),
1144  'grpc-node': RemoteNodeLanguage(),
1145  'php7': Php7Language(),
1146  'python': PythonLanguage(),
1147  'ruby': RubyLanguage(),
1148  'csharp': CSharpLanguage(),
1149  'objc': ObjCLanguage(),
1150  'sanity': Sanity()
1151 }
1152 
1153 _MSBUILD_CONFIG = {
1154  'dbg': 'Debug',
1155  'opt': 'Release',
1156  'gcov': 'Debug',
1157 }
1158 
1159 
1160 def _build_step_environ(cfg, extra_env={}):
1161  """Environment variables set for each build step."""
1162  environ = {'CONFIG': cfg, 'GRPC_RUN_TESTS_JOBS': str(args.jobs)}
1163  msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
1164  if msbuild_cfg:
1165  environ['MSBUILD_CONFIG'] = msbuild_cfg
1166  environ.update(extra_env)
1167  return environ
1168 
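# Example: _build_step_environ('dbg', extra_env={'FOO': 'bar'}) returns
# {'CONFIG': 'dbg', 'GRPC_RUN_TESTS_JOBS': str(args.jobs),
#  'MSBUILD_CONFIG': 'Debug', 'FOO': 'bar'} ('FOO' here is just an illustrative
# extra variable); for configs with no _MSBUILD_CONFIG mapping, the
# MSBUILD_CONFIG key is simply omitted.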
1169 
1170 def _windows_arch_option(arch):
1171  """Returns msbuild cmdline option for selected architecture."""
1172  if arch == 'default' or arch == 'x86':
1173  return '/p:Platform=Win32'
1174  elif arch == 'x64':
1175  return '/p:Platform=x64'
1176  else:
1177  print('Architecture %s not supported.' % arch)
1178  sys.exit(1)
1179 
1180 
1181 def _check_arch_option(arch):
1182  """Checks that architecture option is valid."""
1183  if platform_string() == 'windows':
1184  _windows_arch_option(arch)
1185  elif platform_string() == 'linux':
1186  # On linux, we need to be running under docker with the right architecture.
1187  runtime_machine = platform.machine()
1188  runtime_arch = platform.architecture()[0]
1189  if arch == 'default':
1190  return
1191  elif runtime_machine == 'x86_64' and runtime_arch == '64bit' and arch == 'x64':
1192  return
1193  elif runtime_machine == 'x86_64' and runtime_arch == '32bit' and arch == 'x86':
1194  return
1195  elif runtime_machine == 'aarch64' and runtime_arch == '64bit' and arch == 'arm64':
1196  return
1197  else:
1198  print(
1199  'Architecture %s does not match current runtime architecture.' %
1200  arch)
1201  sys.exit(1)
1202  else:
1203  if args.arch != 'default':
1204  print('Architecture %s not supported on current platform.' %
1205  args.arch)
1206  sys.exit(1)
1207 
1208 
1209 def _docker_arch_suffix(arch):
1210  """Returns suffix to dockerfile dir to use."""
1211  if arch == 'default' or arch == 'x64':
1212  return 'x64'
1213  elif arch == 'x86':
1214  return 'x86'
1215  elif arch == 'arm64':
1216  return 'arm64'
1217  else:
1218  print('Architecture %s not supported with current settings.' % arch)
1219  sys.exit(1)
1220 
1221 
1222 def runs_per_test_type(arg_str):
1223  """Auxiliary function to parse the "runs_per_test" flag.
1224 
1225  Returns:
1226  A positive integer or 0, the latter indicating an infinite number of
1227  runs.
1228 
1229  Raises:
1230  argparse.ArgumentTypeError: Upon invalid input.
1231  """
1232  if arg_str == 'inf':
1233  return 0
1234  try:
1235  n = int(arg_str)
1236  if n <= 0:
1237  raise ValueError
1238  return n
1239  except:
1240  msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
1241  raise argparse.ArgumentTypeError(msg)
1242 
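# Example: runs_per_test_type('3') returns 3, runs_per_test_type('inf')
# returns 0 (meaning "run forever"), and inputs such as '0', '-2' or 'abc'
# raise argparse.ArgumentTypeError.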
1243 
1244 def percent_type(arg_str):
1245  pct = float(arg_str)
1246  if pct > 100 or pct < 0:
1247  raise argparse.ArgumentTypeError(
1248  "'%f' is not a valid percentage in the [0, 100] range" % pct)
1249  return pct
1250 
1251 
1252 # This is math.isclose in python >= 3.5
1253 def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
1254  return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
1255 
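# Example: with the default tolerances, isclose(100.0, 100.0) is True while
# isclose(99.5, 100.0) is False; this mirrors math.isclose from Python >= 3.5.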
1256 
1257 def _shut_down_legacy_server(legacy_server_port):
1258  """Shut down legacy version of port server."""
1259  try:
1260  version = int(
1261  urllib.request.urlopen('http://localhost:%d/version_number' %
1262  legacy_server_port,
1263  timeout=10).read())
1264  except:
1265  pass
1266  else:
1267  urllib.request.urlopen('http://localhost:%d/quitquitquit' %
1268  legacy_server_port).read()
1269 
1270 
1271 def _calculate_num_runs_failures(list_of_results):
1272  """Calculate number of runs and failures for a particular test.
1273 
1274  Args:
1275  list_of_results: (List) of JobResult object.
1276  Returns:
1277  A tuple of total number of runs and failures.
1278  """
1279  num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
1280  num_failures = 0
1281  for jobresult in list_of_results:
1282  if jobresult.retries > 0:
1283  num_runs += jobresult.retries
1284  if jobresult.num_failures > 0:
1285  num_failures += jobresult.num_failures
1286  return num_runs, num_failures
1287 
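# Example: for two JobResults, one clean and one with retries=2 and
# num_failures=2, this returns (4, 2): two base runs plus two retries,
# and two recorded failures.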
1288 
1289 class BuildAndRunError(object):
1290  """Represents error type in _build_and_run."""
1291 
1292  BUILD = object()
1293  TEST = object()
1294  POST_TEST = object()
1295 
1296 
1297 # returns a list of things that failed (or an empty list on success)
1298 def _build_and_run(check_cancelled,
1299  newline_on_success,
1300  xml_report=None,
1301  build_only=False):
1302  """Do one pass of building & running tests."""
1303  # build latest sequentially
1304  num_failures, resultset = jobset.run(build_steps,
1305  maxjobs=1,
1306  stop_on_failure=True,
1307  newline_on_success=newline_on_success,
1308  travis=args.travis)
1309  if num_failures:
1310  return [BuildAndRunError.BUILD]
1311 
1312  if build_only:
1313  if xml_report:
1314  report_utils.render_junit_xml_report(
1315  resultset, xml_report, suite_name=args.report_suite_name)
1316  return []
1317 
1318  # start antagonists
1319  antagonists = [
1320  subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
1321  for _ in range(0, args.antagonists)
1322  ]
1323  start_port_server.start_port_server()
1324  resultset = None
1325  num_test_failures = 0
1326  try:
1327  infinite_runs = runs_per_test == 0
1328  one_run = set(spec for language in languages
1329  for spec in language.test_specs()
1330  if (re.search(args.regex, spec.shortname) and
1331  (args.regex_exclude == '' or
1332  not re.search(args.regex_exclude, spec.shortname))))
1333  # When running on travis, we want our test runs to be as similar as possible
1334  # for reproducibility purposes.
1335  if args.travis and args.max_time <= 0:
1336  massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
1337  else:
1338  # whereas otherwise, we want to shuffle things up to give all tests a
1339  # chance to run.
1340  massaged_one_run = list(
1341  one_run) # random.sample needs an indexable seq.
1342  num_jobs = len(massaged_one_run)
1343  # for a random sample, get as many as indicated by the 'sample_percent'
1344  # argument. By default this arg is 100, resulting in a shuffle of all
1345  # jobs.
1346  sample_size = int(num_jobs * args.sample_percent / 100.0)
1347  massaged_one_run = random.sample(massaged_one_run, sample_size)
1348  if not isclose(args.sample_percent, 100.0):
1349  assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
1350  print("Running %d tests out of %d (~%d%%)" %
1351  (sample_size, num_jobs, args.sample_percent))
1352  if infinite_runs:
1353  assert len(massaged_one_run
1354  ) > 0, 'Must have at least one test for a -n inf run'
1355  runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
1356  else itertools.repeat(massaged_one_run, runs_per_test))
1357  all_runs = itertools.chain.from_iterable(runs_sequence)
1358 
1359  if args.quiet_success:
1360  jobset.message(
1361  'START',
1362  'Running tests quietly, only failing tests will be reported',
1363  do_newline=True)
1364  num_test_failures, resultset = jobset.run(
1365  all_runs,
1366  check_cancelled,
1367  newline_on_success=newline_on_success,
1368  travis=args.travis,
1369  maxjobs=args.jobs,
1370  maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
1371  stop_on_failure=args.stop_on_failure,
1372  quiet_success=args.quiet_success,
1373  max_time=args.max_time)
1374  if resultset:
1375  for k, v in sorted(resultset.items()):
1376  num_runs, num_failures = _calculate_num_runs_failures(v)
1377  if num_failures > 0:
1378  if num_failures == num_runs: # what about infinite_runs???
1379  jobset.message('FAILED', k, do_newline=True)
1380  else:
1381  jobset.message('FLAKE',
1382  '%s [%d/%d runs flaked]' %
1383  (k, num_failures, num_runs),
1384  do_newline=True)
1385  finally:
1386  for antagonist in antagonists:
1387  antagonist.kill()
1388  if args.bq_result_table and resultset:
1389  upload_extra_fields = {
1390  'compiler': args.compiler,
1391  'config': args.config,
1392  'iomgr_platform': args.iomgr_platform,
1393  'language': args.language[
1394  0
1395  ], # args.language is a list but will always have one element when uploading to BQ is enabled.
1396  'platform': platform_string()
1397  }
1398  try:
1399  upload_results_to_bq(resultset, args.bq_result_table,
1400  upload_extra_fields)
1401  except NameError as e:
1402  logging.warning(
1403  e) # It's fine to ignore since this is not critical
1404  if xml_report and resultset:
1405  report_utils.render_junit_xml_report(
1406  resultset,
1407  xml_report,
1408  suite_name=args.report_suite_name,
1409  multi_target=args.report_multi_target)
1410 
1411  number_failures, _ = jobset.run(post_tests_steps,
1412  maxjobs=1,
1413  stop_on_failure=False,
1414  newline_on_success=newline_on_success,
1415  travis=args.travis)
1416 
1417  out = []
1418  if number_failures:
1419  out.append(BuildAndRunError.POST_TEST)
1420  if num_test_failures:
1421  out.append(BuildAndRunError.TEST)
1422 
1423  return out
1424 
1425 
1426 # parse command line
1427 argp = argparse.ArgumentParser(description='Run grpc tests.')
1428 argp.add_argument('-c',
1429  '--config',
1430  choices=sorted(_CONFIGS.keys()),
1431  default='opt')
1432 argp.add_argument(
1433  '-n',
1434  '--runs_per_test',
1435  default=1,
1436  type=runs_per_test_type,
1437  help='A positive integer or "inf". If "inf", all tests will run in an '
1438  'infinite loop. Especially useful in combination with "-f"')
1439 argp.add_argument('-r', '--regex', default='.*', type=str)
1440 argp.add_argument('--regex_exclude', default='', type=str)
1441 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
1442 argp.add_argument('-s', '--slowdown', default=1.0, type=float)
1443 argp.add_argument('-p',
1444  '--sample_percent',
1445  default=100.0,
1446  type=percent_type,
1447  help='Run a random sample with that percentage of tests')
1448 argp.add_argument(
1449  '-t',
1450  '--travis',
1451  default=False,
1452  action='store_const',
1453  const=True,
1454  help='When set, indicates that the script is running on CI (= not locally).'
1455 )
1456 argp.add_argument('--newline_on_success',
1457  default=False,
1458  action='store_const',
1459  const=True)
1460 argp.add_argument('-l',
1461  '--language',
1462  choices=sorted(_LANGUAGES.keys()),
1463  nargs='+',
1464  required=True)
1465 argp.add_argument('-S',
1466  '--stop_on_failure',
1467  default=False,
1468  action='store_const',
1469  const=True)
1470 argp.add_argument('--use_docker',
1471  default=False,
1472  action='store_const',
1473  const=True,
1474  help='Run all the tests under docker. That provides ' +
1475  'additional isolation and prevents the need to install ' +
1476  'language specific prerequisites. Only available on Linux.')
1477 argp.add_argument(
1478  '--allow_flakes',
1479  default=False,
1480  action='store_const',
1481  const=True,
1482  help=
1483  'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
1484 )
1485 argp.add_argument(
1486  '--arch',
1487  choices=['default', 'x86', 'x64', 'arm64'],
1488  default='default',
1489  help=
1490  'Selects architecture to target. For some platforms "default" is the only supported choice.'
1491 )
1492 argp.add_argument(
1493  '--compiler',
1494  choices=[
1495  'default',
1496  'gcc6',
1497  'gcc10.2',
1498  'gcc10.2_openssl102',
1499  'gcc11',
1500  'gcc_musl',
1501  'clang6',
1502  'clang13',
1503  'python2.7',
1504  'python3.5',
1505  'python3.6',
1506  'python3.7',
1507  'python3.8',
1508  'python3.9',
1509  'pypy',
1510  'pypy3',
1511  'python_alpine',
1512  'all_the_cpythons',
1513  'electron1.3',
1514  'electron1.6',
1515  'coreclr',
1516  'cmake',
1517  'cmake_ninja_vs2017',
1518  'cmake_vs2017',
1519  'cmake_vs2019',
1520  'mono',
1521  ],
1522  default='default',
1523  help=
1524  'Selects compiler to use. Allowed values depend on the platform and language.'
1525 )
1526 argp.add_argument('--iomgr_platform',
1527  choices=['native', 'gevent', 'asyncio'],
1528  default='native',
1529  help='Selects iomgr platform to build on')
1530 argp.add_argument('--build_only',
1531  default=False,
1532  action='store_const',
1533  const=True,
1534  help='Perform all the build steps but don\'t run any tests.')
1535 argp.add_argument('--measure_cpu_costs',
1536  default=False,
1537  action='store_const',
1538  const=True,
1539  help='Measure the cpu costs of tests')
1540 argp.add_argument('-a', '--antagonists', default=0, type=int)
1541 argp.add_argument('-x',
1542  '--xml_report',
1543  default=None,
1544  type=str,
1545  help='Generates a JUnit-compatible XML report')
1546 argp.add_argument('--report_suite_name',
1547  default='tests',
1548  type=str,
1549  help='Test suite name to use in generated JUnit XML report')
1550 argp.add_argument(
1551  '--report_multi_target',
1552  default=False,
1553  const=True,
1554  action='store_const',
1555  help='Generate separate XML report for each test job (Looks better in UIs).'
1556 )
1557 argp.add_argument(
1558  '--quiet_success',
1559  default=False,
1560  action='store_const',
1561  const=True,
1562  help=
1563  'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
1564  + 'Useful when running many iterations of each test (argument -n).')
1565 argp.add_argument(
1566  '--force_default_poller',
1567  default=False,
1568  action='store_const',
1569  const=True,
1570  help='Don\'t try to iterate over many polling strategies when they exist')
1571 argp.add_argument(
1572  '--force_use_pollers',
1573  default=None,
1574  type=str,
1575  help='Only use the specified comma-delimited list of polling engines. '
1576  'Example: --force_use_pollers epoll1,poll '
1577  ' (This flag has no effect if --force_default_poller flag is also used)')
1578 argp.add_argument('--max_time',
1579  default=-1,
1580  type=int,
1581  help='Maximum test runtime in seconds')
1582 argp.add_argument('--bq_result_table',
1583  default='',
1584  type=str,
1585  nargs='?',
1586  help='Upload test results to a specified BQ table.')
1587 args = argp.parse_args()
1588 
1589 flaky_tests = set()
1590 shortname_to_cpu = {}
1591 
1592 if args.force_default_poller:
1593  _POLLING_STRATEGIES = {}
1594 elif args.force_use_pollers:
1595  _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
1596 
1597 jobset.measure_cpu_costs = args.measure_cpu_costs
1598 
1599 # grab config
1600 run_config = _CONFIGS[args.config]
1601 build_config = run_config.build_config
1602 
1603 # TODO(jtattermusch): is this setting applied/being used?
1604 if args.travis:
1605  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
1606 
1607 languages = set(_LANGUAGES[l] for l in args.language)
1608 for l in languages:
1609  l.configure(run_config, args)
1610 
1611 if len(languages) != 1:
1612  print('Building multiple languages simultaneously is not supported!')
1613  sys.exit(1)
1614 
1615 # If --use_docker was used, respawn the run_tests.py script under a docker container
1616 # instead of continuing.
1617 if args.use_docker:
1618  if not args.travis:
1619  print('Seen --use_docker flag, will run tests under docker.')
1620  print('')
1621  print(
1622  'IMPORTANT: The changes you are testing need to be locally committed'
1623  )
1624  print(
1625  'because only the committed changes in the current branch will be')
1626  print('copied to the docker environment.')
1627  time.sleep(5)
1628 
1629  dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
1630  if len(dockerfile_dirs) > 1:
1631  print('Languages to be tested require running under different docker '
1632  'images.')
1633  sys.exit(1)
1634  else:
1635  dockerfile_dir = next(iter(dockerfile_dirs))
1636 
1637  child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
1638  run_tests_cmd = 'python3 tools/run_tests/run_tests.py %s' % ' '.join(
1639  child_argv[1:])
1640 
1641  env = os.environ.copy()
1642  env['DOCKERFILE_DIR'] = dockerfile_dir
1643  env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run.sh'
1644  env['DOCKER_RUN_SCRIPT_COMMAND'] = run_tests_cmd
1645 
1646  retcode = subprocess.call(
1647  'tools/run_tests/dockerize/build_and_run_docker.sh',
1648  shell=True,
1649  env=env)
1650  _print_debug_info_epilogue(dockerfile_dir=dockerfile_dir)
1651  sys.exit(retcode)
1652 
1653 _check_arch_option(args.arch)
1654 
1655 # collect pre-build steps (which get retried if they fail, e.g. to avoid
1656 # flakes on downloading dependencies etc.)
1657 build_steps = list(
1658  set(
1659  jobset.JobSpec(cmdline,
1660  environ=_build_step_environ(
1661  build_config, extra_env=l.build_steps_environ()),
1662  timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
1663  flake_retries=2)
1664  for l in languages
1665  for cmdline in l.pre_build_steps()))
1666 
1667 # collect build steps
1668 build_steps.extend(
1669  set(
1670  jobset.JobSpec(cmdline,
1671  environ=_build_step_environ(
1672  build_config, extra_env=l.build_steps_environ()),
1673  timeout_seconds=None)
1674  for l in languages
1675  for cmdline in l.build_steps()))
1676 
1677 # collect post test steps
1678 post_tests_steps = list(
1679  set(
1680  jobset.JobSpec(cmdline,
1681  environ=_build_step_environ(
1682  build_config, extra_env=l.build_steps_environ()))
1683  for l in languages
1684  for cmdline in l.post_tests_steps()))
1685 runs_per_test = args.runs_per_test
1686 
1687 errors = _build_and_run(check_cancelled=lambda: False,
1688  newline_on_success=args.newline_on_success,
1689  xml_report=args.xml_report,
1690  build_only=args.build_only)
1691 if not errors:
1692  jobset.message('SUCCESS', 'All tests passed', do_newline=True)
1693 else:
1694  jobset.message('FAILED', 'Some tests failed', do_newline=True)
1695 
1696 if not _is_use_docker_child():
1697  # if --use_docker was used, the outer invocation of run_tests.py will
1698  # print the debug info instead.
1699  _print_debug_info_epilogue()
1700 
1701 exit_code = 0
1702 if BuildAndRunError.BUILD in errors:
1703  exit_code |= 1
1704 if BuildAndRunError.TEST in errors:
1705  exit_code |= 2
1706 if BuildAndRunError.POST_TEST in errors:
1707  exit_code |= 4
1708 sys.exit(exit_code)
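# The exit status is a bit mask: 1 = build failure, 2 = test failure,
# 4 = post-test step failure. For example, an exit code of 6 means both
# tests and post-test steps failed while the build itself succeeded.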