Classes | |
class | BenchmarkColor |
class | TestGetUniqueBenchmarkNames |
Unit tests. More... | |
class | TestReportDifference |
class | TestReportDifferenceBetweenFamilies |
class | TestReportDifferenceForPercentageAggregates |
class | TestReportDifferenceWithUTest |
class | TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly |
class | TestReportSorting |
Functions | |
def | assert_measurements (unittest_instance, lhs, rhs) |
def | assert_utest (unittest_instance, lhs, rhs) |
def | calc_utest (timings_cpu, timings_time) |
def | calculate_change (old_val, new_val) |
def | color_format (use_color, fmt_str, *args, **kwargs) |
def | extract_field (partition, field_name) |
def | filter_benchmark (json_orig, family, replacement="") |
def | find_longest_name (benchmark_list) |
def | generate_difference_report (json1, json2, use_color=True) |
def | get_difference_report (json1, json2, utest=False) |
def | get_unique_benchmark_names (json) |
def | intersect (list1, list2) |
def | is_potentially_comparable_benchmark (x) |
def | partition_benchmarks (json1, json2) |
def | print_difference_report (json_diff_report, include_aggregates_only=False, utest=False, utest_alpha=0.05, use_color=True) |
def | print_utest (bc_name, utest, utest_alpha, first_col_width, use_color=True) |
Variables | |
BC_BOLD = BenchmarkColor('BOLD', '\033[1m') | |
BC_CYAN = BenchmarkColor('CYAN', '\033[96m') | |
BC_ENDC = BenchmarkColor('ENDC', '\033[0m') | |
BC_FAIL = BenchmarkColor('FAIL', '\033[91m') | |
BC_HEADER = BenchmarkColor('HEADER', '\033[92m') | |
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') | |
BC_NONE = BenchmarkColor('NONE', '') | |
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') | |
BC_OKGREEN | |
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') | |
BC_WARNING = BenchmarkColor('WARNING', '\033[93m') | |
BC_WHITE = BenchmarkColor('WHITE', '\033[97m') | |
UTEST_COL_NAME | |
UTEST_MIN_REPETITIONS | |
UTEST_OPTIMAL_REPETITIONS | |
report.py - Utilities for reporting statistics about benchmark results
def gbench.report.assert_measurements | ( | unittest_instance, | |
lhs, | |||
rhs | |||
) |
Definition at line 1039 of file benchmark/tools/gbench/report.py.
def gbench.report.assert_utest | ( | unittest_instance, | |
lhs, | |||
rhs | |||
) |
Definition at line 1023 of file benchmark/tools/gbench/report.py.
def gbench.report.calc_utest | ( | timings_cpu, | |
timings_time | |||
) |
Definition at line 160 of file benchmark/tools/gbench/report.py.
def gbench.report.calculate_change | ( | old_val, | |
new_val | |||
) |
Return a float representing the decimal change between old_val and new_val.
Definition at line 73 of file benchmark/tools/gbench/report.py.
def gbench.report.color_format | ( | use_color, | |
fmt_str, | |||
* | args, | ||
** | kwargs | ||
) |
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string.
Definition at line 45 of file benchmark/tools/gbench/report.py.
def gbench.report.extract_field | ( | partition, | |
field_name | |||
) |
Definition at line 153 of file benchmark/tools/gbench/report.py.
def gbench.report.filter_benchmark | ( | json_orig, | |
family, | |||
replacement = "" |
|||
) |
Apply a filter to the json, and only leave the 'family' of benchmarks.
Definition at line 84 of file benchmark/tools/gbench/report.py.
def gbench.report.find_longest_name | ( | benchmark_list | ) |
Return the length of the longest benchmark name in a given list of benchmark JSON objects
Definition at line 61 of file benchmark/tools/gbench/report.py.
def gbench.report.generate_difference_report | ( | json1, | |
json2, | |||
use_color = True |
|||
) |
Calculate and report the difference between each test of two benchmark runs specified as 'json1' and 'json2'.
Definition at line 69 of file benchmark/tools/gbench/report.py.
def gbench.report.get_difference_report | ( | json1, | |
json2, | |||
utest = False |
|||
) |
Calculate and report the difference between each test of two benchmark runs specified as 'json1' and 'json2'. Output is another json containing relevant details for each test run.
Definition at line 210 of file benchmark/tools/gbench/report.py.
def gbench.report.get_unique_benchmark_names | ( | json | ) |
While *keeping* the order, give all the unique 'names' used for benchmarks.
Definition at line 100 of file benchmark/tools/gbench/report.py.
def gbench.report.intersect | ( | list1, | |
list2 | |||
) |
Given two lists, get a new list consisting of the elements only contained in *both of the input lists*, while preserving the ordering.
Definition at line 111 of file benchmark/tools/gbench/report.py.
def gbench.report.is_potentially_comparable_benchmark | ( | x | ) |
Definition at line 119 of file benchmark/tools/gbench/report.py.
def gbench.report.partition_benchmarks | ( | json1, | |
json2 | |||
) |
While preserving the ordering, find benchmarks with the same names in both of the inputs, and group them. (i.e. partition/filter into groups with common name)
Definition at line 123 of file benchmark/tools/gbench/report.py.
def gbench.report.print_difference_report | ( | json_diff_report, | |
include_aggregates_only = False , |
|||
utest = False , |
|||
utest_alpha = 0.05 , |
|||
use_color = True |
|||
) |
Print the difference report between each test of two benchmark runs, as computed by get_difference_report and passed in as 'json_diff_report'.
Definition at line 274 of file benchmark/tools/gbench/report.py.
def gbench.report.print_utest | ( | bc_name, | |
utest, | |||
utest_alpha, | |||
first_col_width, | |||
use_color = True |
|||
) |
Definition at line 177 of file benchmark/tools/gbench/report.py.
gbench.report.BC_BOLD = BenchmarkColor('BOLD', '\033[1m') |
Definition at line 37 of file benchmark/tools/gbench/report.py.
gbench.report.BC_CYAN = BenchmarkColor('CYAN', '\033[96m') |
Definition at line 29 of file benchmark/tools/gbench/report.py.
gbench.report.BC_ENDC = BenchmarkColor('ENDC', '\033[0m') |
Definition at line 36 of file benchmark/tools/gbench/report.py.
gbench.report.BC_FAIL = BenchmarkColor('FAIL', '\033[91m') |
Definition at line 35 of file benchmark/tools/gbench/report.py.
gbench.report.BC_HEADER = BenchmarkColor('HEADER', '\033[92m') |
Definition at line 32 of file benchmark/tools/gbench/report.py.
gbench.report.BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') |
Definition at line 28 of file benchmark/tools/gbench/report.py.
gbench.report.BC_NONE = BenchmarkColor('NONE', '') |
Definition at line 27 of file benchmark/tools/gbench/report.py.
gbench.report.BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') |
Definition at line 30 of file benchmark/tools/gbench/report.py.
gbench.report.BC_OKGREEN |
Definition at line 31 of file benchmark/tools/gbench/report.py.
gbench.report.BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') |
Definition at line 38 of file benchmark/tools/gbench/report.py.
gbench.report.BC_WARNING = BenchmarkColor('WARNING', '\033[93m') |
Definition at line 33 of file benchmark/tools/gbench/report.py.
gbench.report.BC_WHITE = BenchmarkColor('WHITE', '\033[97m') |
Definition at line 34 of file benchmark/tools/gbench/report.py.
gbench.report.UTEST_COL_NAME |
Definition at line 42 of file benchmark/tools/gbench/report.py.
gbench.report.UTEST_MIN_REPETITIONS |
Definition at line 40 of file benchmark/tools/gbench/report.py.
gbench.report.UTEST_OPTIMAL_REPETITIONS |
Definition at line 41 of file benchmark/tools/gbench/report.py.