Searched refs:benchmark_runs (Results 1 - 10 of 10)
/external/toolchain-utils/crosperf/results_organizer_unittest.py
  9: We create some labels, benchmark_runs and then create a ResultsOrganizer,
  80: benchmark_runs = [None] * 8
  81: benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
  83: benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
  85: benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
  87: benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
  89: benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
  91: benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
  93: benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
  95: benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', ''
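Read in order, these hits show the fixture building eight BenchmarkRun objects covering every combination of two benchmarks, two labels, and two iterations. A minimal sketch of that setup, with hypothetical stand-ins for the Benchmark and Label classes and the trailing BenchmarkRun constructor arguments reduced to the two empty strings visible above:

    # Hypothetical stand-ins; the real crosperf classes carry many more fields.
    class Benchmark(object):
      def __init__(self, name):
        self.name = name

    class Label(object):
      def __init__(self, name):
        self.name = name

    class BenchmarkRun(object):
      # Assumed subset of the real signature: name, benchmark, label,
      # iteration, plus string arguments the test leaves empty.
      def __init__(self, name, benchmark, label, iteration, *rest):
        self.name = name
        self.benchmark = benchmark
        self.label = label
        self.iteration = iteration

    benchmarks = [Benchmark('bench1'), Benchmark('bench2')]
    labels = [Label('image1'), Label('image2')]

    # Same shape as lines 80-95: one run per (benchmark, label, iteration).
    benchmark_runs = [None] * 8
    index = 0
    for benchmark in benchmarks:
      for label in labels:
        for iteration in (1, 2):
          benchmark_runs[index] = BenchmarkRun(
              'b%d' % (index + 1), benchmark, label, iteration, '', '')
          index += 1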
experiment_status.py
  18: self.num_total = len(self.experiment.benchmark_runs)
  88: """Get the status string of all the benchmark_runs."""
  90: for benchmark_run in self.experiment.benchmark_runs:
  117: def _GetNamesAndIterations(self, benchmark_runs):
  120: for benchmark_run in benchmark_runs:
  126: def _GetCompactNamesAndIterations(self, benchmark_runs):
  128: for benchmark_run in benchmark_runs:
  145: return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
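The experiment_status.py hits outline a small reporting helper: walk the runs, emit one "name (iteration)" segment per run, and prefix the count, as in the return at line 145. A sketch under the assumption that each run exposes .name and .iteration:

    def _get_names_and_iterations(benchmark_runs):
      # One segment per run, then the count up front (mirrors line 145).
      output_segs = []
      for benchmark_run in benchmark_runs:
        output_segs.append('  %s (iteration %d)\n' %
                           (benchmark_run.name, benchmark_run.iteration))
      return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))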
experiment.py
  106: self.benchmark_runs = self._GenerateBenchmarkRuns()
  119: benchmark_runs = []
  129: benchmark_runs.append(benchmark_run.BenchmarkRun(
  134: return benchmark_runs
  143: for t in self.benchmark_runs:
  182: for run in self.benchmark_runs:
  189: for run in self.benchmark_runs:
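Lines 106-134 suggest that Experiment builds its run list once, in _GenerateBenchmarkRuns, and that the later loops only consume it. A sketch of the generation step; the triple loop order and the tuple payload are assumptions, since the snippets only show the append and the return:

    def _generate_benchmark_runs(labels, benchmarks):
      benchmark_runs = []
      for label in labels:
        for benchmark in benchmarks:
          # One run per requested iteration of each benchmark on each image.
          for iteration in range(1, benchmark.iterations + 1):
            name = '%s: %s (%d)' % (label.name, benchmark.name, iteration)
            benchmark_runs.append((name, benchmark, label, iteration))
      return benchmark_runs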
results_organizer.py
  4: """Parse data from benchmark_runs for tabulator."""
  107: def _MakeOrganizeResultOutline(benchmark_runs, labels):
  119: # iterations (e.g. [r.iteration for r in benchmark_runs] == [1, 3])
  121: for run in benchmark_runs:
  129: for run in benchmark_runs:
  138: def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
  139: """Create a dict from benchmark_runs.
  152: result = _MakeOrganizeResultOutline(benchmark_runs, labels)
  159: for benchmark_run in benchmark_runs:
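The two passes over benchmark_runs (lines 121 and 129 to size the outline, line 159 to fill it) fit a nested layout of result[benchmark_name][label_index][iteration_index]. That exact shape, and the .result attribute, are assumptions; a sketch:

    def _make_organize_result_outline(benchmark_runs, labels):
      # First pass: the deepest iteration seen per benchmark sets the width.
      max_iterations = {}
      for run in benchmark_runs:
        name = run.benchmark.name
        max_iterations[name] = max(max_iterations.get(name, 0), run.iteration)
      return {name: [[None] * count for _ in labels]
              for name, count in max_iterations.items()}

    def organize_results(benchmark_runs, labels):
      """Create a dict from benchmark_runs (sketch of lines 138-159)."""
      result = _make_organize_result_outline(benchmark_runs, labels)
      label_index = {label.name: i for i, label in enumerate(labels)}
      # Second pass: drop each run's result (assumed attribute) into its slot.
      for benchmark_run in benchmark_runs:
        row = result[benchmark_run.benchmark.name]
        row[label_index[benchmark_run.label.name]][benchmark_run.iteration - 1] = (
            benchmark_run.result)
      return result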
schedv2.py
  154: some benchmark_runs.
  195: """The thread to read cache for a list of benchmark_runs.
  244: # Mapping from label to a list of benchmark_runs.
  246: for br in self._experiment.benchmark_runs:
  278: n_benchmarkruns = len(self._experiment.benchmark_runs)
  283: BenchmarkRunCacheReader(self, self._experiment.benchmark_runs).run()
  297: benchmarkrun_segments.append(self._experiment.benchmark_runs[start:end])
  298: benchmarkrun_segments.append(self._experiment.benchmark_runs[
  301: # Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
  318: 'Total {} cache hit out of {} benchmark_runs.'.format
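Lines 278-301 outline the cache-warming strategy: with few runs a single BenchmarkRunCacheReader handles them all, otherwise the list is cut into segments, one per reader thread, with the assert at line 301 guarding that no run is lost. A sketch of the segmentation; the chunk-size policy here is an assumption:

    def _split_into_segments(benchmark_runs, n_readers):
      n_benchmarkruns = len(benchmark_runs)
      per_segment = max(1, n_benchmarkruns // n_readers)
      benchmarkrun_segments = []
      for start in range(0, n_benchmarkruns, per_segment):
        benchmarkrun_segments.append(benchmark_runs[start:start + per_segment])
      # Assert: aggregation of benchmarkrun_segments equals benchmark_runs
      # (the same invariant as line 301).
      assert sum(benchmarkrun_segments, []) == benchmark_runs
      return benchmarkrun_segments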
experiment_runner_unittest.py
  288: for r in self.exp.benchmark_runs:
  318: for r in self.exp.benchmark_runs:
  374: for r in self.exp.benchmark_runs:
  395: bench_run = self.exp.benchmark_runs[5]
  398: self.assertEqual(len(self.exp.benchmark_runs), 6)
  420: for r in self.exp.benchmark_runs:
generate_report.py
  63: def CountBenchmarks(benchmark_runs):
  64: """Counts the number of iterations for each benchmark in benchmark_runs."""
  65: # Example input for benchmark_runs:
  70: for name, results in benchmark_runs.iteritems()]
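Unlike the other files, generate_report.py treats benchmark_runs as a dict mapping benchmark name to a list of per-iteration results; the .iteritems() call at line 70 (Python 2; items() in Python 3) makes that clear. A sketch of CountBenchmarks under that reading:

    def count_benchmarks(benchmark_runs):
      """Counts the number of iterations for each benchmark in benchmark_runs."""
      # benchmark_runs here: {benchmark_name: [iteration_1_results, ...]}
      return [(name, len(results)) for name, results in benchmark_runs.items()]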
experiment_runner.py
  149: for br in experiment.benchmark_runs:
  218: for benchmark_run in experiment.benchmark_runs:
  274: for benchmark_run in experiment.benchmark_runs:
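experiment_runner.py only ever walks the list, but the loop variable t back at experiment.py line 143 hints that each BenchmarkRun is a thread the runner starts and later joins. A minimal sketch under that assumption; none of these method bodies are confirmed by the snippets:

    import threading

    class BenchmarkRun(threading.Thread):
      def __init__(self, name):
        super(BenchmarkRun, self).__init__()
        self.name = name

      def run(self):
        pass  # the real class would execute one benchmark iteration here

    def run_experiment(experiment):
      for benchmark_run in experiment.benchmark_runs:
        benchmark_run.start()
      for t in experiment.benchmark_runs:
        t.join()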
results_report_unittest.py
  114: num_runs = len(experiment.benchmark_runs) // num_configs
  139: experiment.benchmark_runs.extend(MakeSuccessfulRun(n)
results_report.py
  305: for benchmark_run in self.experiment.benchmark_runs:
  514: return OrganizeResults(experiment.benchmark_runs, experiment.labels,
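Line 514 closes the loop: the report layer hands experiment.benchmark_runs and experiment.labels straight to OrganizeResults and renders whatever nested structure comes back. A sketch of such a consumer, assuming the organized shape from the results_organizer.py sketch above:

    def build_report(experiment, organize_results):
      organized = organize_results(experiment.benchmark_runs, experiment.labels)
      lines = []
      for benchmark_name, per_label in organized.items():
        for label, iterations in zip(experiment.labels, per_label):
          lines.append('%s on %s: %d iteration(s)' %
                       (benchmark_name, label.name, len(iterations)))
      return '\n'.join(lines)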