    Searched refs: benchmark_results (Results 1 - 4 of 4)

  /external/toolchain-utils/crosperf/
results_report.py
    165  def _GetTables(benchmark_results, columns, table_type):
    166    iter_counts = benchmark_results.iter_counts
    167    result = benchmark_results.run_keyvals
    180    table = TableGenerator(runs, benchmark_results.label_names).GetTable()
    189  def _GetPerfTables(benchmark_results, columns, table_type):
    190    p_table = _PerfTable(benchmark_results.benchmark_names_and_iterations,
    191                         benchmark_results.label_names,
    192                         benchmark_results.read_perf_report)
    196    iterations = benchmark_results.iter_counts[benchmark]
    203    benchmark_results.label_names
    [all...]
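The two functions above touch only a handful of attributes on the benchmark_results object. A minimal sketch of that attribute surface, assuming field shapes that these hits do not confirm (this is not crosperf's actual BenchmarkResults class):

from dataclasses import dataclass
from typing import Callable, Dict, List, Tuple

@dataclass
class FakeBenchmarkResults:
    # Attribute names come from the hits above; the shapes are assumptions.
    label_names: List[str]
    benchmark_names_and_iterations: List[Tuple[str, int]]
    # Assumed layout: benchmark -> label -> one keyval dict per iteration.
    run_keyvals: Dict[str, Dict[str, List[dict]]]
    read_perf_report: Callable = lambda *args: {}

    @property
    def iter_counts(self):
        # Assumed derivation: benchmark name -> iteration count.
        return dict(self.benchmark_names_and_iterations)

results = FakeBenchmarkResults(
    label_names=['baseline', 'candidate'],
    benchmark_names_and_iterations=[('octane', 3)],
    run_keyvals={'octane': {'baseline': [{'score': 100.0}] * 3,
                            'candidate': [{'score': 104.0}] * 3}})
print(results.iter_counts)  # {'octane': 3}
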
results_report_unittest.py
    202  def _GetOutput(self, experiment=None, benchmark_results=None):
    207    HTMLResultsReport(benchmark_results).GetReport()
    244    output = self._GetOutput(benchmark_results=results)
    255    output = self._GetOutput(benchmark_results=results)
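The unittest hits show a helper that accepts either a full experiment or pre-built results and feeds the latter straight into a report constructor. A sketch of that dispatch pattern with stand-in classes (HTMLResultsReport's real behavior is not shown in these hits):

import unittest

class StubReport:
    def __init__(self, benchmark_results):
        self._results = benchmark_results
    def GetReport(self):
        return 'report for %d benchmark(s)' % len(self._results)

class ReportTest(unittest.TestCase):
    def _GetOutput(self, experiment=None, benchmark_results=None):
        # Exactly one of the two inputs should be provided.
        assert (experiment is None) != (benchmark_results is None)
        if benchmark_results is None:
            benchmark_results = experiment.to_results()  # hypothetical helper
        return StubReport(benchmark_results).GetReport()

    def test_report_from_results(self):
        output = self._GetOutput(benchmark_results={'octane': {}})
        self.assertIn('1 benchmark', output)

if __name__ == '__main__':
    unittest.main()
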
generate_report.py
    199  def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
    206    get_contents = lambda: report_ctor(benchmark_results).GetReport()
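At line 206 the report is wrapped in a lambda rather than built eagerly. A sketch of why that deferral is useful, assuming a (ctor, extension) action shape and a writer helper that these hits do not confirm:

import os

def _WriteIfWanted(path, get_contents, overwrite, verbose):
    if os.path.exists(path) and not overwrite:
        if verbose:
            print('Skipping existing file:', path)
        return
    with open(path, 'w') as f:
        f.write(get_contents())  # the report is only constructed here

def RunActions(actions, benchmark_results, output_prefix, overwrite, verbose):
    for report_ctor, extension in actions:
        # Default argument pins report_ctor per iteration (avoids the
        # classic late-binding pitfall with lambdas in a loop).
        get_contents = lambda ctor=report_ctor: ctor(benchmark_results).GetReport()
        _WriteIfWanted(output_prefix + extension, get_contents, overwrite, verbose)
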
  /external/gemmlowp/test/
benchmark.cc
    150  std::map<gemm_t, std::vector<double>> benchmark_results;  // local definition
    190  benchmark_results[gemm].emplace_back(gflops);
    204  for (auto b : benchmark_results) {
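The benchmark.cc hits accumulate GFLOPS samples per GEMM configuration in a map of vectors, then walk the map to report. The same accumulate-then-summarize pattern, rendered in Python for comparison (a sketch, not gemmlowp's code):

from collections import defaultdict
from statistics import median

benchmark_results = defaultdict(list)  # gemm config -> GFLOPS samples

for gemm in [(512, 512, 512), (1024, 1024, 1024)]:  # assumed matrix shapes
    for _ in range(3):        # repeated timed runs, mirroring emplace_back at line 190
        gflops = 1.0          # placeholder for a measured throughput
        benchmark_results[gemm].append(gflops)

for gemm, samples in benchmark_results.items():  # mirrors the loop at line 204
    print(gemm, 'median GFLOPS:', median(samples))
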
