    Searched refs:benchmark_name (Results 1 - 25 of 25)

  /external/v8/tools/
perf-compare.py 172 def getBenchmark(self, benchmark_name):
173 benchmark_object = self.benchmarks_.get(benchmark_name)
175 benchmark_object = Benchmark(benchmark_name)
176 self.benchmarks_[benchmark_name] = benchmark_object
349 def StartBenchmark(self, benchmark_name):
351 self.Print(" <td class=\"name-column\">%s</td>" % benchmark_name)
425 benchmark_name = "/".join(trace["graphs"][1:])
432 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
447 for benchmark_name in benchmark_suite_object.SortedTestKeys():
448 benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
    [all...]
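
The perf-compare.py hits above show a get-or-create registry keyed by benchmark name: the suite lazily builds one Benchmark object per name and hands back the cached object on later lookups. A minimal sketch of that pattern, with a stand-in Benchmark class (the real script's classes are not reproduced here):

    class Benchmark:
        def __init__(self, name):
            self.name = name

    class BenchmarkSuite:
        def __init__(self):
            self.benchmarks_ = {}

        def getBenchmark(self, benchmark_name):
            # Reuse the cached object; create and register it on first lookup.
            benchmark_object = self.benchmarks_.get(benchmark_name)
            if benchmark_object is None:
                benchmark_object = Benchmark(benchmark_name)
                self.benchmarks_[benchmark_name] = benchmark_object
            return benchmark_object

The hit at line 425 also shows where the names come from: a trace's graph path components joined with "/", which is why the suite can later look up and sort by those keys.
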
  /external/google-benchmark/test/
register_benchmark_test.cc 33 CHECK(name == run.benchmark_name()) << "expected " << name << " got "
34 << run.benchmark_name();
skip_with_error_test.cc 36 CHECK(name == run.benchmark_name())
37 << "expected " << name << " got " << run.benchmark_name();
  /external/libcxx/utils/google-benchmark/test/
register_benchmark_test.cc 33 CHECK(name == run.benchmark_name()) << "expected " << name << " got "
34 << run.benchmark_name();
skip_with_error_test.cc 36 CHECK(name == run.benchmark_name())
37 << "expected " << name << " got " << run.benchmark_name();
  /external/toolchain-utils/crosperf/
experiment_status.py 137 benchmark_name = benchmark_run.benchmark.name
138 benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
results_report.py 109 {'benchmark_name': {'perf_event_name': [LabelData]}}
136 def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
141 if benchmark_name not in self.perf_data:
142 self.perf_data[benchmark_name] = {event: [] for event in perf_of_run}
143 ben_data = self.perf_data[benchmark_name]
510 def _ReadExperimentPerfReport(results_directory, label_name, benchmark_name,
518 raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
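
The results_report.py hits describe a two-level accumulator, {'benchmark_name': {'perf_event_name': [values]}}: the first report seen for a benchmark seeds one list per perf event, and later iterations append to those lists. A simplified sketch of that shape; treating perf_of_run as a plain event-to-value dict is an assumption (the real class parses it out of a perf report):

    perf_data = {}

    def process_perf_report(perf_of_run, benchmark_name):
        # First report for this benchmark seeds one list per perf event.
        if benchmark_name not in perf_data:
            perf_data[benchmark_name] = {event: [] for event in perf_of_run}
        ben_data = perf_data[benchmark_name]
        for event, value in perf_of_run.items():
            ben_data[event].append(value)
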
  /external/google-benchmark/src/
reporter.cc 85 std::string BenchmarkReporter::Run::benchmark_name() const {
statistics.cc 122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
150 data.run_name = reports[0].benchmark_name();
csv_reporter.cc 95 std::string name = run.benchmark_name();
complexity.cc 186 std::string run_name = reports[0].benchmark_name().substr(
187 0, reports[0].benchmark_name().find('/'));
console_reporter.cc 123 result.benchmark_name().c_str());
json_reporter.cc 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
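
One detail worth pulling out of the complexity.cc hits: runs of one benchmark family differ only after the first "/" (size and argument suffixes), so the family name used for complexity fitting is the run name truncated at that separator. The same trim in Python, purely for illustration (the libcxx copy below is identical):

    def family_name(benchmark_name):
        # "BM_Sort/1024" and "BM_Sort/2048" share the family "BM_Sort";
        # a name with no "/" is returned unchanged, matching substr/find.
        return benchmark_name.split('/', 1)[0]
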
  /external/libcxx/utils/google-benchmark/src/
reporter.cc 85 std::string BenchmarkReporter::Run::benchmark_name() const {
statistics.cc 122 CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
150 data.run_name = reports[0].benchmark_name();
csv_reporter.cc 95 std::string name = run.benchmark_name();
complexity.cc 186 std::string run_name = reports[0].benchmark_name().substr(
187 0, reports[0].benchmark_name().find('/'));
console_reporter.cc 124 result.benchmark_name().c_str());
json_reporter.cc 170 out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
  /external/python/cpython3/Tools/importbench/
importbench.py 212 benchmark_name = benchmark.__doc__
213 old_result = max(prev_results[benchmark_name])
214 new_result = max(new_results[benchmark_name])
218 print(benchmark_name, ':', result)
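
The importbench.py hits compare best-of runs per benchmark: the name is taken from the benchmark's docstring, and the best (max) measurement from the previous and new runs is compared under that name. A hedged reconstruction of the loop, assuming both result dicts map benchmark_name to a list of measurements (the real script's printed format likely differs):

    def compare_results(prev_results, new_results):
        for benchmark_name in sorted(prev_results):
            old_result = max(prev_results[benchmark_name])
            new_result = max(new_results[benchmark_name])
            result = '{:.2%}'.format(new_result / old_result)
            print(benchmark_name, ':', result)
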
  /external/google-fruit/extras/benchmark/
run_benchmarks.py 728 benchmark_name = benchmark_definition['name']
730 if (benchmark_name in {'boost_di_compile_time', 'boost_di_run_time', 'boost_di_executable_size'
    [all...]
  /external/tensorflow/tensorflow/tools/benchmark/
benchmark_model.cc 215 const string& benchmark_name, const string& postfix,
219 stream << benchmark_name;
381 string benchmark_name = "";
412 Flag("benchmark_name", &benchmark_name, "benchmark name"),
482 LOG(INFO) << "Benchmark name: [" << benchmark_name << "]";
643 if (!benchmark_name.empty() && !output_prefix.empty()) {
653 RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs,
657 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1,
662 RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference"
    [all...]
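
The benchmark_model.cc hits record one reporting entry per stage, reusing the same benchmark_name with stage postfixes such as "meta-init" and "meta-first-inference" (plus an empty postfix for the headline number). Illustrative Python only; the joiner and the writer are assumptions, since the real helper formats a TensorFlow benchmark entry:

    def record_stage_entries(benchmark_name, stages):
        # stages: (postfix, num_runs, total_time_s) tuples. "_" as the joiner
        # is an assumption, not benchmark_model.cc's exact formatting.
        for postfix, num_runs, total_time_s in stages:
            name = benchmark_name if not postfix else "%s_%s" % (benchmark_name, postfix)
            print(name, num_runs, total_time_s)
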
  /external/tensorflow/tensorflow/python/platform/
benchmark.py 341 benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
352 full_benchmark_name = "%s.%s" % (benchmark_name, attr)
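
The platform/benchmark.py hits show how the names are assembled: the class-level name is "<module>.<class>", and each benchmark method extends it with its own attribute name. A rough, illustrative equivalent; the startswith("benchmark") filter is an assumption about the convention, not the real registry logic:

    def full_benchmark_names(benchmark_class):
        benchmark_name = "%s.%s" % (benchmark_class.__module__,
                                    benchmark_class.__name__)
        for attr in dir(benchmark_class):
            # Assumed convention: benchmark methods start with "benchmark".
            if attr.startswith("benchmark"):
                yield "%s.%s" % (benchmark_name, attr)
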
  /external/google-benchmark/include/benchmark/
benchmark.h     [all...]
  /external/libcxx/utils/google-benchmark/include/benchmark/
benchmark.h     [all...]
