    Searched refs:benchmarks (Results 1 - 25 of 218)


  /libcore/benchmarks/src/benchmarks/
Foo.java 17 package benchmarks;
20 * A trivial class used by several benchmarks.
StringDexCacheBenchmark.java 17 package benchmarks;
VirtualVersusInterfaceBenchmark.java 17 package benchmarks;
  /bionic/benchmarks/
run-on-host.sh 6 m -j bionic-benchmarks-glibc
11 ${HOST_OUT}/nativetest64/bionic-benchmarks-glibc/bionic-benchmarks-glibc $@
21 prepare $1 bionic-benchmarks
28 ${NATIVETEST}/bionic-benchmarks/bionic-benchmarks $@
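
The run-on-host.sh fragments above show the wrapper's pattern: build a flavour of the benchmarks with `m -j`, then execute the binary from the output tree, forwarding the caller's arguments the way the script forwards `$@`. A rough Python analogue of the host path (a sketch, not the real script: it assumes the binary was already built and that HOST_OUT is set by the Android build environment, as it is when the script runs):

    import os
    import subprocess
    import sys

    # Run the glibc flavour of the benchmarks from the host output tree,
    # mirroring run-on-host.sh's use of ${HOST_OUT} and $@.
    host_out = os.environ['HOST_OUT']
    binary = os.path.join(host_out, 'nativetest64',
                          'bionic-benchmarks-glibc', 'bionic-benchmarks-glibc')
    subprocess.check_call([binary] + sys.argv[1:])
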
  /external/toolchain-utils/crosperf/
results_organizer_unittest.py 79 benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
81 benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
83 benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
85 benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
87 benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
89 benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
91 benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
93 benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
95 benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '',
104 organized = OrganizeResults(benchmark_runs, labels, benchmarks)
    [all...]
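
The unittest fragments above show the shape of the data OrganizeResults consumes: one BenchmarkRun per (benchmark, label, iteration) triple. A minimal stand-in illustrating that grouping idea (the real crosperf classes take more constructor arguments, and the actual output structure of OrganizeResults is not visible in these hits, so treat this as an assumption about the intent):

    from collections import namedtuple

    # Hypothetical, pared-down versions of the crosperf types.
    Benchmark = namedtuple('Benchmark', 'name')
    Label = namedtuple('Label', 'name')
    BenchmarkRun = namedtuple('BenchmarkRun', 'name benchmark label iteration')

    def organize_results(benchmark_runs, labels, benchmarks):
        # Group runs as organized[benchmark.name][label.name] -> runs,
        # ordered by iteration number.
        organized = {b.name: {l.name: [] for l in labels} for b in benchmarks}
        for run in benchmark_runs:
            organized[run.benchmark.name][run.label.name].append(run)
        for per_label in organized.values():
            for runs in per_label.values():
                runs.sort(key=lambda r: r.iteration)
        return organized

    b1 = Benchmark('bench1')
    l1 = Label('image1')
    runs = [BenchmarkRun('b2', b1, l1, 2), BenchmarkRun('b1', b1, l1, 1)]
    print(organize_results(runs, [l1], [b1]))
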
experiment_factory_unittest.py 56 self.assertEqual(len(exp.benchmarks), 1)
57 self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
58 self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
59 self.assertEqual(exp.benchmarks[0].iterations, 3)
160 self.assertEqual(len(exp.benchmarks), 1)
161 self.assertEqual(exp.benchmarks[0].name, 'kraken')
162 self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
163 self.assertEqual(exp.benchmarks[0].iterations, 1)
164 self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
165 self.assertFalse(exp.benchmarks[0].show_all_results)
    [all...]
experiment_factory.py 92 ChromeOS benchmarks, but the idea is that in the future, other types
96 def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
99 """Add all the tests in a set to the benchmarks list."""
105 benchmarks.append(telemetry_benchmark)
157 # Construct benchmarks.
160 benchmarks = []
175 self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
179 self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
184 self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
188 # Add non-telemetry toolchain-perf benchmarks
    [all...]
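
Taken together, the factory fragments show one accumulation pattern: start with an empty benchmarks list, then expand each named suite (perfv2, page cycler, toolchain perf) into individual Benchmark entries via AppendBenchmarkSet. A simplified sketch of that pattern, with field names taken from the asserts in experiment_factory_unittest.py above (the real factory passes many more parameters, e.g. test_args and iteration counts per suite):

    # Simplified sketch of crosperf's AppendBenchmarkSet pattern.
    class Benchmark(object):
        def __init__(self, name, test_name, iterations, suite):
            self.name = name
            self.test_name = test_name
            self.iterations = iterations
            self.suite = suite

    def append_benchmark_set(benchmarks, benchmark_list, iterations):
        """Add all the tests in a set to the benchmarks list."""
        for test_name in benchmark_list:
            benchmarks.append(
                Benchmark(test_name, test_name, iterations,
                          'telemetry_Crosperf'))

    benchmarks = []
    append_benchmark_set(benchmarks, ['kraken'], 1)
    print(benchmarks[0].name, benchmarks[0].suite)
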
  /libcore/benchmarks/src/benchmarks/regression/
StringLengthBenchmark.java 17 package benchmarks.regression;
DefaultCharsetBenchmark.java 17 package benchmarks.regression;
FileBenchmark.java 17 package benchmarks.regression;
SSLSocketFactoryBenchmark.java 17 package benchmarks.regression;
ThreadLocalBenchmark.java 17 package benchmarks.regression;
CharsetForNameBenchmark.java 17 package benchmarks.regression;
DnsBenchmark.java 17 package benchmarks.regression;
StringToRealBenchmark.java 17 package benchmarks.regression;
IntConstantRemainderBenchmark.java 17 package benchmarks.regression;
LoopingBackwardsBenchmark.java 17 package benchmarks.regression;
  /external/okhttp/benchmarks/src/main/java/com/squareup/okhttp/benchmarks/
HttpClient.java 16 package com.squareup.okhttp.benchmarks;
Client.java 16 package com.squareup.okhttp.benchmarks;
  /external/eigen/bench/
benchmark-blocking-sizes.cpp 345 void serialize_benchmarks(const char* filename, const vector<benchmark_t>& benchmarks, size_t first_benchmark_to_run)
353 size_t benchmarks_vector_size = benchmarks.size();
357 fwrite(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file);
361 bool deserialize_benchmarks(const char* filename, vector<benchmark_t>& benchmarks, size_t& first_benchmark_to_run)
377 benchmarks.resize(benchmarks_vector_size);
378 if (benchmarks.size() != fread(benchmarks.data(), sizeof(benchmark_t), benchmarks.size(), file)) {
386 vector<benchmark_t>& benchmarks,
    [all...]
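
The benchmark-blocking-sizes.cpp fragments persist the whole vector of benchmark records plus the index of the first benchmark still to run, so an interrupted sweep can resume where it left off. The same checkpointing idea in a few lines of Python (a sketch only; the C++ code writes raw benchmark_t structs with fwrite and validates the sizes on read):

    import pickle

    # Checkpoint format: (index of first benchmark still to run, records).
    def serialize_benchmarks(filename, benchmarks, first_benchmark_to_run):
        with open(filename, 'wb') as f:
            pickle.dump((first_benchmark_to_run, benchmarks), f)

    def deserialize_benchmarks(filename):
        with open(filename, 'rb') as f:
            first_benchmark_to_run, benchmarks = pickle.load(f)
        return benchmarks, first_benchmark_to_run
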
  /packages/apps/QuickSearchBox/benchmarks/src/com/android/quicksearchbox/benchmarks/
ApplicationsLatency.java 17 package com.android.quicksearchbox.benchmarks;
25 mmm packages/apps/QuickSearchBox/benchmarks \
29 -n com.android.quicksearchbox.benchmarks/.ApplicationsLatency \
ContactsLatency.java 17 package com.android.quicksearchbox.benchmarks;
25 mmm packages/apps/QuickSearchBox/benchmarks \
29 -n com.android.quicksearchbox.benchmarks/.ContactsLatency \
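
The comments in these files document the launch recipe: build with `mmm packages/apps/QuickSearchBox/benchmarks`, then start the benchmark by its component name. Only the `-n component` part is visible in the hits, so the rest of the command in this hypothetical helper is an assumption:

    import subprocess

    # Hypothetical launcher for a QuickSearchBox latency benchmark;
    # assumes adb is on PATH and the APK is already installed.
    def run_latency_benchmark(activity):
        component = 'com.android.quicksearchbox.benchmarks/.%s' % activity
        subprocess.check_call(['adb', 'shell', 'am', 'start', '-n', component])

    run_latency_benchmark('ApplicationsLatency')
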
  /external/v8/tools/
try_perf.py 59 parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
75 if not options.benchmarks:
76 print 'Please specify the benchmarks to run as arguments.'
79 for benchmark in options.benchmarks:
83 'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
99 benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
100 cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
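
try_perf.py validates each requested benchmark against a whitelist of public benchmarks, then folds the survivors into a single testfilter property for the try job. Stitching the visible pieces into a self-contained sketch (the contents of PUBLIC_BENCHMARKS and the base command are not shown in these hits, so the values below are placeholders):

    import argparse
    import sys

    PUBLIC_BENCHMARKS = ['example_benchmark']  # placeholder list

    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument('benchmarks', nargs='+',
                            help='The benchmarks to run.')
        options = parser.parse_args()
        for benchmark in options.benchmarks:
            if benchmark not in PUBLIC_BENCHMARKS:
                print('%s not found. Available public benchmarks: %s'
                      % (benchmark, PUBLIC_BENCHMARKS))
                return 1
        # Quote each name and pass the list as one testfilter property.
        benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
        cmd = ['try_job']  # placeholder; the base command is not in the hits
        cmd += ["-p 'testfilter=[%s]'" % ','.join(benchmarks)]
        print(' '.join(cmd))
        return 0

    if __name__ == '__main__':
        sys.exit(main())
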
  /external/autotest/client/site_tests/kernel_Lmbench/
kernel_Lmbench.py 11 """Run some benchmarks from the lmbench3 suite.
13 lmbench is a series of micro benchmarks intended to measure basic operating
20 perl. Here we can also tune the individual benchmarks to be more
59 """Run the benchmarks.
68 benchmarks = [
100 for (bm, cmd) in benchmarks:
121 # Common parameters for the benchmarks. More details here:
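
kernel_Lmbench.py keeps its tunable micro benchmarks as a list of (name, command) pairs and iterates over them, which is what makes the per-benchmark tuning the docstring mentions easy. A stripped-down sketch of that driver loop (the command lines below are placeholders, not the test's real lmbench invocations):

    import subprocess

    # (name, command) pairs in the style of kernel_Lmbench.py; the
    # commands are placeholders for the suite's real invocations.
    benchmarks = [
        ('lat_syscall', 'lat_syscall null'),
        ('bw_mem_rd', 'bw_mem 64m rd'),
    ]

    for (bm, cmd) in benchmarks:
        # Run each micro benchmark and capture its output for parsing.
        output = subprocess.check_output(cmd, shell=True, text=True)
        print('%s: %s' % (bm, output.strip()))
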
  /external/google-benchmark/src/
benchmark_api_internal.h 36 std::vector<Benchmark::Instance>* benchmarks,
