/frameworks/base/libs/hwui/tests/macrobench/ |
main.cpp | 24 #include <benchmark/benchmark.h> 46 std::unique_ptr<benchmark::BenchmarkReporter> gBenchmarkReporter; 49 benchmark::BenchmarkReporter* reporter); 136 gBenchmarkReporter.reset(new benchmark::ConsoleReporter()); 138 gBenchmarkReporter.reset(new benchmark::JSONReporter()); 140 gBenchmarkReporter.reset(new benchmark::CSVReporter()); 311 gBenchmarkReporter.reset(new benchmark::ConsoleReporter()); 322 benchmark::BenchmarkReporter::Context context; 323 context.num_cpus = benchmark::NumCPUs() … |
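Judging from the matches, main.cpp chooses among the library's three stock reporters based on a command-line option. A minimal sketch of that pattern, assuming an illustrative --json flag rather than the hwui harness's real option parsing:

    #include <benchmark/benchmark.h>

    #include <cstring>
    #include <memory>

    static void BM_noop(benchmark::State& state) {
        while (state.KeepRunning()) {
            benchmark::DoNotOptimize(state.iterations());
        }
    }
    BENCHMARK(BM_noop);

    int main(int argc, char** argv) {
        benchmark::Initialize(&argc, argv);
        // Pick console, JSON, or CSV output; console is the library default.
        std::unique_ptr<benchmark::BenchmarkReporter> reporter;
        if (argc > 1 && std::strcmp(argv[1], "--json") == 0) {  // illustrative flag
            reporter.reset(new benchmark::JSONReporter());
        } else {
            reporter.reset(new benchmark::ConsoleReporter());
        }
        benchmark::RunSpecifiedBenchmarks(reporter.get());
        return 0;
    }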
/external/v8/benchmarks/ |
base.js | 29 // Simple framework for running the benchmark suites and 33 // A benchmark has a name (string) and a function that will be run to 36 // running the benchmark, but the running time of these functions will 37 // not be accounted for in the benchmark score. 38 function Benchmark(name, run, setup, tearDown) { 46 // Benchmark results hold the benchmark and the measured time used to 47 // run the benchmark. The benchmark score is computed later once a 48 // full benchmark suite has run to completion … |
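base.js deliberately keeps setup and tearDown out of the timed score. The google-benchmark library that appears throughout these results expresses the same separation with fixtures; a minimal sketch, with illustrative names:

    #include <benchmark/benchmark.h>

    #include <vector>

    // SetUp/TearDown run outside the measured region, much as base.js
    // excludes its setup/tearDown functions from the benchmark score.
    class SuiteFixture : public benchmark::Fixture {
       public:
        void SetUp(const benchmark::State&) override { data_.assign(1024, 42); }
        void TearDown(const benchmark::State&) override { data_.clear(); }

       protected:
        std::vector<int> data_;
    };

    BENCHMARK_F(SuiteFixture, Sum)(benchmark::State& state) {
        while (state.KeepRunning()) {
            long sum = 0;
            for (int v : data_) sum += v;
            benchmark::DoNotOptimize(sum);
        }
    }

    BENCHMARK_MAIN();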
/system/netd/tests/benchmarks/ |
connect_benchmark.cpp | 62 * (TODO: ideally this should be against median latency, but google-benchmark only supports one 92 #include <benchmark/benchmark.h> 121 static void ipv4_loopback(benchmark::State& state, const bool waitBetweenRuns) { 178 static void ipv6_loopback(benchmark::State& state, const bool waitBetweenRuns) { 236 ::benchmark::State& state, const int reportingLevel, 290 static void ipv4_metrics_reporting_no_fwmark(::benchmark::State& state) { 293 BENCHMARK(ipv4_metrics_reporting_no_fwmark)->MinTime(MIN_TIME)->UseManualTime(); 296 static void ipv4_metrics_reporting_no_load(::benchmark::State& state) { 299 BENCHMARK(ipv4_metrics_reporting_no_load)->MinTime(MIN_TIME)->UseManualTime() … |
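The ->UseManualTime() chains above indicate that connect_benchmark.cpp times each operation itself and reports the result through the framework, the usual way to benchmark blocking I/O. A sketch of the pattern, assuming an illustrative MIN_TIME and a stand-in workload instead of netd's socket code:

    #include <benchmark/benchmark.h>

    #include <chrono>
    #include <thread>

    constexpr double MIN_TIME = 5.0;  // assumption: netd defines its own value

    static void loopback_example(benchmark::State& state, bool waitBetweenRuns) {
        while (state.KeepRunning()) {
            auto start = std::chrono::steady_clock::now();
            // Stand-in for the operation under test (e.g. a loopback connect()).
            std::this_thread::sleep_for(std::chrono::microseconds(50));
            std::chrono::duration<double> elapsed =
                    std::chrono::steady_clock::now() - start;
            // With UseManualTime(), only the reported time counts.
            state.SetIterationTime(elapsed.count());
            if (waitBetweenRuns) {
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            }
        }
    }
    // BENCHMARK_CAPTURE binds the extra bool, matching the two-argument
    // benchmark functions in the matches above.
    BENCHMARK_CAPTURE(loopback_example, no_wait, false)
            ->MinTime(MIN_TIME)
            ->UseManualTime();

    BENCHMARK_MAIN();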
/external/autotest/tko/ |
machine_aggr.cgi | 22 # input is a list of benchmark:key values -- benchmark1:key1,... 27 (benchmark, key) = benchmark_key_pair.split(':') 28 benchmark_idx.append(benchmark) 30 elif form.has_key("benchmark") and form.has_key("key"): 31 benchmarks = form["benchmark"].value 60 for benchmark, key in zip(benchmark_idx, key_idx): 63 where = { 'subdir' : benchmark, 'machine_idx' : machine.idx , 'status' : 6} 85 #get the base value for each benchmark 87 for test in frontend.test.select(db, { 'subdir' : benchmark, 'machine_idx' : machine.idx, 'kernel_idx' : kernel_base.idx}): 102 if benchmark == "kernbench" … |
/external/chromium-trace/catapult/telemetry/telemetry/internal/ |
story_runner.py | 298 def RunBenchmark(benchmark, finder_options): 306 benchmark.CustomizeBrowserOptions(finder_options.browser_options) 308 benchmark_metadata = benchmark.GetMetadata() 316 not decorators.IsBenchmarkEnabled(benchmark, possible_browser)): 317 print '%s is disabled on the selected browser' % benchmark.Name() 319 print 'Running benchmark anyway due to: --also-run-disabled-tests' 321 print 'Try --also-run-disabled-tests to force the benchmark to run.' 323 # benchmark name and disabled state. 326 benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False 329 # When a disabled benchmark is run we now want to return success since … |
/external/boringssl/src/ssl/test/runner/poly1305/ |
poly1305_test.go | 58 func benchmark(b *testing.B, size int, unaligned bool) { 72 func Benchmark64(b *testing.B) { benchmark(b, 64, false) } 73 func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) } 74 func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) } 75 func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) }
|
/prebuilts/go/darwin-x86/src/go/doc/testdata/ |
benchmark.go | 16 var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark") 25 // B is a type passed to Benchmark functions to manage benchmark 30 benchmark InternalBenchmark 37 // before a benchmark starts, but it can also be used to resume timing after 56 // ResetTimer sets the elapsed benchmark time to zero. 66 // If this is called, the benchmark will report ns/op and MB/s. 76 // runN runs a single benchmark for the specified number of iterations. 84 b.benchmark.F(b) 130 // run times the benchmark function in a separate goroutine … |
/prebuilts/go/linux-x86/src/go/doc/testdata/ |
benchmark.go | 16 var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark") 25 // B is a type passed to Benchmark functions to manage benchmark 30 benchmark InternalBenchmark 37 // before a benchmark starts, but it can also be used to resume timing after 56 // ResetTimer sets the elapsed benchmark time to zero. 66 // If this is called, the benchmark will report ns/op and MB/s. 76 // runN runs a single benchmark for the specified number of iterations. 84 b.benchmark.F(b) 130 // run times the benchmark function in a separate goroutine … |
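The StartTimer/StopTimer/ResetTimer controls documented here have close analogues in the google-benchmark C++ library seen elsewhere in these results. A minimal sketch of pausing the clock around untimed per-iteration work (the sort workload is illustrative):

    #include <benchmark/benchmark.h>

    #include <algorithm>
    #include <vector>

    static void BM_sort_copy(benchmark::State& state) {
        std::vector<int> original(10000);
        for (int i = 0; i < 10000; ++i) original[i] = 10000 - i;
        while (state.KeepRunning()) {
            state.PauseTiming();               // analogous to B.StopTimer
            std::vector<int> copy = original;  // untimed setup each iteration
            state.ResumeTiming();              // analogous to B.StartTimer
            std::sort(copy.begin(), copy.end());
            benchmark::DoNotOptimize(copy.data());
        }
    }
    BENCHMARK(BM_sort_copy);

    BENCHMARK_MAIN();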
/bionic/benchmarks/ |
property_benchmark.cpp | 29 #include <benchmark/benchmark.h> 137 static void BM_property_get(benchmark::State& state) { 148 BENCHMARK(BM_property_get)->TEST_NUM_PROPS; 150 static void BM_property_find(benchmark::State& state) { 160 BENCHMARK(BM_property_find)->TEST_NUM_PROPS; 162 static void BM_property_read(benchmark::State& state) { 183 BENCHMARK(BM_property_read)->TEST_NUM_PROPS; 185 static void BM_property_serial(benchmark::State& state) { 204 BENCHMARK(BM_property_serial)->TEST_NUM_PROPS … |
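TEST_NUM_PROPS above is presumably a local macro that expands to a chain of argument registrations; the snippet does not show its definition. The generic shape of argument-driven registration, sketched rather than copied from bionic:

    #include <benchmark/benchmark.h>

    static void BM_lookup(benchmark::State& state) {
        const int num_props = state.range(0);  // value supplied by Arg() below
        while (state.KeepRunning()) {
            // Stand-in for a lookup against a table of num_props properties.
            benchmark::DoNotOptimize(num_props);
        }
    }
    // Each Arg() value registers a separate run of the benchmark.
    BENCHMARK(BM_lookup)->Arg(1)->Arg(4)->Arg(16)->Arg(64)->Arg(128);

    BENCHMARK_MAIN();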
/external/caliper/caliper/src/main/java/com/google/caliper/runner/ |
BenchmarkClass.java | 102 // Benchmark. 118 public void cleanup(Object benchmark) throws UserCodeException { 119 callTearDown(benchmark); 149 private void callSetUp(Object benchmark) throws UserCodeException { 152 method.invoke(benchmark); 163 private void callTearDown(Object benchmark) throws UserCodeException { 166 method.invoke(benchmark);
|
/cts/tests/openglperf2/jni/primitive/ |
GLPrimitive.cpp | 30 // Holds the current benchmark's renderer. 79 JNIEnv* env, jclass /*clazz*/, jobject surface, jint benchmark, 82 switch (benchmark) { 97 "Unknown benchmark '%d'", benchmark);
|
/external/caliper/caliper/src/main/java/com/google/caliper/worker/ |
ArbitraryMeasurementWorker.java | 22 import com.google.caliper.runner.Running.Benchmark; 41 @Benchmark Object benchmark, 44 super(benchmark, method); 58 double measured = (Double) benchmarkMethod.invoke(benchmark);
|
/external/okhttp/benchmarks/src/main/java/com/squareup/okhttp/benchmarks/ |
UrlConnection.java | 34 @Override public void prepare(Benchmark benchmark) { 35 super.prepare(benchmark); 36 if (benchmark.tls) {
|
/system/core/logcat/tests/ |
logcat_benchmark.cpp | 21 #include <benchmark/benchmark.h> 25 static void BM_logcat_sorted_order(benchmark::State& state) { 131 BENCHMARK(BM_logcat_sorted_order);
|
/external/autotest/client/site_tests/kernel_Lmbench/ |
kernel_Lmbench.py | 23 Example benchmark runs and outputs on a Lumpy device: 61 For details and output format refer to individual benchmark man pages: 128 # procs - number of processes for context switch benchmark - lat_ctx
|
/external/chromium-trace/catapult/telemetry/telemetry/ |
benchmark_runner_unittest.py | 7 from telemetry import benchmark 13 class BenchmarkFoo(benchmark.Benchmark): 14 """ Benchmark Foo for testing.""" 21 class BenchmarkBar(benchmark.Benchmark): 22 """ Benchmark Bar for testing long description line.""" 28 class UnusualBenchmark(benchmark.Benchmark): 43 ' BarBenchmarkkkkk Benchmark Bar for testing long description line.\n … |
/external/google-benchmark/src/ |
colorprint.h | 8 namespace benchmark { 31 } // end namespace benchmark
|
sleep.cc | 26 namespace benchmark { 50 } // end namespace benchmark
|
timers.h | 7 namespace benchmark { 46 } // end namespace benchmark
|
/external/libcxx/utils/google-benchmark/src/ |
colorprint.h | 8 namespace benchmark { 31 } // end namespace benchmark
|
sleep.cc | 26 namespace benchmark { 50 } // end namespace benchmark
|
timers.h | 7 namespace benchmark { 46 } // end namespace benchmark
|
/external/protobuf/benchmarks/ |
ProtoBench.java | 60 System.err.println("e.g. com.google.protocolbuffers.benchmark.Message1"); 104 benchmark("Serialize to byte string", inputData.length, new Action() { 107 benchmark("Serialize to byte array", inputData.length, new Action() { 110 benchmark("Serialize to memory stream", inputData.length, new Action() { 116 benchmark("Serialize to /dev/null with FileOutputStream", inputData.length, new Action() { 121 benchmark("Serialize to /dev/null reusing FileOutputStream", inputData.length, new Action() { 128 benchmark("Deserialize from byte string", inputData.length, new Action() { 133 benchmark("Deserialize from byte array", inputData.length, new Action() { 139 benchmark("Deserialize from memory stream", inputData.length, new Action() { 156 private static void benchmark(String name, long dataSize, Action action) throws IOException … |
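ProtoBench drives each serialize/deserialize action over a fixed payload and reports throughput. In the google-benchmark style used elsewhere in these results, the analogous mechanism is SetBytesProcessed, which makes the framework print MB/s; a hedged sketch with an illustrative copy standing in for serialization:

    #include <benchmark/benchmark.h>

    #include <cstdint>
    #include <cstring>
    #include <vector>

    static void BM_serialize(benchmark::State& state) {
        std::vector<char> input(64 * 1024, 'x');  // illustrative payload
        std::vector<char> out(input.size());
        int64_t bytes = 0;
        while (state.KeepRunning()) {
            // Stand-in for "Serialize to byte array".
            std::memcpy(out.data(), input.data(), input.size());
            benchmark::DoNotOptimize(out.data());
            bytes += static_cast<int64_t>(input.size());
        }
        state.SetBytesProcessed(bytes);  // framework reports MB/s
    }
    BENCHMARK(BM_serialize);

    BENCHMARK_MAIN();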
/external/toolchain-utils/crosperf/ |
mock_instance.py | 8 from benchmark import Benchmark 38 benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '', 41 benchmark2 = Benchmark('benchmark2', 'autotest_name_2', 'autotest_args', 2, '',
|
/prebuilts/go/darwin-x86/test/bench/go1/ |
parser_test.go | 7 // go parser benchmark based on go/parser/performance_test.go 40 b.Fatalf("benchmark failed due to parse error: %s", err)
|