/system/extras/mmap-perf/ |
mmapPerf.cpp |
     1 #include "benchmark/benchmark_api.h"
   113 static void benchRandomRead(benchmark::State& state) {
   121 BENCHMARK(benchRandomRead);
   123 static void benchRandomWrite(benchmark::State& state) {
   131 BENCHMARK(benchRandomWrite);
   133 static void benchLinearRead(benchmark::State& state) {
   142 BENCHMARK(benchLinearRead);
   144 static void benchLinearWrite(benchmark::State& state) {
   153 BENCHMARK(benchLinearWrite);
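The hits above show google-benchmark's basic pattern: a state-loop function plus a BENCHMARK() registration (the older benchmark_api.h header spells the same API). A minimal self-contained sketch, with an illustrative buffer scan standing in for the real mmap workload:

    #include <benchmark/benchmark.h>

    #include <vector>

    static void benchLinearReadSketch(benchmark::State& state) {
        std::vector<char> buf(4096, 1);
        size_t sum = 0;
        while (state.KeepRunning()) {      // one timed iteration per pass
            for (char c : buf) sum += c;   // stand-in for the mmap'd linear read
        }
        benchmark::DoNotOptimize(sum);     // keep the loop from being optimized away
    }
    BENCHMARK(benchLinearReadSketch);

    BENCHMARK_MAIN();  // expands to a main() that runs every registered benchmark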
|
/test/vts/tools/build/tasks/list/ |
vts_test_bin_package_list.mk | 17 android.hardware.tests.msgq@1.0-service-benchmark \
|
/external/caliper/caliper/src/main/java/com/google/caliper/runner/ |
ExperimentModule.java |
    19 import static com.google.caliper.runner.Running.Benchmark;
    64 @Benchmark
    82 @Benchmark
    87 private static Method findBenchmarkMethod(Class<?> benchmark, String methodName,
    91 return benchmark.getDeclaredMethod(methodName, params);
|
Parameter.java |
    83 "benchmark",
    99 void inject(Object benchmark, String value) {
   102 field.set(benchmark, o);
|
ParameterSet.java |
    33 * benchmark class. Has nothing to do with particular choices of <i>values</i> for these parameters
    86 public void injectAll(Object benchmark, Map<String, String> actualValues) {
    89 parameter.inject(benchmark, value);
|
AllocationInstrument.java |
    24 import com.google.caliper.Benchmark;
    48 * benchmark method and reports some statistic. The benchmark method must accept a
    50 * the benchmark method, and it must be public and non-static.
    67 return method.isAnnotationPresent(Benchmark.class) || BenchmarkMethods.isTimeMethod(method);
    86 throw new InvalidBenchmarkException("Benchmark methods must have no arguments or accept "
    97 public void dryRun(Object benchmark) throws UserCodeException {
    98 // execute the benchmark method, but don't try to take any measurements, because this JVM
   101 benchmarkMethod.invoke(benchmark, 1);
   138 public void dryRun(Object benchmark) throws InvalidBenchmarkException [all...]
/external/toolchain-utils/crosperf/ |
experiment_factory.py |
    11 from benchmark import Benchmark
   101 telemetry_benchmark = Benchmark(test_name, test_name, test_args,
   161 all_benchmark_settings = experiment_file.GetSettings('benchmark')
   190 Benchmark(
   207 benchmark = Benchmark(test_name, test_name, test_args, iterations,
   210 benchmarks.append(benchmark)
   228 # Add the single benchmark.
   229 benchmark = Benchmark [all...]
experiment.py |
   118 """Generate benchmark runs from labels and benchmark definitions."""
   121 for benchmark in self.benchmarks:
   122 for iteration in xrange(1, benchmark.iterations + 1):
   124 benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
   126 full_name = '%s_%s_%s' % (label.name, benchmark.name, iteration)
   130 benchmark_run_name, benchmark, label, iteration,
|
/prebuilts/go/darwin-x86/src/go/parser/ |
performance_test.go | 22 b.Fatalf("benchmark failed due to parse error: %s", err)
|
/prebuilts/go/linux-x86/src/go/parser/ |
performance_test.go | 22 b.Fatalf("benchmark failed due to parse error: %s", err)
|
/external/v8/tools/ |
parser-shell.cc |
   142 std::string benchmark;
   151 } else if (strncmp(argv[i], "--benchmark=", 12) == 0) {
   152 benchmark = std::string(argv[i]).substr(12);
   181 if (benchmark.empty()) benchmark = "Baseline";
   182 printf("%s(FirstParseRunTime): %.f ms\n", benchmark.c_str(),
   184 printf("%s(SecondParseRunTime): %.f ms\n", benchmark.c_str(),
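These hits show a hand-rolled --benchmark= flag: strncmp() matches the prefix, substr(12) takes the value, and "Baseline" is the default label. A sketch of that pattern in isolation (the main() and the placeholder timing are illustrative, not the real tool):

    #include <cstdio>
    #include <cstring>
    #include <string>

    int main(int argc, char* argv[]) {
        std::string benchmark;
        for (int i = 1; i < argc; ++i) {
            if (std::strncmp(argv[i], "--benchmark=", 12) == 0) {
                benchmark = std::string(argv[i]).substr(12);  // value after the '='
            }
        }
        if (benchmark.empty()) benchmark = "Baseline";  // default label, as at line 181
        printf("%s(FirstParseRunTime): %.f ms\n", benchmark.c_str(), 12.0);  // placeholder number
        return 0;
    }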
|
/system/extras/tests/binder/benchmarks/ |
binderAddInts.cpp |
    19 * Binder add integers benchmark (Using google-benchmark library)
    40 #include <benchmark/benchmark.h>
    82 static void BM_addInts(benchmark::State& state);
   106 static void BM_addInts(benchmark::State& state)
   129 // Perform the IPC operations in the benchmark
   164 BENCHMARK(BM_addInts);
   249 ::benchmark::Initialize(&argc, argv);
   252 // available at the start of the benchmark [all...]
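Line 249 shows the explicit entry point used when a benchmark binary needs its own main(), e.g. to stand up a binder service before measuring. A sketch, assuming only the google-benchmark API (the IPC body is omitted):

    #include <benchmark/benchmark.h>

    static void BM_addIntsSketch(benchmark::State& state) {
        while (state.KeepRunning()) {
            // the binder transaction would be issued here; omitted in this sketch
        }
    }
    BENCHMARK(BM_addIntsSketch);

    int main(int argc, char** argv) {
        ::benchmark::Initialize(&argc, argv);  // consumes --benchmark_* flags
        // process-specific setup (service registration, fork, etc.) would go here
        ::benchmark::RunSpecifiedBenchmarks();
        return 0;
    }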
/external/autotest/cli/ |
job_unittest.py |
   259 u'control_file': u'def step_init():\n job.next_step(\'step0\')\n\ndef step0():\n AUTHOR = "mbligh@google.com (Martin Bligh)"\n NAME = "Kernbench"\n TIME = "SHORT"\n TEST_CLASS = "Kernel"\n TEST_CATEGORY = "Benchmark"\n TEST_TYPE = "client"\n \n DOC = """\n A standard CPU benchmark. Runs a kernel compile and measures the performance.\n """\n \n job.run_test(\'kernbench\')',
   277 u'job': {u'control_file': (same Kernbench control file as line 259)},
   298 u'control_file': (same Kernbench control file as line 259),
   323 u'job': {u'control_file': (same Kernbench control file as line 259)},
   348 u'job': {u'control_file': (same Kernbench control file as line 259) (…) [all...]
/build/soong/cc/ |
test.go |
   306 func (benchmark *benchmarkDecorator) linkerInit(ctx BaseModuleContext) {
   311 benchmark.baseLinker.dynamicProperties.RunPaths = append(benchmark.baseLinker.dynamicProperties.RunPaths, runpath)
   312 benchmark.binaryDecorator.linkerInit(ctx)
   315 func (benchmark *benchmarkDecorator) linkerDeps(ctx DepsContext, deps Deps) Deps {
   316 deps = benchmark.binaryDecorator.linkerDeps(ctx, deps)
   317 deps.StaticLibs = append(deps.StaticLibs, "libgoogle-benchmark")
   321 func (benchmark *benchmarkDecorator) install(ctx ModuleContext, file android.Path) {
   322 benchmark.binaryDecorator.baseInstaller.dir = filepath.Join("nativetest", ctx.ModuleName())
   323 benchmark.binaryDecorator.baseInstaller.dir64 = filepath.Join("nativetest64", ctx.ModuleName() [all...]
/external/autotest/server/cros/ |
telemetry_runner.py |
   199 """Build command to execute telemetry based on script and benchmark.
   203 @param test_or_benchmark: Name of the test or benchmark we want to run,
   299 @param test_or_benchmark: Name of the test or benchmark we want to run,
   365 def run_telemetry_benchmark(self, benchmark, perf_value_writer=None,
   367 """Runs a telemetry benchmark on a dut.
   369 @param benchmark: Benchmark we want to run.
   380 logging.debug('Running telemetry benchmark: %s', benchmark)
   382 if benchmark not in ON_DUT_WHITE_LIST [all...]
/external/google-benchmark/src/ |
benchmark.cc |
    15 #include "benchmark/benchmark.h"
    59 "Minimum number of seconds we should run benchmark before "
    64 "of the benchmark execution, regardless of number of "
    68 "The number of runs of each benchmark. If greater than 1, the "
    72 "Report the result of each benchmark repetitions. When 'true' is "
    95 namespace benchmark {
   217 const benchmark::internal::Benchmark::Instance& b,
   220 // Create report about this benchmark run [all...]
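The quoted help strings belong to the runner's command-line flags (minimum run time, repetition count, aggregate-only reporting). The same knobs can be pinned per benchmark at registration time; a sketch, assuming the library's builder methods of that era:

    #include <benchmark/benchmark.h>

    static void BM_demo(benchmark::State& state) {
        while (state.KeepRunning()) {}  // empty body; only the harness is of interest
    }
    // Roughly equivalent to running with --benchmark_min_time=2.0
    // --benchmark_repetitions=4 --benchmark_report_aggregates_only=true.
    BENCHMARK(BM_demo)->MinTime(2.0)->Repetitions(4)->ReportAggregatesOnly(true);

    BENCHMARK_MAIN();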
commandlineflags.h |
    25 namespace benchmark {
    77 } // end namespace benchmark
|
/external/libcxx/utils/google-benchmark/src/ |
benchmark.cc |
    15 #include "benchmark/benchmark.h"
    59 "Minimum number of seconds we should run benchmark before "
    64 "of the benchmark execution, regardless of number of "
    68 "The number of runs of each benchmark. If greater than 1, the "
    72 "Report the result of each benchmark repetitions. When 'true' is "
    95 namespace benchmark {
   217 const benchmark::internal::Benchmark::Instance& b,
   220 // Create report about this benchmark run [all...]
/external/llvm/utils/yaml-bench/ |
YAMLBench.cpp |
     1 //===- YAMLBench - Benchmark the YAMLParser implementation ----------------===//
   146 static void benchmark( llvm::TimerGroup &Group
   221 llvm::TimerGroup Group("YAML parser benchmark");
   222 benchmark(Group, "Fast", createJSONText(10, 500));
   224 llvm::TimerGroup Group("YAML parser benchmark");
   225 benchmark(Group, "Small Values", createJSONText(MemoryLimitMB, 5));
   226 benchmark(Group, "Medium Values", createJSONText(MemoryLimitMB, 500));
   227 benchmark(Group, "Large Values", createJSONText(MemoryLimitMB, 50000));
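YAMLBench times parses of synthetic JSON documents of varying value sizes under llvm::TimerGroup. A self-contained sketch of the same shape using std::chrono in place of the LLVM timers (createJSONText and the parse step are stand-ins, not the real implementations):

    #include <chrono>
    #include <cstdio>
    #include <string>

    // Stand-in for YAMLBench's createJSONText(MemoryLimitMB, ValueSize).
    static std::string createJSONText(size_t MemoryLimitMB, size_t /*ValueSize*/) {
        return std::string(MemoryLimitMB * 1024 * 1024, 'x');
    }

    static void benchmark(const char* Name, const std::string& JSONText) {
        auto Start = std::chrono::steady_clock::now();
        size_t Sum = 0;
        for (char C : JSONText) Sum += C;  // stand-in for the YAML parse
        double Ms = std::chrono::duration<double, std::milli>(
                        std::chrono::steady_clock::now() - Start).count();
        printf("%-13s %8.2f ms (checksum %zu)\n", Name, Ms, Sum);
    }

    int main() {
        benchmark("Small Values", createJSONText(1, 5));
        benchmark("Medium Values", createJSONText(1, 500));
        benchmark("Large Values", createJSONText(1, 50000));
        return 0;
    }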
|
/frameworks/base/libs/hwui/tests/macrobench/ |
TestSceneRunner.cpp |
    25 #include <benchmark/benchmark.h>
    67 benchmark::BenchmarkReporter* reporter, RenderProxy* proxy,
    69 using namespace benchmark;
   114 benchmark::BenchmarkReporter* reporter)
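The macrobench runner threads a benchmark::BenchmarkReporter* through its scene loop rather than registering benchmarks. With stock google-benchmark the analogous hook is handing a reporter to RunSpecifiedBenchmarks(); a sketch, assuming the library's ConsoleReporter:

    #include <benchmark/benchmark.h>

    static void BM_frame(benchmark::State& state) {
        while (state.KeepRunning()) {
            // a frame would be rendered here; omitted in this sketch
        }
    }
    BENCHMARK(BM_frame);

    int main(int argc, char** argv) {
        ::benchmark::Initialize(&argc, argv);
        ::benchmark::ConsoleReporter reporter;           // stock console output
        ::benchmark::RunSpecifiedBenchmarks(&reporter);  // route all runs through it
        return 0;
    }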
|
/cts/tests/openglperf2/src/android/opengl2/cts/primitive/ |
GLPrimitiveBenchmark.java |
   113 * @param benchmark An enum representing the benchmark to run.
   117 * @param timeout The milliseconds to wait for an iteration of the benchmark before timing out.
   119 * @throws Exception If the benchmark could not be run.
   121 private void runBenchmark(BenchmarkName benchmark, boolean offscreen, int numFrames,
   123 String benchmarkName = benchmark.toString();
|
/prebuilts/go/darwin-x86/test/bench/go1/ |
mandel_test.go | 5 // This benchmark, taken from the shootout, tests floating point performance.
|
/prebuilts/go/linux-x86/test/bench/go1/ |
mandel_test.go | 5 // This benchmark, taken from the shootout, tests floating point performance.
|
/development/tutorials/MoarRam/src/com/android/benchmark/moarram/ |
MainActivity.java | 1 package com.android.benchmark.moarram;
|
/external/android-clat/ |
clatd_microbenchmark.c |
    16 * clatd_microbenchmark.c - micro-benchmark for clatd tun send path
   138 void benchmark(const char *name, int fd, int s, int num, int do_read,
   200 benchmark("Blocking", fd, sock, NUMPACKETS, 1, payload, sizeof(payload), payload_sum);
   205 benchmark("No read", fd, sock, NUMPACKETS, 0, payload, sizeof(payload), payload_sum);
   210 benchmark("Nonblocking", fd, sock, NUMPACKETS, 1, payload, sizeof(payload), payload_sum);
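clatd_microbenchmark.c hand-rolls its harness: benchmark() pushes num packets through the tun fd, optionally reading them back, and reports the rate. A self-contained sketch of that shape, with the packet I/O replaced by a stand-in loop:

    #include <chrono>
    #include <cstdio>

    // Shape of clatd's harness; the tun write/read is replaced by arithmetic.
    static void benchmark(const char* name, int num) {
        auto start = std::chrono::steady_clock::now();
        volatile long sink = 0;
        for (int i = 0; i < num; i++) sink += i;  // stand-in for one packet send
        double sec = std::chrono::duration<double>(
                         std::chrono::steady_clock::now() - start).count();
        printf("%s: %d packets in %.3f s (%.0f pps)\n", name, num, sec, num / sec);
    }

    int main() {
        benchmark("Blocking", 1000000);
        benchmark("No read", 1000000);
        benchmark("Nonblocking", 1000000);
        return 0;
    }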
|