/prebuilts/go/linux-x86/test/bench/go1/ |
fmt_test.go | 7 // benchmark based on fmt/fmt_test.go
|
gzip_test.go | 5 // This benchmark tests gzip and gunzip performance.
|
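The gzip_test.go entry above benchmarks Go's compress/gzip round trip. A minimal C++ analogue, assuming zlib and the google-benchmark library (BM_Deflate, the 64 KiB input, and the compression level are illustrative choices, not taken from the Go file):

    #include "benchmark/benchmark.h"
    #include <zlib.h>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Deflate a fixed buffer once per iteration and report throughput.
    static void BM_Deflate(benchmark::State& state) {
      std::string input(1 << 16, 'a');                     // 64 KiB stand-in payload
      std::vector<Bytef> out(compressBound(input.size()));
      while (state.KeepRunning()) {
        uLongf out_len = out.size();
        compress2(out.data(), &out_len,
                  reinterpret_cast<const Bytef*>(input.data()), input.size(),
                  Z_DEFAULT_COMPRESSION);
        benchmark::DoNotOptimize(out_len);                 // keep the result live
      }
      state.SetBytesProcessed(int64_t(state.iterations()) * input.size());
    }
    BENCHMARK(BM_Deflate);
    BENCHMARK_MAIN();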
/system/vold/ |
Benchmark.cpp |
  17   #include "Benchmark.h"
  51   static nsecs_t benchmark(const std::string& path) {  [function in namespace android::vold]
  149  return benchmark(benchPath);
|
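vold's benchmark() returns nsecs_t (nanoseconds) for filesystem work under a path. A sketch of that shape, assuming std::chrono; the single 1 MiB write below is invented for illustration and is not what system/vold/Benchmark.cpp actually measures:

    #include <chrono>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Time one write to a file under `path` and return elapsed nanoseconds.
    static long long benchmarkPath(const std::string& path) {
      const std::string file = path + "/bench.tmp";
      std::vector<char> buf(1 << 20, 'x');        // hypothetical 1 MiB payload
      auto start = std::chrono::steady_clock::now();
      if (FILE* f = std::fopen(file.c_str(), "wb")) {
        std::fwrite(buf.data(), 1, buf.size(), f);
        std::fclose(f);
      }
      auto end = std::chrono::steady_clock::now();
      std::remove(file.c_str());                  // clean up the scratch file
      return std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    }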
/external/caliper/caliper/src/main/java/com/google/caliper/runner/ |
ExperimentingCaliperRun.java |
  102  stdout.println("  Benchmark Methods: " + FluentIterable.from(allExperiments)
  269  Object benchmark = experimentComponent.getBenchmarkInstance();
  270  benchmarkClass.setUpBenchmark(benchmark);
  272  experiment.instrumentation().dryRun(benchmark);
  275  // discard 'benchmark' now; the worker will have to instantiate its own anyway
  276  benchmarkClass.cleanup(benchmark);
|
/external/google-benchmark/src/ |
csv_reporter.cc |
  15   #include "benchmark/reporter.h"
  30   namespace benchmark {
  108  }  // end namespace benchmark
|
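csv_reporter.cc implements the library's CSV output format. A minimal sketch of how a binary ends up using it: register any benchmark, then run with --benchmark_format=csv (the explicit main() below is equivalent to BENCHMARK_MAIN()):

    #include "benchmark/benchmark.h"

    // The smallest possible benchmark, just so the reporter has a row to emit.
    static void BM_Noop(benchmark::State& state) {
      while (state.KeepRunning()) {
      }
    }
    BENCHMARK(BM_Noop);

    // Initialize() consumes --benchmark_* flags; --benchmark_format=csv routes
    // output through the CSVReporter defined in csv_reporter.cc.
    int main(int argc, char** argv) {
      benchmark::Initialize(&argc, argv);
      benchmark::RunSpecifiedBenchmarks();
      return 0;
    }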
cycleclock.h |
  26   #include "benchmark/macros.h"
  48   namespace benchmark {
  143  }  // end namespace benchmark
|
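cycleclock.h wraps a per-architecture cycle counter behind one Now() function. A rough x86-only sketch of the idea, assuming GCC/Clang intrinsics (the real header covers many more architectures and fallbacks):

    #include <cstdint>
    #include <x86intrin.h>  // __rdtsc, GCC/Clang on x86

    // Read the time-stamp counter. Unserialized, so suitable only for the
    // coarse cycle accounting a benchmark harness needs.
    static inline int64_t CycleNow() {
      return static_cast<int64_t>(__rdtsc());
    }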
re.h |
  31   namespace benchmark {
  124  }  // end namespace benchmark
|
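re.h abstracts a regex engine so --benchmark_filter can select benchmarks by name. A stand-in sketch of that selection step using std::regex (the real class wraps whichever backend the build chose):

    #include <iostream>
    #include <regex>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> names = {"BM_Hash", "BM_InsertValue", "BM_Noop"};
      std::regex filter("BM_Hash.*");  // what --benchmark_filter=BM_Hash.* compiles to
      for (const auto& name : names)
        if (std::regex_search(name, filter))
          std::cout << name << " selected\n";
    }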
/external/libcxx/benchmarks/ |
unordered_set_operations.bench.cpp |
  8    #include "benchmark/benchmark_api.h"
  111  void BM_Hash(benchmark::State& st, HashFn fn, GenInputs gen) {
  115  benchmark::DoNotOptimize(&last_hash);
  118  benchmark::DoNotOptimize(last_hash += fn(*it));
  120  benchmark::ClobberMemory();
|
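The BM_Hash fragments above show the standard pattern for keeping a measured loop honest: escape the accumulator with DoNotOptimize so the compiler cannot delete the hashing, and ClobberMemory to force stores to retire. A self-contained sketch of the same pattern, with std::hash and a made-up input set standing in for the file's HashFn/GenInputs parameters (and the current benchmark/benchmark.h header standing in for the older benchmark_api.h):

    #include "benchmark/benchmark.h"
    #include <cstddef>
    #include <functional>
    #include <string>
    #include <vector>

    static void BM_HashStrings(benchmark::State& state) {
      std::vector<std::string> inputs(64, "some key material");
      std::hash<std::string> fn;
      std::size_t last_hash = 0;
      benchmark::DoNotOptimize(&last_hash);              // escape the accumulator
      while (state.KeepRunning()) {
        for (const auto& s : inputs)
          benchmark::DoNotOptimize(last_hash += fn(s));  // keep each hash live
        benchmark::ClobberMemory();                      // force pending writes out
      }
    }
    BENCHMARK(BM_HashStrings);
    BENCHMARK_MAIN();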
/external/libcxx/utils/google-benchmark/src/ |
csv_reporter.cc |
  15   #include "benchmark/reporter.h"
  30   namespace benchmark {
  108  }  // end namespace benchmark
|
cycleclock.h |
  26   #include "benchmark/macros.h"
  48   namespace benchmark {
  143  }  // end namespace benchmark
|
re.h |
  31   namespace benchmark {
  124  }  // end namespace benchmark
|
/frameworks/base/libs/androidfw/tests/ |
Android.mk | 115 libgoogle-benchmark
|
/prebuilts/go/darwin-x86/src/go/types/ |
self_test.go |
  21  var benchmark = flag.Bool("b", false, "run benchmarks")
  43  if !*benchmark {
  65  b := testing.Benchmark(func(b *testing.B) {
|
/prebuilts/go/darwin-x86/src/math/big/ |
calibrate_test.go |
  5   // This file prints execution times for the Mul benchmark
  28  // measureKaratsuba returns the time to run a Karatsuba-relevant benchmark
  32  res := testing.Benchmark(karatsubaLoad)
|
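measureKaratsuba times one benchmark run via testing.Benchmark so calibrate_test.go can compare candidate thresholds. A C++ sketch of the same measure-and-compare step, assuming std::chrono; workload() is a placeholder for the multiply-heavy routine being calibrated:

    #include <chrono>
    #include <cstdio>

    // Run `workload` `runs` times and return mean nanoseconds per run.
    template <typename Fn>
    long long NsPerRun(Fn workload, int runs) {
      auto start = std::chrono::steady_clock::now();
      for (int i = 0; i < runs; ++i) workload();
      auto end = std::chrono::steady_clock::now();
      return std::chrono::duration_cast<std::chrono::nanoseconds>(end - start)
                 .count() / runs;
    }

    int main() {
      volatile long sink = 0;
      auto workload = [&] { for (long i = 0; i < 1000; ++i) sink = sink + i * i; };
      std::printf("%lld ns/run\n", NsPerRun(workload, 10000));
    }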
/prebuilts/go/linux-x86/src/go/types/ |
self_test.go |
  21  var benchmark = flag.Bool("b", false, "run benchmarks")
  43  if !*benchmark {
  65  b := testing.Benchmark(func(b *testing.B) {
|
/prebuilts/go/linux-x86/src/math/big/ |
calibrate_test.go |
  5   // This file prints execution times for the Mul benchmark
  28  // measureKaratsuba returns the time to run a Karatsuba-relevant benchmark
  32  res := testing.Benchmark(karatsubaLoad)
|
/external/vogar/src/vogar/android/ |
HostRuntime.java |
  138  if (!run.benchmark && run.checkJni) {
  160  dex = new JackDexTask(run, classpath, run.benchmark, name, classpathElement, action,
  163  dex = new DexTask(run.androidSdk, classpath, run.benchmark, name, classpathElement,
|
/external/owasp/sanitizer/ |
Makefile |
  18   @echo " benchmark - Times the sanitizer against a tree builder."
  19   @echo " profile - Profiles the benchmark."
  38   TEST_CLASSPATH=$(CLASSPATH):lib/htmlparser-1.3/htmlparser-1.3.jar:lib/junit/junit.jar:lib/commons-codec-1.4/commons-codec-1.4.jar:benchmark-data
  125  java ${JASSERTS} -cp tools/emma/lib/emma.jar:lib/guava-libraries/guava.jar:lib/jsr305/jsr305.jar:lib/htmlparser-1.3/htmlparser-1.3.jar:lib/commons-codec-1.4/commons-codec-1.4.jar:benchmark-data \
  147  # Runs a benchmark that compares performance.
  148  benchmark: out/tests.tstamp
  150  org.owasp.html.Benchmark benchmark-data/Yahoo\!.html
  152  # Profiles the benchmark.
  155  java -cp ${TEST_CLASSPATH}:out/classes -agentlib:hprof=cpu=times,format=a,file=out/java.hprof.txt,lineno=y,doe=y org.owasp.html.Benchmark benchmark-data/Yahoo\!.html
  [all...]
|
/external/toolchain-utils/crosperf/ |
experiment_runner.py |
  152  br.benchmark.test_name, br.iteration, br.test_args,
  155  br.label, br.share_cache, br.benchmark.suite,
  156  br.benchmark.show_all_results, br.benchmark.run_local)
  273  self.l.LogOutput('Storing results of each benchmark run.')
  279  benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
|
/external/autotest/tko/ |
nightly.py |
  214  # for 1 benchmark metric over all kernels (rows),
  308  print "<tr> <td><b> Benchmark </b></td>",
  312  for benchmark in benchmarks:
  313  print "<tr> <td><b>", benchmark, "</b></td>"
  316  vals = plot_data[col].get(benchmark, [])
  322  perf.append_cgi_args(args, {'test':benchmark})
  385  # generate graph image for one benchmark, showing avg and
  413  xlegend = "Benchmark"
  468  # for 1 benchmark metric over all kernels (rows),
  493  # generate graph image for one benchmark
  [all...]
|
/prebuilts/go/darwin-x86/src/sync/ |
mutex_test.go |
  229  // This benchmark models a situation where spinning in the mutex should be
  234  // As a matter of fact, this benchmark still triggers some spinning in the mutex.
  263  // This benchmark models a situation where spinning in the mutex should be
|
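mutex_test.go's comments describe benchmarks built to provoke (or defeat) mutex spinning under contention. A C++ analogue using google-benchmark's multithreaded mode, where four threads hammer one lock around a tiny critical section; the thread count and workload are illustrative:

    #include "benchmark/benchmark.h"
    #include <mutex>

    static std::mutex mu;
    static long counter = 0;

    // Each registered thread runs this loop concurrently, so per-op time
    // reflects how the mutex handles contention (including any spinning).
    static void BM_ContendedMutex(benchmark::State& state) {
      while (state.KeepRunning()) {
        std::lock_guard<std::mutex> lock(mu);
        benchmark::DoNotOptimize(++counter);
      }
    }
    BENCHMARK(BM_ContendedMutex)->Threads(4);
    BENCHMARK_MAIN();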
/prebuilts/go/linux-x86/src/sync/ |
mutex_test.go |
  229  // This benchmark models a situation where spinning in the mutex should be
  234  // As a matter of fact, this benchmark still triggers some spinning in the mutex.
  263  // This benchmark models a situation where spinning in the mutex should be
|
/external/chromium-trace/catapult/telemetry/telemetry/ |
decorators.py |
  158  """Decorator for specifying the owner of a benchmark."""
  222  def IsBenchmarkEnabled(benchmark, possible_browser):
  223  return (not benchmark.ShouldDisable(possible_browser) and
  224  IsEnabled(benchmark, possible_browser)[0])
|
record_wpr_unittest.py |
  8    from telemetry import benchmark
  68   class MockBenchmark(benchmark.Benchmark):
  77   group.add_option('', '--mock-benchmark-url', action='store', type='string')
  87   class MockTimelineBasedMeasurementBenchmark(benchmark.Benchmark):
  95   group.add_option('', '--mock-benchmark-url', action='store', type='string')
  126  # When the RecorderPageTest is created from a Benchmark, the benchmark will
  163  flags.extend(['--mock-benchmark-url', self._url]
  [all...]
|
/external/chromium-trace/catapult/telemetry/telemetry/internal/results/ |
chart_json_output_formatter_unittest.py |
  10  from telemetry import benchmark
  32  self._benchmark_metadata = benchmark.BenchmarkMetadata(
  92  benchmark.BenchmarkMetadata('benchmark_name', ''),
|