/external/chromium_org/chrome/app/ |
PRESUBMIT.py | 39 results = [] 40 results.extend(_CheckNoProductNameInGeneratedResources(input_api, output_api)) 41 return results
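The chrome/app check above follows Chromium's standard PRESUBMIT convention: each _Check* helper returns a list of output_api messages, and the top-level hooks extend one combined results list. A minimal sketch of that shape, assuming the presubmit input/output APIs; the example rule itself is a made-up placeholder, not the real product-name check:

# Sketch of the Chromium PRESUBMIT.py pattern seen in this listing.
# Only the shape (checks return lists, hooks extend them) comes from
# the snippets; the rule below is hypothetical.
def _CheckExample(input_api, output_api):
  results = []
  for f in input_api.AffectedFiles():
    if f.LocalPath().endswith('.bak'):  # hypothetical rule
      results.append(output_api.PresubmitError(
          'Backup file checked in: %s' % f.LocalPath()))
  return results

def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(_CheckExample(input_api, output_api))
  return results

def CheckChangeOnCommit(input_api, output_api):
  results = []
  results.extend(_CheckExample(input_api, output_api))
  return results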
|
/external/chromium_org/content/shell/renderer/test_runner/ |
mock_grammar_check.cc | 19 std::vector<blink::WebTextCheckingResult>* results) { 20 DCHECK(results); 53 results->push_back(
|
/external/chromium_org/tools/perf/benchmarks/ |
browsermark.py | 30 def ValidateAndMeasurePage(self, _, tab, results): 42 'window.location.pathname.indexOf("results") != -1', 600) 45 results.AddValue( 46 scalar.ScalarValue(results.current_page, 'Score', 'score', result))
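browsermark.py and the other tools/perf benchmarks below share one measurement shape: ValidateAndMeasurePage waits for the page to finish, reads a score out of JavaScript, and reports it through results.AddValue(scalar.ScalarValue(...)). A sketch of that pattern, assuming the chromium_org-era Telemetry APIs; the class name and JavaScript expressions are placeholders:

# Sketch of the shared ValidateAndMeasurePage pattern (browsermark,
# html5gaming, jsgamebench, ...). The JS expressions below are
# illustrative, not taken from any of those pages.
from telemetry.page import page_test
from telemetry.value import scalar


class _ExampleScoreMeasurement(page_test.PageTest):  # hypothetical class
  def ValidateAndMeasurePage(self, page, tab, results):
    # Wait until the page signals completion, then pull the score.
    tab.WaitForJavaScriptExpression('window.benchmarkDone', 600)
    result = int(tab.EvaluateJavaScript('window.benchmarkScore'))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'Score', 'score', result))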
|
html5gaming.py | 22 def ValidateAndMeasurePage(self, _, tab, results): 30 results.AddValue( 31 scalar.ScalarValue(results.current_page, 'Score', 'score', result))
|
jsgamebench.py | 25 def ValidateAndMeasurePage(self, page, tab, results): 32 results.AddValue(scalar.ScalarValue( 33 results.current_page, 'Score', 'score (bigger is better)', result))
|
service_worker.py | 17 def ValidateAndMeasurePage(self, _, tab, results): 19 json = tab.EvaluateJavaScript('window.results') 21 results.AddValue(scalar.ScalarValue( 22 results.current_page, key, value['units'], value['value']))
|
kraken.py | 82 def ValidateAndMeasurePage(self, page, tab, results): 84 'document.title.indexOf("Results") != -1', 700) 88 self._power_metric.AddResults(tab, results) 99 results.AddValue(list_of_scalar_values.ListOfScalarValues( 100 results.current_page, key, 'ms', result_dict[key], important=False, 105 # results system should do that for us. 106 results.AddValue(scalar.ScalarValue( 107 results.current_page, 'Total', 'ms', total, 108 description='Total of the means of the results for each type '
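kraken.py differs from the simpler benchmarks above in that it reports per-subtest timings via ListOfScalarValues and then manually sums the means into a 'Total' scalar (its comment at line 105 notes the results system could do this itself). A rough sketch of that aggregation step, with the parsed result_dict assumed:

# Sketch of kraken.py's aggregation: each subtest contributes a list
# of ms samples; a 'Total' scalar sums the per-subtest means.
# result_dict (subtest name -> list of samples) is assumed here.
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar


def _AddKrakenStyleResults(results, result_dict):
  total = 0.0
  for key, samples in result_dict.iteritems():
    results.AddValue(list_of_scalar_values.ListOfScalarValues(
        results.current_page, key, 'ms', samples, important=False))
    total += sum(samples) / float(len(samples))
  results.AddValue(scalar.ScalarValue(
      results.current_page, 'Total', 'ms', total,
      description='Total of the means of the results for each type'))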
|
/external/chromium_org/tools/perf/measurements/ |
polymer_load.py | 32 def ValidateAndMeasurePage(self, _, tab, results): 34 results.AddValue(scalar.ScalarValue( 35 results.current_page, 'Total', 'ms', result))
|
repaint_unittest.py | 26 that all metrics were added to the results. The test is purely functional, 38 results = self.RunMeasurement(measurement, ps, options=self._options) 39 self.assertEquals(0, len(results.failures)) 41 frame_times = results.FindAllPageSpecificValuesNamed('frame_times') 45 mean_frame_time = results.FindAllPageSpecificValuesNamed('mean_frame_time') 49 jank = results.FindAllPageSpecificValuesNamed('jank') 53 mostly_smooth = results.FindAllPageSpecificValuesNamed('mostly_smooth')
|
thread_times_unittest.py | 25 results = self.RunMeasurement(measurement, ps, options = timeline_options) 26 self.assertEquals(0, len(results.failures)) 30 cpu_time = results.FindAllPageSpecificValuesNamed(cpu_time_name) 39 results = self.RunMeasurement(measurement, ps, options = timeline_options) 40 self.assertEquals(0, len(results.failures)) 44 cpu_time = results.FindAllPageSpecificValuesNamed(cpu_time_name)
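repaint_unittest.py and thread_times_unittest.py exercise the measurement-unittest pattern: run a measurement over a small page set, assert there were no failures, then look up the emitted values by name. A sketch assuming the chromium_org-era telemetry.unittest layout; the measurement, page file, and value name are placeholders:

# Sketch of the measurement-unittest pattern from the two files above.
# Import paths, the page file name, and the value name are assumptions.
from measurements import repaint
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case


class ExampleMeasurementUnitTest(page_test_test_case.PageTestTestCase):
  def setUp(self):
    self._options = options_for_unittests.GetCopy()

  def testEmitsFrameTimes(self):
    ps = self.CreatePageSetFromFileInUnittestDataDir('page_with_repaint.html')
    measurement = repaint.Repaint()
    results = self.RunMeasurement(measurement, ps, options=self._options)
    self.assertEquals(0, len(results.failures))

    frame_times = results.FindAllPageSpecificValuesNamed('frame_times')
    self.assertEquals(1, len(frame_times))
    self.assertGreater(len(frame_times[0].values), 0)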
|
/external/chromium_org/tools/telemetry/telemetry/core/platform/power_monitor/ |
sysfs_power_monitor_unittest.py | 190 results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats( 193 for freq in results[cpu]: 194 self.assertAlmostEqual(results[cpu][freq],
|
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/fixes/ |
fix_sys_exc.py | 22 def transform(self, node, results): 23 sys_attr = results["attribute"][0] 28 attr[1].children[0].prefix = results["dot"].prefix
|
fix_raise.py | 38 def transform(self, node, results): 41 exc = results["exc"].clone() 60 if "val" not in results: 66 val = results["val"].clone() 73 if "tb" in results: 74 tb = results["tb"].clone()
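In the lib2to3 fixers above, results is the dict of sub-nodes captured by the named groups in the fixer's PATTERN, which is why fix_raise.py reads results["exc"], results["val"], and results["tb"]. A minimal custom fixer as a sketch; the pattern and rewrite below are illustrative only, not taken from the stdlib files:

# Minimal lib2to3 fixer sketch. PATTERN binds names to matched nodes
# and transform() receives them in the `results` dict, exactly as in
# fix_raise.py / fix_sys_exc.py. The rename performed here is made up.
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name


class FixExampleRename(fixer_base.BaseFix):
  # Match calls to a function literally named old_name().
  PATTERN = "power< name='old_name' trailer< '(' any* ')' > >"

  def transform(self, node, results):
    name = results['name']  # the Leaf bound by name=... in PATTERN
    name.replace(Name('new_name', prefix=name.prefix))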
|
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/fixes/ |
fix_sys_exc.py | 22 def transform(self, node, results): 23 sys_attr = results["attribute"][0] 28 attr[1].children[0].prefix = results["dot"].prefix
|
fix_raise.py | 38 def transform(self, node, results): 41 exc = results["exc"].clone() 60 if "val" not in results: 66 val = results["val"].clone() 73 if "tb" in results: 74 tb = results["tb"].clone()
|
/system/media/camera/docs/ |
metadata-parser-sanity-check | 35 local results 36 results="$(python "$file" 2>&1)" 40 echo "$results" >& 2
|
/external/chromium_org/tools/telemetry/telemetry/web_perf/ |
timeline_based_measurement_unittest.py | 13 from telemetry.results import page_test_results 26 def AddResults(self, model, renderer_thread, interaction_records, results): 27 results.AddValue(scalar.ScalarValue( 28 results.current_page, 'FakeFastMetric', 'ms', 1)) 29 results.AddValue(scalar.ScalarValue( 30 results.current_page, 'FastMetricRecords', 'count', 36 def AddResults(self, model, renderer_thread, interaction_records, results): 37 results.AddValue(scalar.ScalarValue( 38 results.current_page, 'FakeSmoothMetric', 'ms', 1)) 39 results.AddValue(scalar.ScalarValue 76 def results(self): member in class:TimelineBasedMetricTestData [all...] |
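The fake metrics in timeline_based_measurement_unittest.py show the interface every timeline-based metric implements: AddResults receives the timeline model, the renderer thread, the interaction records, and the shared results object. A sketch of such a metric, assuming the chromium_org-era module path; the metric name and reported values are placeholders:

# Sketch of a timeline metric in the style of FakeFastMetric /
# FakeSmoothMetric above.
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric


class FakeExampleMetric(timeline_based_metric.TimelineBasedMetric):
  def AddResults(self, model, renderer_thread, interaction_records, results):
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'FakeExampleMetric', 'ms', 1))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'ExampleMetricRecords', 'count',
        len(interaction_records)))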
/external/chromium_org/build/android/pylib/linker/ |
test_runner.py | 91 results = test.Run(self.device) 96 results = base_test_result.TestRunResults() 97 results.AddResult(LinkerExceptionTestResult( 100 if not results.DidRunPass(): 101 return results, test 103 return results, None
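The linker test_runner.py aggregates per-test outcomes in a base_test_result.TestRunResults container: a failed run gets a result appended via AddResult, and DidRunPass() decides whether the test is handed back for retry. A rough sketch of that flow, assuming pylib's base_test_result API as quoted above; the result name string is a placeholder:

# Sketch of the result-aggregation flow in test_runner.py. The broad
# exception handling mirrors the snippet; the result name is made up.
from pylib.base import base_test_result


def _RunSingleTest(test, device):
  try:
    results = test.Run(device)
  except Exception:
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(
        'LinkerTest', base_test_result.ResultType.FAIL))
  if not results.DidRunPass():
    return results, test   # caller may retry the failed test
  return results, None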
|
/external/chromium_org/chromeos/login/auth/ |
mock_url_fetchers.h | 30 const std::string& results, 48 const std::string& results, 63 const std::string& results, 78 const std::string& results, 93 const std::string& results, 116 const std::string& results,
|
/external/chromium_org/content/test/gpu/page_sets/ |
PRESUBMIT.py | 64 results = [] 68 results.append(output_api.PresubmitError( 76 results.append(output_api.PresubmitError( 80 return results 84 results = _VerifyFilesInCloud(input_api, output_api) 85 return results 89 results = _VerifyFilesInCloud(input_api, output_api) 90 return results
|
/external/chromium_org/third_party/WebKit/Source/devtools/scripts/jsdoc-validator/ |
PRESUBMIT.py | 45 results = '\n'.join(['%s (%s != %s)' % (name, expected, actual) for (name, expected, actual) in hashes_modified]) 47 (build_jsdoc_validator_jar.jar_name, build_jsdoc_validator_jar.hashes_name, results))]
|
/external/chromium_org/third_party/WebKit/Source/modules/speech/ |
SpeechRecognitionEvent.idl | 31 [InitializedByEventConstructor] readonly attribute SpeechRecognitionResultList results;
|
/external/chromium_org/third_party/skia/gm/rebaseline_server/ |
compare_configs_test.py | 11 TODO(epoger): Create a command to update the expected results (in 13 1. examine the results in self.output_dir_actual and make sure they are ok 31 import results namespace 37 """Process results of a GM run with the ConfigComparisons object.""" 46 results.KEY__HEADER__RESULTS_ALL),
|
/external/chromium_org/third_party/skia/gm/rebaseline_server/static/ |
utils.js | 2 element = $(".results-header-actions");
|
/external/chromium_org/tools/auto_bisect/ |
PRESUBMIT.py | 31 results = [] 32 results.extend(_CheckAllConfigFiles(input_api, output_api)) 33 results.extend(_RunUnitTests(input_api, output_api)) 34 results.extend(_RunPyLint(input_api, output_api)) 35 return results 39 """Checks all bisect config files and returns a list of presubmit results.""" 40 results = [] 44 results.extend(_CheckConfigFile(config_file, output_api)) 45 return results 49 """Checks one bisect config file and returns a list of presubmit results.""" [all...]