/external/lldb/tools/lldb-perf/common/clang/ |
lldb_perf_clang.cpp |
    13 #include "lldb-perf/lib/Results.h"
    186 WriteResults (Results &results)
    188 Results::Dictionary& results_dict = results.GetDictionary();
    190 m_time_set_bp_main.WriteAverageAndStandardDeviation(results);
    199 m_time_create_target.WriteAverageAndStandardDeviation(results);
    200 m_expr_first_evaluate.WriteAverageAndStandardDeviation(results);
    201 m_expr_frame_zero.WriteAverageAndStandardDeviation(results);
    202 m_expr_frame_non_zero.WriteAverageAndStandardDeviation(results);
    [all...]
/external/eclipse-basebuilder/basebuilder-3.6.2/org.eclipse.releng.basebuilder/plugins/org.eclipse.test.performance.ui/src/org/eclipse/test/internal/performance/results/ui/ |
ComponentsView.java |
    11 package org.eclipse.test.internal.performance.results.ui;
    44 import org.eclipse.test.internal.performance.results.model.BuildResultsElement;
    45 import org.eclipse.test.internal.performance.results.model.ComponentResultsElement;
    46 import org.eclipse.test.internal.performance.results.model.ConfigResultsElement;
    47 import org.eclipse.test.internal.performance.results.model.ResultsElement;
    48 import org.eclipse.test.internal.performance.results.model.ScenarioResultsElement;
    49 import org.eclipse.test.internal.performance.results.utils.IPerformancesConstants;
    50 import org.eclipse.test.internal.performance.results.utils.Util;
    57 * View to see the performance results of all the components in a hierarchical tree.
    60 * machines (aka config). All builds results are stored onto each configuratio
    [all...]
BuildsView.java |
    11 package org.eclipse.test.internal.performance.results.ui;
    41 import org.eclipse.test.internal.performance.results.db.DB_Results;
    42 import org.eclipse.test.internal.performance.results.model.BuildResultsElement;
    43 import org.eclipse.test.internal.performance.results.model.ResultsElement;
    44 import org.eclipse.test.internal.performance.results.utils.IPerformancesConstants;
    45 import org.eclipse.test.internal.performance.results.utils.Util;
    54 * View to see all the builds which have performance results stored in the database.
    56 * Typical actions from this view are update local data files with builds results
    63 * Action to generate results.
    81 String[] baselines = BuildsView.this.results.getBaselines()
    [all...]
/external/chromium_org/chrome/browser/extensions/api/input/ |
input.cc |
    161 base::DictionaryValue* results = new base::DictionaryValue(); [local]
    162 results->SetString("layout", keyboard::GetKeyboardLayout());
    163 results->SetBoolean("a11ymode", keyboard::GetAccessibilityKeyboardEnabled());
    164 results->SetBoolean("experimental",
    166 SetResult(results);
/external/chromium_org/chrome/browser/resources/chromeos/chromevox/common/ |
xpath_util.js |
    59 null); // no existing results
    63 var results = [];
    68 results.push(xpathNode);
    70 return results;
    85 null); // no existing results
    89 var results = [];
    94 results.push(xpathNode);
    96 return results;
    125 null); // no existing results
    147 null); // no existing results
    [all...]
/external/chromium_org/chrome/browser/resources/history/ |
history_mobile.css |
    35 #results-display {
    40 #results-display,
    41 #results-pagination {
    85 .no-results-message {
    91 .search-results .no-results-message {
    148 #results-pagination button {
    177 .day-results,
    178 .search-results {
    301 .no-results-message
    [all...]
/external/chromium_org/media/ |
PRESUBMIT.py |
    163 results = []
    164 results.extend(_CheckForUseOfWrongClock(input_api, output_api))
    165 results.extend(_CheckForMessageLoopProxy(input_api, output_api))
    166 results.extend(_CheckForHistogramOffByOne(input_api, output_api))
    167 return results
/external/chromium_org/third_party/WebKit/Source/devtools/ |
PRESUBMIT.py |
    117 results = []
    118 results.extend(_CompileDevtoolsFrontend(input_api, output_api))
    119 results.extend(_CheckConvertSVGToPNGHashes(input_api, output_api))
    120 results.extend(_CheckOptimizePNGHashes(input_api, output_api))
    121 return results
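Both PRESUBMIT.py hits above (media/ and devtools/) follow the standard depot_tools presubmit convention: each private check takes (input_api, output_api) and returns a list of result objects, and the top-level hook concatenates those lists with extend. The sketch below illustrates only that aggregation shape; the _CheckNoTodoComments helper and its message are hypothetical and are not one of the checks quoted above.

    # Hypothetical presubmit sketch; only the results-aggregation pattern is
    # taken from the PRESUBMIT.py lines quoted above.

    def _CheckNoTodoComments(input_api, output_api):
      """Returns a (possibly empty) list of presubmit results."""
      offending_files = []
      for f in input_api.AffectedSourceFiles(None):
        contents = '\n'.join(f.NewContents())
        if 'TODO(fixme)' in contents:
          offending_files.append(f.LocalPath())
      if offending_files:
        return [output_api.PresubmitPromptWarning(
            'Found TODO(fixme) markers:', items=offending_files)]
      return []

    def CheckChangeOnUpload(input_api, output_api):
      results = []
      results.extend(_CheckNoTodoComments(input_api, output_api))
      # Additional _Check* helpers would be chained here the same way.
      return results
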
/external/chromium_org/third_party/icu/source/test/intltest/ |
allcoll.cpp |
    49 const Collator::EComparisonResult CollationDummyTest::results[] = { [member in class:CollationDummyTest]
    98 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    116 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    126 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    149 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
/external/chromium_org/third_party/skia/gm/rebaseline_server/ |
writable_expectations.py |
    25 import results [namespace]
    44 # use the modified results
    53 file_section = set_descriptions[results.KEY__SET_DESCRIPTIONS__SECTION]
    57 set_descriptions[results.KEY__SET_DESCRIPTIONS__DIR])
    61 set_descriptions[results.KEY__SET_DESCRIPTIONS__REPO_REVISION])
    77 dicts = results.BaseComparisons.read_dicts_from_root(self.root)
    79 # Make sure we have expected-results sections in all our output dicts.
/external/chromium_org/tools/telemetry/telemetry/page/ |
profile_generator.py |
    22 from telemetry.results import results_options
    77 results = results_options.CreateResults(
    79 page_runner.Run(test, test.page_set, expectations, options, results)
    81 if results.failures:
    84 '\n'.join(results.pages_that_failed))
    87 # Everything is a-ok, move results to final destination.
page_runner.py |
    24 from telemetry.results import results_options
    235 possible_browser, results, state):
    249 results.WillAttemptPageRun(attempt_num, max_attempts)
    271 _RunPage(test, page, state, expectation, results)
    334 def Run(test, page_set, expectations, finder_options, results):
    371 pages = _CheckArchives(page_set, pages, results)
    391 results.WillRunPage(page)
    393 results.AddValue(skip.SkipValue(page, 'Test cannot run'))
    394 results.DidRunPage(page)
    413 results.WillRunPage(page
    [all...]
page_test_unittest.py |
    21 def ValidateAndMeasurePage(self, page, tab, results):
    29 def ValidateAndMeasurePage(self, page, tab, results):
    35 results.AddValue(scalar.ScalarValue(page, 'x', 'ms', 7))
    39 def ValidateAndMeasurePage(self, page, tab, results):
    47 def ValidateAndMeasurePage(self, page, tab, results):
    55 def ValidateAndMeasurePage(self, page, tab, results):
    67 def ValidateAndMeasurePage(self, page, tab, results):
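The three telemetry hits above (profile_generator.py, page_runner.py, page_test_unittest.py) funnel measurements through a shared results object: the runner brackets each page with WillRunPage/DidRunPage, and a page test's ValidateAndMeasurePage records values via results.AddValue(...), e.g. scalar.ScalarValue(page, 'x', 'ms', 7) or skip.SkipValue(page, reason). The stand-in classes below are a sketch of that lifecycle only; everything except the method names visible in the quoted lines is invented and does not reflect telemetry's real implementation.

    # Stand-in sketch of the results lifecycle visible in the telemetry lines
    # above; class layouts and attribute names beyond the quoted methods are
    # assumptions.

    class ScalarValue(object):
      def __init__(self, page, name, units, value):
        self.page, self.name, self.units, self.value = page, name, units, value

    class SkipValue(object):
      def __init__(self, page, reason):
        self.page, self.reason = page, reason

    class PageTestResults(object):
      def __init__(self):
        self.failures = []
        self.pages_that_failed = []
        self._values = []

      def WillRunPage(self, page):
        self._current_page = page

      def AddValue(self, value):
        self._values.append(value)

      def DidRunPage(self, page):
        self._current_page = None

    def run_pages(measurement, pages, tab, results):
      # Mirrors the bracketing seen in page_runner.Run: WillRunPage ->
      # ValidateAndMeasurePage -> DidRunPage for every page in the set.
      for page in pages:
        results.WillRunPage(page)
        measurement.ValidateAndMeasurePage(page, tab, results)
        results.DidRunPage(page)
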
/external/eclipse-basebuilder/basebuilder-3.6.2/org.eclipse.releng.basebuilder/plugins/org.eclipse.test.performance.ui/src/org/eclipse/test/internal/performance/results/model/ |
DimResultsElement.java |
    11 package org.eclipse.test.internal.performance.results.model;
    16 import org.eclipse.test.internal.performance.results.db.*;
    55 // Results category
    57 DIMENSION_DESCRIPTOR.setCategory("Results");
    59 DIM_COUNT_DESCRIPTOR.setCategory("Results");
    61 DIM_AVERAGE_DESCRIPTOR.setCategory("Results");
    63 DIM_STDDEV_DESCRIPTOR.setCategory("Results");
    65 DIM_ERROR_DESCRIPTOR.setCategory("Results");
    67 DIM_HAD_VALUES_DESCRIPTOR.setCategory("Results");
    77 public DimResultsElement(AbstractResults results, ResultsElement parent, Dim d)
    [all...]
/external/icu/icu4c/source/test/intltest/ |
allcoll.cpp |
    49 const Collator::EComparisonResult CollationDummyTest::results[] = { [member in class:CollationDummyTest]
    98 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    116 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    126 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
    149 doTest(myCollation, testSourceCases[i], testTargetCases[i], results[i]);
/external/chromium_org/chrome/browser/chromeos/drive/ |
search_metadata.cc |
    232 MetadataSearchResultVector* results) {
    267 // the final results. Hence, here we fill the part.
    273 results->push_back(MetadataSearchResult(
    279 std::reverse(results->begin(), results->end());
    287 scoped_ptr<MetadataSearchResultVector> results,
    290 results.reset();
    291 callback.Run(error, results.Pass());
    312 scoped_ptr<MetadataSearchResultVector> results(
    314 MetadataSearchResultVector* results_ptr = results.get()
    [all...]
/external/deqp/scripts/ |
run_nightly.py |
    60 ${RESULTS}
    97 self.results = []
    101 results = []
    105 results.append(TestCaseResult(items[0], items[1]))
    107 return results
    114 batchResult.results = readTestCaseResultsFromCSV(filename)
    141 for result in batchResult.results:
    152 for result in batchResult.results:
    158 results = []
    168 results.append(MultiResult(caseName, statusCodes)
    [all...]
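The run_nightly.py hit above builds a list of TestCaseResult objects by splitting each CSV row and keeping the first two fields. The sketch below shows that parsing loop under the assumption of a simple "name,statusCode" row layout; the TestCaseResult field names are assumptions, since the quoted lines only show items[0] and items[1] being used.

    # Sketch of the CSV -> result-object parsing pattern seen in run_nightly.py.
    # The field names and the two-column layout are assumptions.
    import csv
    from collections import namedtuple

    TestCaseResult = namedtuple('TestCaseResult', ['name', 'statusCode'])

    def readTestCaseResultsFromCSV(filename):
      results = []
      with open(filename) as f:
        for items in csv.reader(f):
          if len(items) >= 2:
            results.append(TestCaseResult(items[0], items[1]))
      return results
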
/external/chromium_org/components/omnibox/ |
search_provider.cc |
    168 SearchSuggestionParser::Results* results) {
    170 // pressed a key. Revise the cached results appropriately.
    173 results->suggest_results.begin();
    174 sug_it != results->suggest_results.end(); ++sug_it) {
    178 results->navigation_results.begin();
    179 nav_it != results->navigation_results.end(); ++nav_it) {
    204 // Can't return search/suggest results for bogus input.
    268 // Answers needs scored history results before any suggest query has been
    269 started, since the query for answer-bearing results needs additiona
    345 SearchSuggestionParser::Results* results = [local]
    [all...]
/external/chromium_org/chrome/browser/media_galleries/ |
media_scan_manager_unittest.cc |
    180 const MediaFolderFinder::MediaFolderFinderResults& results) {
    182 find_folders_results_ = results;
    329 // A/H/ - included in results
    361 MediaFolderFinder::MediaFolderFinderResults results = [local]
    363 EXPECT_EQ(expected_folders.size(), results.size());
    368 EXPECT_TRUE(results.find(*it) != results.end());
    374 // A/ - included in results
    398 MediaFolderFinder::MediaFolderFinderResults results = [local]
    400 EXPECT_EQ(expected_folders.size(), results.size())
    443 MediaFolderFinder::MediaFolderFinderResults results = [local]
    506 MediaFolderFinder::MediaFolderFinderResults results = [local]
    [all...]
/external/chromium_org/v8/test/mjsunit/ |
contextual-calls.js |
    44 Realm.shared.results = [];
    49 Realm.shared.results.push(return_this()); \
    50 Realm.shared.results.push(return_this_strict()); \
    53 Realm.shared.results.push(return_this()); \
    54 Realm.shared.results.push(return_this_strict()); \
    57 assertSame(globals[0], Realm.shared.results[0]);
    58 assertSame(undefined, Realm.shared.results[1]);
    59 assertSame(globals[i], Realm.shared.results[2]);
    60 assertSame(globals[i], Realm.shared.results[3]);
/external/lldb/test/unittest2/test/ |
test_setups.py |
    182 results = []
    187 results.append('Module1.setUpModule')
    190 results.append('Module1.tearDownModule')
    195 results.append('Module2.setUpModule')
    198 results.append('Module2.tearDownModule')
    203 results.append('setup 1')
    206 results.append('teardown 1')
    208 results.append('Test1.testOne')
    210 results.append('Test1.testTwo')
    215 results.append('setup 2'
    [all...]
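The test_setups.py hit above verifies fixture ordering by having each setUp/tearDown hook append a marker string to a shared results list and then asserting on the resulting sequence. The same technique can be reproduced with the standard unittest module; the block below is a self-contained illustration of the pattern, not the unittest2 test quoted above.

    # Minimal illustration of recording fixture ordering into a results list,
    # runnable with the standard unittest module.
    import unittest

    results = []

    def setUpModule():
      results.append('setUpModule')

    def tearDownModule():
      results.append('tearDownModule')

    class Test1(unittest.TestCase):
      @classmethod
      def setUpClass(cls):
        results.append('setup 1')

      @classmethod
      def tearDownClass(cls):
        results.append('teardown 1')

      def testOne(self):
        results.append('Test1.testOne')

      def testTwo(self):
        results.append('Test1.testTwo')

    if __name__ == '__main__':
      unittest.main(exit=False)
      # Expected ordering: setUpModule, setup 1, testOne, testTwo,
      # teardown 1, tearDownModule.
      print(results)
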
/cts/tests/tests/provider/src/android/provider/cts/ |
ContactsContract_StatusUpdatesTest.java |
    61 ContentProviderResult[] results = mResolver.applyBatch(ContactsContract.AUTHORITY, ops); [local]
    62 assertNotNull(results[0].uri);
    63 assertNotNull(results[1].uri);
    65 dataId = ContentUris.parseId(results[1].uri);
    113 ContentProviderResult[] results = mResolver.applyBatch(ContactsContract.AUTHORITY, ops); [local]
    114 assertNotNull(results[0].uri);
    115 return results[0].uri;
/external/chromium_org/build/android/pylib/gtest/ |
test_runner.py |
    87 results = base_test_result.TestRunResults()
    117 results.AddResult(base_test_result.BaseTestResult(
    121 results.AddResult(base_test_result.BaseTestResult(
    126 results.AddResult(base_test_result.BaseTestResult(
    136 results.AddResult(base_test_result.BaseTestResult(
    143 results.AddResult(base_test_result.BaseTestResult(
    155 return results
    171 # Calculate unknown test results.
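The gtest test_runner.py hit above accumulates per-test outcomes in a base_test_result.TestRunResults container by calling AddResult with BaseTestResult objects. The classes below are a simplified stand-in sketch of that container, not the real pylib API, which carries additional fields such as result-type constants and captured logs; everything beyond the TestRunResults/AddResult/BaseTestResult names is an assumption.

    # Simplified stand-in for the pylib base_test_result pattern shown above;
    # attribute names beyond AddResult/BaseTestResult/TestRunResults are invented.

    class BaseTestResult(object):
      def __init__(self, name, result_type, log=''):
        self.name = name
        self.result_type = result_type  # e.g. 'PASS', 'FAIL', 'CRASH', 'UNKNOWN'
        self.log = log

    class TestRunResults(object):
      def __init__(self):
        self._results = []

      def AddResult(self, result):
        self._results.append(result)

      def GetAll(self):
        return list(self._results)

    def run_gtest_batch(test_names, run_single_test):
      # Mirrors the shape of test_runner.py: build one TestRunResults object,
      # add a BaseTestResult per executed test, and return the container.
      results = TestRunResults()
      for name in test_names:
        results.AddResult(BaseTestResult(name, run_single_test(name)))
      return results
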
/external/chromium_org/ppapi/proxy/ |
pdf_resource_unittest.cc |
    73 PP_PrivateFindResult* results; [local]
    79 &results, [local]
    83 ASSERT_EQ(1, results[0].start_index);
    84 ASSERT_EQ(2, results[0].length);
    85 ASSERT_EQ(7, results[1].start_index);
    86 ASSERT_EQ(2, results[1].length);
    89 memory_iface->MemFree(results);
/external/chromium_org/third_party/WebKit/Source/core/clipboard/ |
DataObject.cpp |
    129 ListHashSet<String> results; [local]
    134 results.add(m_itemList[i]->type());
    142 results.add(mimeTypeFiles);
    143 return results;
    204 Vector<String> results; [local]
    207 results.append(static_cast<File*>(m_itemList[i]->getAsFile().get())->path());
    209 return results;