/development/samples/training/multiscreen/newsreader/src/com/example/android/newsreader/
  NonsenseGenerator.java
     77: List<String> words = new ArrayList<String>();  [local]
     78: generateSentence(words, isHeadline);
     79: words.set(0, String.valueOf(Character.toUpperCase(words.get(0).charAt(0))) +
     80:     words.get(0).substring(1));
     81: return joinWords(words);
    102: * @param words the list of words to which the sentence will be appended.
    105: private void generateSentence(List<String> words, boolean isHeadline) {
    107: generateTimeClause(words, isHeadline)
    [all...]

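The NonsenseGenerator excerpt above builds a list of words, upper-cases the first character of the first word, and then joins the list into a sentence. A minimal Python sketch of that capitalize-then-join step (the sample itself is Java and uses its own joinWords() helper; the word list here is just an illustration):

    def capitalize_and_join(words):
        """Upper-case the first letter of the first word, then join with spaces."""
        if words:
            words[0] = words[0][0].upper() + words[0][1:]
        return " ".join(words)

    print(capitalize_and_join(["breaking", "news", "about", "words"]))
    # -> "Breaking news about words"
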
/external/chromium_org/third_party/mesa/src/src/glsl/glcpp/tests/
  039-func-arg-obj-macro-with-comma.c
      2: #define bar two,words

/external/mesa3d/src/glsl/glcpp/tests/
  039-func-arg-obj-macro-with-comma.c
      2: #define bar two,words

/external/valgrind/main/none/tests/x86/
  cpuid.stdout.exp
      1: cpuid words (0): 0x........ 0x........ 0x........ 0x........
      2: cpuid words (1): 0x........ 0x........ 0x........ 0x........

/external/dropbear/libtomcrypt/src/pk/asn1/der/object_identifier/
  der_length_object_identifier.c
     35: @param nwords The number of OID words
     36: @param words The actual OID words to get the size of
     40: int der_length_object_identifier(unsigned long *words, unsigned long nwords, unsigned long *outlen)
     44: LTC_ARGCHK(words != NULL);
     48: /* must be >= 2 words */
     54: if (words[0] > 3 || (words[0] < 2 && words[1] > 39)) {
     60: wordbuf = words[0] * 40 + words[1]
    [all...]
  der_encode_object_identifier.c
     21: @param words The words to encode (upto 32-bits each)
     22: @param nwords The number of words in the OID
     27: int der_encode_object_identifier(unsigned long *words, unsigned long nwords,
     33: LTC_ARGCHK(words != NULL);
     38: if ((err = der_length_object_identifier(words, nwords, &x)) != CRYPT_OK) {
     48: wordbuf = words[0] * 40 + words[1];
     53: wordbuf = words[y + 1];
     74: wordbuf = words[0] * 40 + words[1];
    [all...]
  der_decode_object_identifier.c
     20: Decode OID data and store the array of integers in words
     23: @param words [out] The destination of the OID words
     24: @param outlen [in/out] The number of OID words
     28: unsigned long *words, unsigned long *outlen)
     33: LTC_ARGCHK(words != NULL);
     41: /* must be room for at least two words */
     70: /* decode words */
     81: words[0] = t / 40;
     82: words[1] = t % 40
    [all...]

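The three libtomcrypt files above apply the standard DER rule for OBJECT IDENTIFIERs that is visible in the excerpts: the first two arcs are packed into one value as words[0] * 40 + words[1] on encode, split back out with t / 40 and t % 40 on decode, and every sub-identifier is written base-128 with the high bit marking continuation bytes. A rough Python illustration of that packing (not the libtomcrypt API itself):

    def encode_oid_arcs(words):
        """Pack OID arcs into DER sub-identifier bytes (base-128, high-bit continuation)."""
        if len(words) < 2:
            raise ValueError("an OID needs at least two arcs")
        subids = [words[0] * 40 + words[1]] + list(words[2:])
        out = bytearray()
        for value in subids:
            chunk = [value & 0x7F]      # low 7 bits last, no continuation bit
            value >>= 7
            while value:
                chunk.append((value & 0x7F) | 0x80)
                value >>= 7
            out.extend(reversed(chunk))
        return bytes(out)

    # 1.2.840.113549 -> first byte is 1*40 + 2 = 0x2a
    assert encode_oid_arcs([1, 2, 840, 113549]).hex() == "2a864886f70d"
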
/external/valgrind/main/memcheck/tests/
  wrap6.c
    182: UInt* words = calloc(200, sizeof(UInt));  [local]
    183: TRASH_IREGS(r, words);
    184: free(words);
    208: UInt* words = calloc(200, sizeof(UInt));  [local]
    209: words[1-1] = a1;
    210: TRASH_IREGS(r, words);
    211: free(words);
    235: UInt* words = calloc(200, sizeof(UInt));  [local]
    236: words[1-1] = a1;
    237: words[2-1] = a2
    263: UInt* words = calloc(200, sizeof(UInt));  [local]
    292: UInt* words = calloc(200, sizeof(UInt));  [local]
    322: UInt* words = calloc(200, sizeof(UInt));  [local]
    353: UInt* words = calloc(200, sizeof(UInt));  [local]
    386: UInt* words = calloc(200, sizeof(UInt));  [local]
    422: UInt* words = calloc(200, sizeof(UInt));  [local]
    459: UInt* words = calloc(200, sizeof(UInt));  [local]
    497: UInt* words = calloc(200, sizeof(UInt));  [local]
    536: UInt* words = calloc(200, sizeof(UInt));  [local]
    576: UInt* words = calloc(200, sizeof(UInt));  [local]
    [all...]
  test-plo.c
     73: UWord* words = malloc(3 * sizeof(UWord));  [local]
     74: free(words);
     77: UWord w = words[1];

/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/idlelib/
  AutoExpand.py
     26: words = self.getwords()
     29: words, index, insert, line = self.state
     31: words = self.getwords()
     33: if not words:
     38: newword = words[index]
     39: index = (index + 1) % len(words)
     45: self.state = words, index, curinsert, curline
     60: words = []
     62: # search backwards through words before
     67: words.append(w
    [all...]

/prebuilts/python/linux-x86/2.7.5/lib/python2.7/idlelib/
  AutoExpand.py
     26: words = self.getwords()
     29: words, index, insert, line = self.state
     31: words = self.getwords()
     33: if not words:
     38: newword = words[index]
     39: index = (index + 1) % len(words)
     45: self.state = words, index, curinsert, curline
     60: words = []
     62: # search backwards through words before
     67: words.append(w
    [all...]

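Both AutoExpand.py copies above implement IDLE's word-expansion cycling: the candidate list and the current index live in self.state, each invocation returns words[index], and the index advances with (index + 1) % len(words) so repeated presses cycle through the candidates. A stripped-down Python sketch of that cycling state (not the IDLE AutoExpand class itself, which also tracks the insert point and line):

    class ExpandState:
        """Cycle through candidate expansions the way the excerpt's state tuple does."""
        def __init__(self, words):
            self.words = list(words)
            self.index = 0

        def next_word(self):
            if not self.words:
                return None
            word = self.words[self.index]
            self.index = (self.index + 1) % len(self.words)
            return word

    state = ExpandState(["wordlist", "words", "wordy"])
    print([state.next_word() for _ in range(4)])  # wraps back to "wordlist"
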
/external/chromium_org/content/common/android/
  address_parser.cc
     13: // Minimum number of words in an address after the house number
     18: // Maximum number of words allowed in an address between the house number
     30: // Maximum number of words after the house number in which the location name
     68: // detected. Start tokenizing the following words to find a valid
     83: WordList words;  [local]
     84: words.push_back(house_number);
     97: if (next_word == words.size()) {
    112: words.push_back(Word(tokenizer.token_begin(), tokenizer.token_end()));
    117: const Word& current_word = words[next_word];
    148: // Look for location names in the words after the house number
    [all...]

/pdk/util/
  diff_products.py
     50: words = line.split()
     51: if len(words) < 2:
     53: if words[0] in PRODUCT_KEYWORDS:
     55: if overrideProperty and words[1] == ":=":
     56: if len(productData[words[0]]) != 0:
     57: print "** Warning: overriding property " + words[0] + " that was:" + \
     58:     productData[words[0]]
     59: productData[words[0]] = {}
     60: d = productData[words[0]]
     61: for word in words[2:]
    [all...]

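diff_products.py above tokenizes each product-makefile line with line.split(), keys the result on words[0] (a product variable name), treats words[1] == ":=" as an override that warns and resets the accumulated values, and then stores the remaining tokens from words[2:]. A hedged Python 3 sketch of that parse step (the keyword set, list-based storage, and sample lines are illustrative; the original is Python 2 and keeps per-variable dicts):

    PRODUCT_KEYWORDS = {"PRODUCT_PACKAGES", "PRODUCT_COPY_FILES"}  # illustrative subset

    def parse_product_lines(lines):
        """Collect values per product variable; ':=' overrides, anything else accumulates."""
        data = {key: [] for key in PRODUCT_KEYWORDS}
        for line in lines:
            words = line.split()
            if len(words) < 2 or words[0] not in PRODUCT_KEYWORDS:
                continue
            if words[1] == ":=" and data[words[0]]:
                print("** Warning: overriding property", words[0], "that was:", data[words[0]])
                data[words[0]] = []
            data[words[0]].extend(words[2:])
        return data

    print(parse_product_lines(["PRODUCT_PACKAGES += Launcher",
                               "PRODUCT_PACKAGES := Settings"]))
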
/external/chromium/chrome/browser/history/
  query_parser.h
     36: // the number of words in this node.
     47: // Returns true if this node matches at least one of the words in words. If
     50: virtual bool HasMatchIn(const std::vector<QueryWord>& words,
     53: // Appends the words that make up this node in |words|.
     54: virtual void AppendWords(std::vector<string16>* words) const = 0;
     72: // sqlite_query and the number of words is returned.
     76: // Parses the query words in query, returning the nodes that constitute the
     77: // valid words in the query. This is intended for later usage wit
    [all...]
  query_parser.cc
     79: virtual bool HasMatchIn(const std::vector<QueryWord>& words,
     83: virtual void AppendWords(std::vector<string16>* words) const;
     90: bool QueryNodeWord::HasMatchIn(const std::vector<QueryWord>& words,
     92: for (size_t i = 0; i < words.size(); ++i) {
     93: if (Matches(words[i].word, false)) {
     94: size_t match_start = words[i].position;
    111: void QueryNodeWord::AppendWords(std::vector<string16>* words) const {
    112: words->push_back(word_);
    148: virtual bool HasMatchIn(const std::vector<QueryWord>& words,
    153: virtual void AppendWords(std::vector<string16>* words) const
    [all...]

/external/chromium_org/chrome/browser/history/
  query_parser.h
     33: // the number of words in this node.
     43: // Returns true if this node matches at least one of the words in |words|. An
     44: // entry is added to |match_positions| for all matching words giving the
     46: virtual bool HasMatchIn(const std::vector<QueryWord>& words,
     49: // Returns true if this node matches at least one of the words in |words|.
     50: virtual bool HasMatchIn(const std::vector<QueryWord>& words) const = 0;
     52: // Appends the words that make up this node in |words|
    [all...]
  query_parser.cc
     90: const std::vector<QueryWord>& words,
     93: const std::vector<QueryWord>& words) const OVERRIDE;
     94: virtual void AppendWords(std::vector<base::string16>* words) const OVERRIDE;
    129: bool QueryNodeWord::HasMatchIn(const std::vector<QueryWord>& words,
    132: for (size_t i = 0; i < words.size(); ++i) {
    133: if (Matches(words[i].word, false)) {
    134: size_t match_start = words[i].position;
    144: bool QueryNodeWord::HasMatchIn(const std::vector<QueryWord>& words) const {
    145: for (size_t i = 0; i < words.size(); ++i) {
    146: if (Matches(words[i].word, false)
    [all...]

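In both query_parser versions above, HasMatchIn() walks a vector of QueryWord entries, asks Matches() whether each entry satisfies the node, and records the match position when it does. A loose Python analogue of that matching loop (QueryWord here is just a (word, position) tuple and the prefix flag is an illustrative option, not Chromium's API):

    def has_match_in(node_word, query_words, match_positions, prefix=False):
        """Return True if node_word matches any (word, position) entry; record match spans."""
        found = False
        for word, position in query_words:
            matches = word.startswith(node_word) if prefix else word == node_word
            if matches:
                match_positions.append((position, position + len(node_word)))
                found = True
        return found

    positions = []
    print(has_match_in("word", [("keyword", 0), ("words", 8)], positions, prefix=True),
          positions)  # True [(8, 12)]
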
/packages/inputmethods/LatinIME/tests/src/com/android/inputmethod/latin/
  SuggestedWordsTests.java
     48: final SuggestedWords words = new SuggestedWords(  [local]
     55: assertEquals(NUMBER_OF_ADDED_SUGGESTIONS + 1, words.size());
     56: assertEquals("typed", words.getWord(0));
     57: assertEquals(SuggestedWordInfo.KIND_TYPED, words.getInfo(0).mKind);
     58: assertEquals("0", words.getWord(1));
     59: assertEquals(SuggestedWordInfo.KIND_CORRECTION, words.getInfo(1).mKind);
     60: assertEquals("4", words.getWord(5));
     61: assertEquals(SuggestedWordInfo.KIND_CORRECTION, words.getInfo(5).mKind);
     63: final SuggestedWords wordsWithoutTyped = words.getSuggestedWordsExcludingTypedWord();
     64: assertEquals(words.size() - 1, wordsWithoutTyped.size())
    [all...]

/external/llvm/utils/emacs/
  llvm-mode.el
     21: `(,(regexp-opt '("void" "i[0-9]+" "float" "double" "type" "label" "opaque") 'words) . font-lock-type-face)
     33: "pointersize" "volatile" "fastcc" "coldcc" "cc") 'words) . font-lock-keyword-face)
     36: "setne" "seteq" "setlt" "setgt" "setle" "setge") 'words) . font-lock-keyword-face)
     38: `(,(regexp-opt '("fadd" "fsub" "fmul" "fdiv" "frem") 'words) . font-lock-keyword-face)
     40: `(,(regexp-opt '("phi" "tail" "call" "cast" "select" "to" "shl" "shr" "fcmp" "icmp" "vaarg" "vanext") 'words) . font-lock-keyword-face)
     42: `(,(regexp-opt '("ret" "br" "switch" "invoke" "unwind" "unreachable") 'words) . font-lock-keyword-face)
     44: `(,(regexp-opt '("malloc" "alloca" "free" "load" "store" "getelementptr") 'words) . font-lock-keyword-face)

/external/chromium_org/tools/deep_memory_profiler/subcommands/
  pprof.py
     98: words = line.split()
     99: bucket = bucket_set.get(int(words[BUCKET_ID]))
    110: com_committed += int(words[COMMITTED])
    111: com_allocs += int(words[ALLOC_COUNT]) - int(words[FREE_COUNT])
    142: words = line.split()
    143: bucket = bucket_set.get(int(words[BUCKET_ID]))
    155: int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]),
    156: words[COMMITTED]
    [all...]
  stacktrace.py
     33: words = line.split()
     34: bucket = bucket_set.get(int(words[BUCKET_ID]))
     38: out.write(words[i] + ' ')

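Both deep_memory_profiler scripts above split each dump line into whitespace-separated words, look the bucket up by int(words[BUCKET_ID]), and compute the live allocation count as int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]). A small Python sketch of that accumulation, assuming a made-up column order (the real indices come from the dump format, so treat them as placeholders):

    # Assumed column layout for this sketch: bucket id, committed bytes, allocs, frees.
    BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT = 0, 1, 2, 3

    def summarize(lines):
        """Accumulate committed bytes and net (live) allocations per bucket id."""
        totals = {}
        for line in lines:
            words = line.split()
            bucket = int(words[BUCKET_ID])
            committed, allocs = totals.get(bucket, (0, 0))
            committed += int(words[COMMITTED])
            allocs += int(words[ALLOC_COUNT]) - int(words[FREE_COUNT])
            totals[bucket] = (committed, allocs)
        return totals

    print(summarize(["7 4096 12 3", "7 8192 5 5"]))  # {7: (12288, 9)}
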
/external/valgrind/main/helgrind/
  hg_wordset.c
      3: /*--- Sets of words, with unique set identifiers. ---*/
    137: UWord* words;  [member in struct:__anon28119]
    192: wv->words = NULL;
    195: wv->words = wsu->alloc( wsu->cc, (SizeT)sz * sizeof(UWord) );
    203: if (wv->words) {
    204: dealloc(wv->words);
    228: if (wv1->words[i] == wv2->words[i])
    230: if (wv1->words[i] < wv2->words[i]
    [all...]

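The hg_wordset.c excerpt above compares two word vectors element by element: equal elements are skipped and the first differing element decides the ordering. A compact Python sketch of that lexicographic comparison (the length tie-break is an assumption; the excerpt cuts off before that part):

    def compare_word_vectors(a, b):
        """Lexicographic compare of two word vectors of possibly different length."""
        for x, y in zip(a, b):
            if x == y:
                continue
            return -1 if x < y else 1
        # Assumed tie-break: the shorter vector orders first.
        return (len(a) > len(b)) - (len(a) < len(b))

    print(compare_word_vectors([1, 2, 3], [1, 2, 4]))  # -1
    print(compare_word_vectors([1, 2], [1, 2, 0]))     # -1
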
/external/chromium_org/third_party/WebKit/Source/devtools/front_end/
  CompletionDictionary.js
    105: var words = [];
    108: words.push(i);
    110: return words;
  TextUtils.js
    105: var words = [];
    110: words.push(text.substring(startWord, i));
    116: words.push(text.substring(startWord));
    117: return words;

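TextUtils.js above scans the text once, remembers where the current word started, pushes text.substring(startWord, i) whenever a non-word character ends it, and finishes with one last push for a word that runs to the end of the string. A Python version of that scan (the is-word-character test is simplified here to alphanumerics plus underscore):

    def split_into_words(text):
        """Collect maximal runs of word characters, like the DevTools helper."""
        words = []
        start = None
        for i, ch in enumerate(text):
            if ch.isalnum() or ch == "_":
                if start is None:
                    start = i
            elif start is not None:
                words.append(text[start:i])
                start = None
        if start is not None:
            words.append(text[start:])  # word running to the end of the text
        return words

    print(split_into_words("var words = line.split();"))
    # ['var', 'words', 'line', 'split']
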
/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/
  templite.py
     62: # Action tag: split into words and parse further.
     63: words = tok[2:-2].strip().split()
     64: if words[0] == 'if':
     67: assert len(words) == 2
     68: ops.append(('if', (words[1], if_ops)))
     71: elif words[0] == 'for':
     73: assert len(words) == 4 and words[2] == 'in'
     75: ops.append(('for', (words[1], words[3], for_ops))
    [all...]

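templite.py above parses an action tag by stripping the {% %} delimiters with tok[2:-2], splitting the remainder into words, and branching on words[0]: an 'if' tag must have exactly two words, and a 'for' tag exactly four with words[2] == 'in'. A simplified Python sketch of that dispatch (it only classifies the tag; the real class builds an op list and later renders it):

    def parse_action_tag(tok):
        """Classify a '{% ... %}' tag the way the excerpt's asserts require."""
        words = tok[2:-2].strip().split()
        if words[0] == "if":
            assert len(words) == 2, "if tag takes one expression"
            return ("if", words[1])
        elif words[0] == "for":
            assert len(words) == 4 and words[2] == "in", "for tag is 'for x in seq'"
            return ("for", words[1], words[3])
        raise SyntaxError("unknown tag: %r" % tok)

    print(parse_action_tag("{% if user %}"))         # ('if', 'user')
    print(parse_action_tag("{% for x in items %}"))  # ('for', 'x', 'items')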