/external/parameter-framework/upstream/test/tokenizer/
Test.cpp
     31  #include "Tokenizer.h"
     44  SCENARIO("Tokenizer tests")
     46  GIVEN ("A default tokenizer") {
     49  Tokenizer tokenizer("a bcd ef");
     53  CHECK(tokenizer.split() == expected);
     58  Tokenizer tokenizer("");
     62  CHECK(tokenizer.split() == expected);
     67  Tokenizer tokenizer(" a \n\t bc ")
     [all...]
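The tests above exercise a split() that treats any run of spaces, tabs, and newlines as a single separator. java.util.StringTokenizer behaves the same way with its default delimiter set, so a quick Java analogue of the last test input looks like this (the class name is invented for illustration):

    import java.util.StringTokenizer;

    public class WhitespaceSplitDemo {
        public static void main(String[] args) {
            // Default delimiters are " \t\n\r\f", matching the whitespace
            // handling the parameter-framework tests expect from split().
            StringTokenizer tokenizer = new StringTokenizer(" a \n\t bc ");
            while (tokenizer.hasMoreTokens()) {
                System.out.println(tokenizer.nextToken()); // prints "a", then "bc"
            }
        }
    }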
/system/core/init/parser/ |
tokenizer_test.cpp
     15  #include "tokenizer.h"
     26  Tokenizer tokenizer(data); \
     27  ASSERT_EQ(Tokenizer::TOK_START, tokenizer.current().type)
     30  ASSERT_TRUE(tokenizer.Next()); \
     31  ASSERT_EQ(test_text, tokenizer.current().text); \
     32  ASSERT_EQ(Tokenizer::TOK_TEXT, tokenizer.current().type)
     35  ASSERT_TRUE(tokenizer.Next());
     [all...]
/external/jacoco/org.jacoco.examples/build/src/main/java/org/jacoco/examples/parser/ |
ExpressionParser.java
     30  private final StreamTokenizer tokenizer;
     33  tokenizer = new StreamTokenizer(new StringReader(s));
     34  tokenizer.ordinaryChar('(');
     35  tokenizer.ordinaryChar(')');
     36  tokenizer.ordinaryChar('+');
     37  tokenizer.ordinaryChar('-');
     38  tokenizer.ordinaryChar('*');
     39  tokenizer.ordinaryChar('/');
     43  tokenizer.nextToken();
     82  e = new Const(tokenizer.nval)
     [all...]
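ExpressionParser drives java.io.StreamTokenizer, marking each arithmetic operator as an "ordinary" character so it comes back as a single-character token instead of being folded into a word. A minimal self-contained sketch of that setup (the input expression is a sample value):

    import java.io.IOException;
    import java.io.StreamTokenizer;
    import java.io.StringReader;

    public class StreamTokenizerDemo {
        public static void main(String[] args) throws IOException {
            StreamTokenizer tokenizer = new StreamTokenizer(new StringReader("(1 + 2) * 3"));
            // Make the operators single ordinary-character tokens, as above.
            for (char c : "()+-*/".toCharArray()) {
                tokenizer.ordinaryChar(c);
            }
            // nextToken() returns the token type and fills nval for numbers.
            while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
                if (tokenizer.ttype == StreamTokenizer.TT_NUMBER) {
                    System.out.println("number: " + tokenizer.nval);
                } else {
                    System.out.println("char:   " + (char) tokenizer.ttype);
                }
            }
        }
    }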
/external/doclava/src/com/google/doclava/apicheck/ |
ApiFile.java
     69  final Tokenizer tokenizer = new Tokenizer(filename, (new String(buf, 0, size)).toCharArray());
     73  String token = tokenizer.getToken();
     78  parsePackage(api, tokenizer);
     80  throw new ApiParseException("expected package got " + token, tokenizer.getLine());
     90  private static void parsePackage(ApiInfo api, Tokenizer tokenizer)
     96  token = tokenizer.requireToken();
     97  assertIdent(tokenizer, token)
     [all...]
/external/deqp/framework/opengl/ |
gluVarTypeUtil.cpp
     92  VarTokenizer tokenizer(nameWithPath);
     93  TCU_CHECK(tokenizer.getToken() == VarTokenizer::TOKEN_IDENTIFIER);
     94  return tokenizer.getIdentifier();
     99  VarTokenizer tokenizer(nameWithPath);
    101  if (tokenizer.getToken() == VarTokenizer::TOKEN_IDENTIFIER)
    102  tokenizer.advance();
    105  while (tokenizer.getToken() != VarTokenizer::TOKEN_END)
    109  if (tokenizer.getToken() == VarTokenizer::TOKEN_PERIOD)
    111  tokenizer.advance();
    112  TCU_CHECK(tokenizer.getToken() == VarTokenizer::TOKEN_IDENTIFIER)
    [all...]
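VarTokenizer is dEQP-internal, but the loop shape above — peek at the current token with getToken(), consume it with advance() — is a common cursor-style tokenizer protocol. A hypothetical Java sketch of that protocol for dotted variable paths (all names here are invented, not the dEQP API):

    // Hypothetical analogue of the getToken()/advance() cursor protocol:
    // getToken() peeks at the current token, advance() consumes it.
    public class VarPathDemo {
        enum Token { IDENTIFIER, PERIOD, END }

        private final String src;
        private int pos = 0;

        VarPathDemo(String src) { this.src = src; }

        Token getToken() {
            if (pos >= src.length()) return Token.END;
            return src.charAt(pos) == '.' ? Token.PERIOD : Token.IDENTIFIER;
        }

        String getIdentifier() {
            int end = src.indexOf('.', pos);
            return end < 0 ? src.substring(pos) : src.substring(pos, end);
        }

        void advance() {
            if (getToken() == Token.PERIOD) { pos++; return; }
            int end = src.indexOf('.', pos);
            pos = end < 0 ? src.length() : end;
        }

        public static void main(String[] args) {
            VarPathDemo t = new VarPathDemo("light.color");
            while (t.getToken() != Token.END) {
                if (t.getToken() == Token.IDENTIFIER)
                    System.out.println(t.getIdentifier()); // "light", then "color"
                t.advance();
            }
        }
    }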
/external/protobuf/python/google/protobuf/internal/ |
text_format_test.py
    608  tokenizer = text_format._Tokenizer(text.splitlines())
    609  methods = [(tokenizer.ConsumeIdentifier, 'identifier1'),
    611  (tokenizer.ConsumeString, 'string1'),
    612  (tokenizer.ConsumeIdentifier, 'identifier2'),
    614  (tokenizer.ConsumeInt32, 123),
    615  (tokenizer.ConsumeIdentifier, 'identifier3'),
    617  (tokenizer.ConsumeString, 'string'),
    618  (tokenizer.ConsumeIdentifier, 'identifiER_4'),
    620  (tokenizer.ConsumeFloat, 1.1e+2),
    621  (tokenizer.ConsumeIdentifier, 'ID5')
    [all...]
/external/apache-xml/src/main/java/org/apache/xml/utils/ |
StylesheetPIHandler.java
    152  StringTokenizer tokenizer = new StringTokenizer(data, " \t=\n", true);
    157  while (tokenizer.hasMoreTokens())
    160  token = tokenizer.nextToken();
    163  if (tokenizer.hasMoreTokens() &&
    170  token = tokenizer.nextToken();
    171  while (tokenizer.hasMoreTokens() &&
    173  token = tokenizer.nextToken();
    179  token = tokenizer.nextToken();
    180  while (tokenizer.hasMoreTokens() &&
    182  token = tokenizer.nextToken()
    [all...]
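The third constructor argument (returnDelims = true) makes java.util.StringTokenizer hand back each delimiter character as its own one-character token, which is how StylesheetPIHandler can tell '=' apart from the surrounding whitespace while scanning xml-stylesheet pseudo-attributes. A standalone sketch with a made-up input:

    import java.util.StringTokenizer;

    public class ReturnDelimsDemo {
        public static void main(String[] args) {
            String data = "type=\"text/xsl\" href=\"style.xsl\""; // sample pseudo-attributes
            // returnDelims = true: ' ', '\t', '=' and '\n' come back as tokens too.
            StringTokenizer tokenizer = new StringTokenizer(data, " \t=\n", true);
            while (tokenizer.hasMoreTokens()) {
                System.out.println("[" + tokenizer.nextToken() + "]");
            }
        }
    }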
/external/protobuf/src/google/protobuf/io/ |
tokenizer_unittest.cc
     40  #include <google/protobuf/io/tokenizer.h>
    183  EXPECT_TRUE(Tokenizer::ParseInteger(text, kuint64max, &result));
    198  Tokenizer::TokenType type;
    208  { "hello", Tokenizer::TYPE_IDENTIFIER },
    211  { "123", Tokenizer::TYPE_INTEGER },
    212  { "0xab6", Tokenizer::TYPE_INTEGER },
    213  { "0XAB6", Tokenizer::TYPE_INTEGER },
    214  { "0X1234567", Tokenizer::TYPE_INTEGER },
    215  { "0x89abcdef", Tokenizer::TYPE_INTEGER },
    216  { "0x89ABCDEF", Tokenizer::TYPE_INTEGER }
    [all...]
/external/antlr/antlr-3.4/runtime/CSharp2/Sources/Antlr3.Runtime/Antlr.Runtime.Tree/ |
TreePatternParser.cs
     37  protected TreePatternLexer tokenizer;
     42  public TreePatternParser(TreePatternLexer tokenizer, TreeWizard wizard, ITreeAdaptor adaptor) {
     43  this.tokenizer = tokenizer;
     46  ttype = tokenizer.NextToken(); // kickstart
     66  ttype = tokenizer.NextToken();
     90  ttype = tokenizer.NextToken();
     98  ttype = tokenizer.NextToken();
    102  label = tokenizer.sval.ToString();
    103  ttype = tokenizer.NextToken()
    [all...]
/external/antlr/antlr-3.4/runtime/CSharp3/Sources/Antlr3.Runtime/Tree/ |
TreePatternParser.cs
     39  protected TreePatternLexer tokenizer;
     44  public TreePatternParser( TreePatternLexer tokenizer, TreeWizard wizard, ITreeAdaptor adaptor )
     46  this.tokenizer = tokenizer;
     49  ttype = tokenizer.NextToken(); // kickstart
     75  ttype = tokenizer.NextToken();
    105  ttype = tokenizer.NextToken();
    115  ttype = tokenizer.NextToken();
    120  label = tokenizer.sval.ToString();
    121  ttype = tokenizer.NextToken()
    [all...]
/external/antlr/antlr-3.4/runtime/Java/src/main/java/org/antlr/runtime/tree/ |
TreePatternParser.java
     34  protected TreePatternLexer tokenizer;
     39  public TreePatternParser(TreePatternLexer tokenizer, TreeWizard wizard, TreeAdaptor adaptor) {
     40  this.tokenizer = tokenizer;
     43  ttype = tokenizer.nextToken(); // kickstart
     64  ttype = tokenizer.nextToken();
     89  ttype = tokenizer.nextToken();
     97  ttype = tokenizer.nextToken();
    101  label = tokenizer.sval.toString();
    102  ttype = tokenizer.nextToken()
    [all...]
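All three TreePatternParser ports prime a one-token lookahead in the constructor — the line marked "kickstart" runs nextToken() once — so every parse method can inspect the current token type before deciding whether to consume it. A hypothetical Java skeleton of that pattern (the Lexer interface and token codes are invented here, not the ANTLR API):

    // Hypothetical one-token-lookahead parser skeleton: the constructor
    // "kickstarts" the stream so ttype always holds the current token.
    public class LookaheadDemo {
        interface Lexer { int nextToken(); } // returns EOF when exhausted

        static final int EOF = -1;

        private final Lexer tokenizer;
        private int ttype;

        LookaheadDemo(Lexer tokenizer) {
            this.tokenizer = tokenizer;
            this.ttype = tokenizer.nextToken(); // kickstart: prime the lookahead
        }

        void match(int expected) {
            if (ttype != expected)
                throw new IllegalStateException("expected " + expected + ", got " + ttype);
            ttype = tokenizer.nextToken(); // consume and refill the lookahead
        }

        public static void main(String[] args) {
            int[] tokens = {1, 2, EOF};
            int[] i = {0};
            LookaheadDemo p = new LookaheadDemo(() -> tokens[i[0]++]);
            p.match(1);
            p.match(2);
            System.out.println("matched both tokens");
        }
    }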
/packages/apps/Gallery2/src/com/android/gallery3d/data/ |
Face.java
     34  StringTokenizer tokenizer = new StringTokenizer(rect);
     36  while (tokenizer.hasMoreElements()) {
     37  mPosition.left = Integer.parseInt(tokenizer.nextToken());
     38  mPosition.top = Integer.parseInt(tokenizer.nextToken());
     39  mPosition.right = Integer.parseInt(tokenizer.nextToken());
     40  mPosition.bottom = Integer.parseInt(tokenizer.nextToken());
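Face.java relies on java.util.StringTokenizer's default whitespace delimiters to pull four integers out of a face-rectangle string. A standalone sketch with sample coordinates:

    import java.util.StringTokenizer;

    public class RectParseDemo {
        public static void main(String[] args) {
            String rect = "10 20 110 220"; // sample: left top right bottom
            StringTokenizer tokenizer = new StringTokenizer(rect); // default delims: whitespace
            int left = Integer.parseInt(tokenizer.nextToken());
            int top = Integer.parseInt(tokenizer.nextToken());
            int right = Integer.parseInt(tokenizer.nextToken());
            int bottom = Integer.parseInt(tokenizer.nextToken());
            System.out.printf("l=%d t=%d r=%d b=%d%n", left, top, right, bottom);
        }
    }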
/external/emma/core/java12/com/vladium/emma/ |
AppLoggers.java
     59  final StringTokenizer tokenizer = new StringTokenizer (_filter, COMMA_DELIMITERS);
     60  if (tokenizer.countTokens () > 0)
     62  temp = new HashSet (tokenizer.countTokens ());
     63  while (tokenizer.hasMoreTokens ())
     65  temp.add (tokenizer.nextToken ());
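AppLoggers calls countTokens() before iterating so it can presize the HashSet to the exact number of comma-separated filter entries. The same pattern in isolation (the filter string is a sample value):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.StringTokenizer;

    public class FilterSetDemo {
        public static void main(String[] args) {
            String filter = "verbose,debug,trace"; // sample comma-separated filter
            StringTokenizer tokenizer = new StringTokenizer(filter, ",");
            // countTokens() is available before iteration, so it can presize the set.
            Set<String> scopes = new HashSet<>(tokenizer.countTokens());
            while (tokenizer.hasMoreTokens()) {
                scopes.add(tokenizer.nextToken());
            }
            System.out.println(scopes);
        }
    }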
/external/antlr/antlr-3.4/runtime/ObjC/Framework/ |
ANTLRTreePatternParser.h
     39  ANTLRTreePatternLexer *tokenizer;
     50  - (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
     59  @property (retain) ANTLRTreePatternLexer *tokenizer;
/external/nanopb-c/generator/google/protobuf/ |
text_format.py
    156  tokenizer = _Tokenizer(text)
    157  while not tokenizer.AtEnd():
    158  _MergeField(tokenizer, message)
    161  def _MergeField(tokenizer, message):
    165  tokenizer: A tokenizer to parse the field name and values.
    172  if tokenizer.TryConsume('['):
    173  name = [tokenizer.ConsumeIdentifier()]
    174  while tokenizer.TryConsume('.'):
    175  name.append(tokenizer.ConsumeIdentifier()
    [all...]
/external/chromium-trace/catapult/third_party/vinn/third_party/parse5/lib/tokenization/ |
location_info_mixin.js
      3  exports.assign = function (tokenizer) {
      4  //NOTE: obtain Tokenizer proto this way to avoid module circular references
      5  var tokenizerProto = Object.getPrototypeOf(tokenizer);
      7  tokenizer.tokenStartLoc = -1;
     10  tokenizer._attachLocationInfo = function (token) {
     18  tokenizer._createStartTagToken = function (tagNameFirstCh) {
     23  tokenizer._createEndTagToken = function (tagNameFirstCh) {
     28  tokenizer._createCommentToken = function () {
     33  tokenizer._createDoctypeToken = function (doctypeNameFirstCh) {
     38  tokenizer._createCharacterToken = function (type, ch)
     [all...]
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Headers/ |
ANTLRTreePatternParser.h
     39  ANTLRTreePatternLexer *tokenizer;
     50  - (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/A/Headers/ |
ANTLRTreePatternParser.h
     39  ANTLRTreePatternLexer *tokenizer;
     50  - (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
/external/antlr/antlr-3.4/runtime/ObjC/ANTLR.framework/Versions/Current/Headers/ |
ANTLRTreePatternParser.h
     39  ANTLRTreePatternLexer *tokenizer;
     50  - (id) initWithTokenizer:(ANTLRTreePatternLexer *)tokenizer
/frameworks/native/libs/input/ |
VirtualKeyMap.cpp
     25  #include <utils/Tokenizer.h>
     52  Tokenizer* tokenizer;
     53  status_t status = Tokenizer::open(filename, &tokenizer);
     65  Parser parser(map, tokenizer);
     70  tokenizer->getFilename().string(), tokenizer->getLineNumber(),
     79  delete tokenizer;
     87  VirtualKeyMap::Parser::Parser(VirtualKeyMap* map, Tokenizer* tokenizer)
     [all...]
/system/connectivity/shill/ |
scope_logger.cc
    133  StringTokenizer tokenizer(expression, "+-");
    134  tokenizer.set_options(StringTokenizer::RETURN_DELIMS);
    135  while (tokenizer.GetNext()) {
    136  if (tokenizer.token_is_delim()) {
    137  enable_scope = (tokenizer.token() == "+");
    141  if (tokenizer.token().empty())
    146  if (tokenizer.token() == kScopeNames[i]) {
    152  << "Unknown scope '" << tokenizer.token() << "'";
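shill's scope expressions toggle logging scopes with '+' and '-' prefixes, which is why the tokenizer is asked to return the delimiters themselves (RETURN_DELIMS). base::StringTokenizer is Chromium-specific, but java.util.StringTokenizer's returnDelims mode expresses the same idea; a Java analogue with invented scope names:

    import java.util.StringTokenizer;

    public class ScopeExpressionDemo {
        public static void main(String[] args) {
            String expression = "+wifi-cellular+vpn"; // sample scope expression
            StringTokenizer tokenizer = new StringTokenizer(expression, "+-", true);
            boolean enableScope = true; // default when no sign precedes a scope
            while (tokenizer.hasMoreTokens()) {
                String token = tokenizer.nextToken();
                if (token.equals("+") || token.equals("-")) {
                    enableScope = token.equals("+"); // a delimiter flips the mode
                } else {
                    System.out.println((enableScope ? "enable " : "disable ") + token);
                }
            }
        }
    }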
/external/mockftpserver/MockFtpServer/src/main/java/org/mockftpserver/stub/command/ |
AlloCommandHandler.java
     64  StringTokenizer tokenizer = new StringTokenizer(parametersString, RECORD_SIZE_DELIMITER);
     65  invocationRecord.set(NUMBER_OF_BYTES_KEY, Integer.valueOf(tokenizer.nextToken()));
     66  Assert.isTrue(tokenizer.hasMoreTokens(), "Missing record size: [" + parametersString + "]");
     67  invocationRecord.set(RECORD_SIZE_KEY, Integer.valueOf(tokenizer.nextToken()));
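Both copies of AlloCommandHandler split the FTP ALLO argument on a record-size delimiter and verify that a second token exists before consuming it. A standalone sketch of that validate-then-consume pattern (the delimiter "R" and the input are sample values standing in for RECORD_SIZE_DELIMITER and a real ALLO argument):

    import java.util.StringTokenizer;

    public class AlloParseDemo {
        public static void main(String[] args) {
            String parametersString = "1024 R 128"; // sample: <bytes> R <record-size>
            StringTokenizer tokenizer = new StringTokenizer(parametersString, "R");
            int numberOfBytes = Integer.parseInt(tokenizer.nextToken().trim());
            if (!tokenizer.hasMoreTokens()) {
                throw new IllegalArgumentException("Missing record size: [" + parametersString + "]");
            }
            int recordSize = Integer.parseInt(tokenizer.nextToken().trim());
            System.out.println("bytes=" + numberOfBytes + " recordSize=" + recordSize);
        }
    }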
/external/mockftpserver/branches/1.x_Branch/src/main/java/org/mockftpserver/stub/command/ |
AlloCommandHandler.java
     66  StringTokenizer tokenizer = new StringTokenizer(parametersString, RECORD_SIZE_DELIMITER);
     67  invocationRecord.set(NUMBER_OF_BYTES_KEY, Integer.valueOf(tokenizer.nextToken()));
     68  Assert.isTrue(tokenizer.hasMoreTokens(), "Missing record size: [" + parametersString + "]");
     69  invocationRecord.set(RECORD_SIZE_KEY, Integer.valueOf(tokenizer.nextToken()));
/external/libchrome/base/ |
sys_info_chromeos.cc
    132  StringTokenizer tokenizer(version, ".");
    133  if (tokenizer.GetNext()) {
    134  StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end()),
    137  if (tokenizer.GetNext()) {
    138  StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end()),
    141  if (tokenizer.GetNext()) {
    142  StringToInt(StringPiece(tokenizer.token_begin(), tokenizer.token_end())
    [all...]
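Here base::StringTokenizer walks a dotted Chrome OS version string, converting up to three components to integers. A Java analogue using java.util.StringTokenizer (the version string is a sample value):

    import java.util.StringTokenizer;

    public class VersionParseDemo {
        public static void main(String[] args) {
            String version = "15.4.0"; // sample dotted release version
            StringTokenizer tokenizer = new StringTokenizer(version, ".");
            // Each component is optional, mirroring the chained GetNext() checks above.
            int major = tokenizer.hasMoreTokens() ? Integer.parseInt(tokenizer.nextToken()) : 0;
            int minor = tokenizer.hasMoreTokens() ? Integer.parseInt(tokenizer.nextToken()) : 0;
            int bugfix = tokenizer.hasMoreTokens() ? Integer.parseInt(tokenizer.nextToken()) : 0;
            System.out.printf("major=%d minor=%d bugfix=%d%n", major, minor, bugfix);
        }
    }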
/external/protobuf/python/google/protobuf/ |
text_format.py
    227  tokenizer = _Tokenizer(lines)
    228  while not tokenizer.AtEnd():
    229  _MergeField(tokenizer, message, allow_multiple_scalars)
    302  def _MergeField(tokenizer, message, allow_multiple_scalars):
    306  tokenizer: A tokenizer to parse the field name and values.
    316  if tokenizer.TryConsume('['):
    317  name = [tokenizer.ConsumeIdentifier()]
    318  while tokenizer.TryConsume('.'):
    319  name.append(tokenizer.ConsumeIdentifier()
    [all...]