/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/
inspect.py
    39: import tokenize
    651: elif type == tokenize.NEWLINE:
    658: elif type == tokenize.INDENT:
    661: elif type == tokenize.DEDENT:
    668: elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
    677: tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    [all...]

tokenize.py
    20: tokenize(readline, tokeneater=printtoken)
    34: __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
    155: def tokenize(readline, tokeneater=printtoken):
    157: The tokenize() function accepts two parameters: one representing the
    158: input stream, and one providing an output mechanism for tokenize().
    254: # Output text will tokenize the back to the input
    351: ("<tokenize>", lnum, pos, line))
    424: tokenize(open(sys.argv[1]).readline)
    426: tokenize(sys.stdin.readline)
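Line 155 above is the older callback-style entry point (tokenize(readline, tokeneater=printtoken)) that Python 2.7 still ships alongside generate_tokens(). A minimal Python 2 sketch of driving it with a custom tokeneater; the sample source string is purely illustrative:

    # Python 2 only: callback-style API from the stdlib tokenize.py listed above.
    import tokenize
    from StringIO import StringIO

    SOURCE = "x = 1 + 2\n"  # hypothetical input, not taken from the files above

    def tokeneater(toktype, tokstring, start, end, line):
        # Called once per token with its type, text, (row, col) start/end,
        # and the logical line it was found on.
        print toktype, repr(tokstring), start, end

    tokenize.tokenize(StringIO(SOURCE).readline, tokeneater)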
pyclbr.py
    44: import tokenize
    153: g = tokenize.generate_tokens(f.readline)

gettext.py
    84: import token, tokenize
    85: tokens = tokenize.generate_tokens(StringIO(plural).readline)
    88: except tokenize.TokenError:
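gettext.py (lines 84-88) runs a plural-forms expression through generate_tokens() and traps TokenError on malformed input. A rough Python 2 sketch of the same pattern, using a made-up expression string:

    import tokenize
    from StringIO import StringIO

    plural = "(n != 1)"  # illustrative plural expression, not from gettext itself

    try:
        for toktype, tokstring, start, end, line in tokenize.generate_tokens(StringIO(plural).readline):
            print tokstring
    except tokenize.TokenError as err:
        # Raised for input that ends mid-construct, e.g. an unclosed "(".
        print "bad plural expression:", err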
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/ |
patcomp.py
    18: from .pgen2 import driver, literals, token, tokenize, parse, grammar
    36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)

refactor.py
    26: from .pgen2 import driver, tokenize, token
    132: gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    136: ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    333: encoding = tokenize.detect_encoding(f.readline)[0]
    658: """Wraps a tokenize stream to systematically modify start/end."""
    659: tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    672: """Generates lines as expected by tokenize from a list of lines.
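refactor.py (line 333) calls the pgen2 copy of detect_encoding() to read a file's coding cookie or BOM before decoding it. A small sketch, assuming any Python source file path you supply:

    from lib2to3.pgen2 import tokenize

    path = "example.py"  # hypothetical path; substitute a real source file
    f = open(path, "rb")
    try:
        # detect_encoding() returns (encoding, lines_already_read).
        encoding = tokenize.detect_encoding(f.readline)[0]
    finally:
        f.close()
    print "detected encoding:", encoding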
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/tests/ |
test_parser.py
    20: from lib2to3.pgen2 import tokenize
    167: encoding = tokenize.detect_encoding(fp.readline)[0]
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/ |
inspect.py
    39: import tokenize
    651: elif type == tokenize.NEWLINE:
    658: elif type == tokenize.INDENT:
    661: elif type == tokenize.DEDENT:
    668: elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
    677: tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    [all...]

tokenize.py
    20: tokenize(readline, tokeneater=printtoken)
    34: __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
    155: def tokenize(readline, tokeneater=printtoken):
    157: The tokenize() function accepts two parameters: one representing the
    158: input stream, and one providing an output mechanism for tokenize().
    254: # Output text will tokenize the back to the input
    351: ("<tokenize>", lnum, pos, line))
    424: tokenize(open(sys.argv[1]).readline)
    426: tokenize(sys.stdin.readline)

pyclbr.py
    44: import tokenize
    153: g = tokenize.generate_tokens(f.readline)
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/ |
patcomp.py
    18: from .pgen2 import driver, literals, token, tokenize, parse, grammar
    36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)

refactor.py
    26: from .pgen2 import driver, tokenize, token
    132: gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    136: ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    333: encoding = tokenize.detect_encoding(f.readline)[0]
    658: """Wraps a tokenize stream to systematically modify start/end."""
    659: tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    672: """Generates lines as expected by tokenize from a list of lines.
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/tests/ |
test_parser.py
    20: from lib2to3.pgen2 import tokenize
    167: encoding = tokenize.detect_encoding(fp.readline)[0]
/packages/apps/UnifiedEmail/src/com/android/emailcommon/mail/ |
Address.java
    137: final Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(rawAddress);
    205: Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(addressList);
    228: Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(addressList);
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/pgen2/ |
tokenize.py
    23: tokenize(readline, tokeneater=printtoken)
    37: __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
    160: def tokenize(readline, tokeneater=printtoken):
    162: The tokenize() function accepts two parameters: one representing the
    163: input stream, and one providing an output mechanism for tokenize().
    256: in the same way as the tokenize() generator.
    335: # Output text will tokenize the back to the input
    427: ("<tokenize>", lnum, pos, line))
    499: if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    500: else: tokenize(sys.stdin.readline
    [all...]

pgen.py
    5: from . import grammar, token, tokenize
    19: self.generator = tokenize.generate_tokens(stream.readline)
    323: while tup[0] in (tokenize.COMMENT, tokenize.NL):
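pgen.py (line 323) advances its token generator past COMMENT and NL tokens so the grammar parser only ever sees meaningful tokens. A hedged Python 2 sketch of that skip loop over a throwaway input string:

    from StringIO import StringIO
    from lib2to3.pgen2 import tokenize

    TEXT = "# a comment\n\nstart: NAME\n"  # illustrative grammar-like input

    gen = tokenize.generate_tokens(StringIO(TEXT).readline)
    tup = gen.next()
    # Skip tokens that carry no grammar content, mirroring the loop at line 323.
    while tup[0] in (tokenize.COMMENT, tokenize.NL):
        tup = gen.next()
    print tup  # first token the parser would actually consume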
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/pgen2/ |
tokenize.py
    23: tokenize(readline, tokeneater=printtoken)
    37: __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
    160: def tokenize(readline, tokeneater=printtoken):
    162: The tokenize() function accepts two parameters: one representing the
    163: input stream, and one providing an output mechanism for tokenize().
    256: in the same way as the tokenize() generator.
    335: # Output text will tokenize the back to the input
    427: ("<tokenize>", lnum, pos, line))
    499: if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    500: else: tokenize(sys.stdin.readline
    [all...]

pgen.py
    5: from . import grammar, token, tokenize
    19: self.generator = tokenize.generate_tokens(stream.readline)
    323: while tup[0] in (tokenize.COMMENT, tokenize.NL):
/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/ |
parser.py
    3: import opcode, re, sys, token, tokenize
    96: # Tokenize, to find excluded suites, to find docstrings, and to find
    105: tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
    109: tokenize.tok_name.get(toktype, toktype),
    145: if ttext.strip() and toktype != tokenize.COMMENT:
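coverage's parser (lines 105-145) walks generate_tokens() output, turning numeric token types into readable names via tok_name and treating comments and whitespace-only tokens as non-code. A minimal Python 2 sketch of that classification, over an invented one-line source:

    import tokenize
    from StringIO import StringIO

    TEXT = "answer = 42  # the answer\n"  # illustrative source, not from coverage

    for toktype, ttext, start, end, line in tokenize.generate_tokens(StringIO(TEXT).readline):
        name = tokenize.tok_name.get(toktype, toktype)
        is_code = bool(ttext.strip()) and toktype != tokenize.COMMENT
        print "%-5s %-10s %r" % ("code" if is_code else "-", name, ttext)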
/packages/apps/UnifiedEmail/src/com/android/mail/compose/ |
FromAddressSpinnerAdapter.java
    113: return String.format(sFormatString, Rfc822Tokenizer.tokenize(address)[0].getAddress());
/packages/apps/UnifiedEmail/src/com/android/mail/providers/ |
ReplyFromAccount.java
    115: Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(possibleCustomFrom);
/external/chromium_org/third_party/WebKit/Source/devtools/front_end/cm/ |
php.js
    9: if (stream.match(delim)) state.tokenize = null;
    34: state.tokenize = heredoc(stream.current().slice(3));
    35: return state.tokenize(stream, state);
    83: } else if (isPHP && state.php.tokenize == null && stream.match("?>")) {
/external/e2fsprogs/e2fsck/ |
dict.c
    1227: static int tokenize(char *string, ...)
    1322: if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
    [all...]
/packages/apps/Bluetooth/src/com/android/bluetooth/map/ |
BluetoothMapbMessageMms.java
    501: Rfc822Token tokens[] = Rfc822Tokenizer.tokenize(headerValue);
    504: Rfc822Token tokens[] = Rfc822Tokenizer.tokenize(headerValue);
    507: Rfc822Token tokens[] = Rfc822Tokenizer.tokenize(headerValue);
    510: Rfc822Token tokens[] = Rfc822Tokenizer.tokenize(headerValue);
    513: Rfc822Token tokens[] = Rfc822Tokenizer.tokenize(headerValue);
    [all...]
/external/deqp/framework/randomshaders/ |
rsgBuiltinFunctions.hpp
    43: void tokenize (GeneratorState& state, TokenStream& str) const;
    95: void UnaryBuiltinVecFunc<GetValueRangeWeight, ComputeValueRange, Evaluate>::tokenize (GeneratorState& state, TokenStream& str) const
    98: m_child->tokenize(state, str);