    Searched refs: tokenize (Results 1 - 25 of 112)


  /external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/
phystokens.py 3 import keyword, re, token, tokenize namespace
9 tokenize.generate_tokens() doesn't return a token for the backslash that
38 if last_ttype == tokenize.COMMENT:
77 ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
81 tokgen = tokenize.generate_tokens(StringIO(source).readline)
98 tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
backward.py 75 # Python 3.2 provides `tokenize.open`, the best way to open source files.
76 import tokenize namespace
78 open_source = tokenize.open # pylint: disable=E1101
81 detect_encoding = tokenize.detect_encoding # pylint: disable=E1101
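
Both coverage.py helpers above drive the stdlib tokenizer directly: phystokens.py walks tokenize.generate_tokens() output, and backward.py aliases tokenize.open/detect_encoding when Python 3.2+ provides them. A minimal sketch of the generate_tokens() loop (standard library only; the comment-extraction use case is illustrative, not coverage.py's logic):

    import io
    import tokenize

    def comment_tokens(source):
        # generate_tokens() yields (type, text, (srow, scol), (erow, ecol), line)
        # tuples, the same 5-tuples phystokens.py classifies above.
        tokgen = tokenize.generate_tokens(io.StringIO(source).readline)
        for ttype, ttext, (srow, _), _, _ in tokgen:
            if ttype == tokenize.COMMENT:
                yield srow, ttext

    for lineno, text in comment_tokens(u"x = 1  # set x\ny = 2\n"):
        print(lineno, text)  # -> 1 # set x
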
  /external/chromium_org/third_party/WebKit/Source/devtools/front_end/cm/
coffeescript.js 72 state.tokenize = longComment;
73 return state.tokenize(stream, state);
124 state.tokenize = tokenFactory(stream.current(), 'string');
125 return state.tokenize(stream, state);
130 state.tokenize = tokenFactory(stream.current(), 'string-2');
131 return state.tokenize(stream, state);
182 state.tokenize = tokenBase;
192 state.tokenize = tokenBase;
203 state.tokenize = tokenBase;
255 var style = state.tokenize(stream, state)
    [all...]
clike.js 22 state.tokenize = tokenString(ch);
23 return state.tokenize(stream, state);
35 state.tokenize = tokenComment;
69 state.tokenize = null;
78 state.tokenize = null;
111 tokenize: null,
127 var style = (state.tokenize || tokenBase)(stream, state);
148 if (state.tokenize != tokenBase && state.tokenize != null) return CodeMirror.Pass;
181 state.tokenize = cppHook
    [all...]
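
Both CodeMirror modes above use the same trick: the active tokenizer is a function stored in state.tokenize, swapped out on entering a string or comment and reset to the base tokenizer when the construct closes. A rough Python rendering of that pattern (the Stream and State shapes are simplified stand-ins, not CodeMirror's actual API):

    class Stream(object):
        # Cursor over one line of text, loosely like CodeMirror's stream.
        def __init__(self, text):
            self.text, self.pos = text, 0
        def next(self):
            self.pos += 1
            return self.text[self.pos - 1]
        def eol(self):
            return self.pos >= len(self.text)

    class State(object):
        tokenize = None  # None = use the base tokenizer, as in clike.js

    def token_string(quote):
        # Factory like tokenString(ch): consumes until the closing quote,
        # then hands control back to the base tokenizer.
        def tokenize(stream, state):
            while not stream.eol():
                if stream.next() == quote:
                    state.tokenize = None
                    break
            return 'string'
        return tokenize

    def token_base(stream, state):
        ch = stream.next()
        if ch in '"\'':
            state.tokenize = token_string(ch)
            return state.tokenize(stream, state)  # the re-dispatch seen above
        return None if ch.isspace() else 'word'

    def run(line):
        stream, state, out = Stream(line), State(), []
        while not stream.eol():
            out.append((state.tokenize or token_base)(stream, state))
        return [s for s in out if s]

    print(run('a "b c" d'))  # -> ['word', 'string', 'word']
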
  /external/chromium_org/third_party/WebKit/Source/core/html/parser/
CSSPreloadScanner.h 67 inline void tokenize(UChar, const SegmentedString&);
CSSPreloadScanner.cpp 60 tokenize(*it, source);
81 inline void CSSPreloadScanner::tokenize(UChar c, const SegmentedString& source) function in class:WebCore::CSSPreloadScanner
  /external/chromium_org/third_party/libjingle/source/talk/base/
stringencode_unittest.cc 236 EXPECT_EQ(5ul, tokenize("one two three four five", ' ', &fields));
238 EXPECT_EQ(1ul, tokenize("one", ' ', &fields));
242 EXPECT_EQ(5ul, tokenize(" one two three four five ", ' ', &fields));
244 EXPECT_EQ(1ul, tokenize(" one ", ' ', &fields));
246 EXPECT_EQ(0ul, tokenize(" ", ' ', &fields));
253 tokenize("find middle one", ' ', &fields);
259 tokenize(" find middle one ", ' ', &fields);
263 tokenize(" ", ' ', &fields);
283 ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', NULL));
286 tokenize("A B C", ' ', '"', '"', &fields)
    [all...]
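
The unit tests above pin down talk/base's tokenize() contract: runs of the delimiter collapse (so no empty fields), and an optional start/end marker pair keeps bracketed or quoted spans together. A hedged Python equivalent of that behavior (the real implementation lives in stringencode.cc; details such as whether the markers stay in the field are assumptions here):

    def tokenize(text, delim, start=None, end=None):
        # Split on delim, dropping empty fields; a start..end span
        # (quotes, parentheses) is swallowed into a single field.
        fields, buf, in_span = [], [], False
        for ch in text:
            if in_span:
                buf.append(ch)
                in_span = (ch != end)
            elif start is not None and ch == start:
                buf.append(ch)
                in_span = True
            elif ch == delim:
                if buf:
                    fields.append(''.join(buf))
                    buf = []
            else:
                buf.append(ch)
        if buf:
            fields.append(''.join(buf))
        return fields

    assert tokenize('one two three four five', ' ') == ['one', 'two', 'three', 'four', 'five']
    assert tokenize('  one  ', ' ') == ['one']
    assert tokenize('  ', ' ') == []
    print(tokenize('D "A B C" E', ' ', '"', '"'))  # ['D', '"A B C"', 'E']
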
  /external/chromium/testing/gmock/scripts/generator/cpp/
ast.py 46 from cpp import tokenize namespace
549 if parts[-1].token_type == tokenize.NAME:
579 if (type_name and type_name[-1].token_type == tokenize.NAME and
580 p.token_type == tokenize.NAME):
581 type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
738 if token.token_type == tokenize.NAME:
749 if next.token_type == tokenize.SYNTAX and next.name == '(':
754 syntax = tokenize.SYNTAX
763 new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';'
    [all...]
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/
tabnanny.py 26 import tokenize namespace
27 if not hasattr(tokenize, 'NL'):
28 raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
106 process_tokens(tokenize.generate_tokens(f.readline))
108 except tokenize.TokenError, msg:
274 INDENT = tokenize.INDENT
275 DEDENT = tokenize.DEDENT
276 NEWLINE = tokenize.NEWLINE
277 JUNK = tokenize.COMMENT, tokenize.N
    [all...]
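
tabnanny (which appears twice here, once per prebuilt platform) guards against a too-old tokenize module at import time, then feeds generate_tokens() straight into process_tokens(). A small driver showing that API on an ambiguously indented snippet (the sample source is mine; NannyNag is the exception tabnanny raises for tab/space ambiguity):

    import io
    import tabnanny
    import tokenize

    source = u"if True:\n\tx = 1\n        y = 2\n"  # tab vs. spaces at one level
    try:
        tabnanny.process_tokens(tokenize.generate_tokens(io.StringIO(source).readline))
    except tabnanny.NannyNag as nag:
        print('Line %d: %s' % (nag.get_lineno(), nag.get_msg()))
    except tokenize.TokenError as msg:
        print('Token problem:', msg)
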
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/
tabnanny.py 26 import tokenize namespace
27 if not hasattr(tokenize, 'NL'):
28 raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
106 process_tokens(tokenize.generate_tokens(f.readline))
108 except tokenize.TokenError, msg:
274 INDENT = tokenize.INDENT
275 DEDENT = tokenize.DEDENT
276 NEWLINE = tokenize.NEWLINE
277 JUNK = tokenize.COMMENT, tokenize.N
    [all...]
  /packages/apps/QuickSearchBox/src/com/android/quicksearchbox/
LevenshteinSuggestionFormatter.java 43 final Token[] queryTokens = tokenize(query);
44 final Token[] suggestionTokens = tokenize(suggestion);
99 Token[] tokenize(final String seq) { method in class:LevenshteinSuggestionFormatter
  /cts/tests/tests/text/src/android/text/util/cts/
Rfc822TokenizerTest.java 114 Rfc822Token[] tokens = Rfc822Tokenizer.tokenize("");
118 tokens = Rfc822Tokenizer.tokenize(text);
124 tokens = Rfc822Tokenizer.tokenize(text);
130 Rfc822Tokenizer.tokenize(null);
  /external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/
pep8.py 102 import tokenize namespace
370 if (token_type == tokenize.OP and
373 prev_type == tokenize.NAME and
443 prev_type = tokenize.OP
446 if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
457 elif token_type == tokenize.OP:
464 if ((prev_type != tokenize.OP or prev_text in '}])') and not
465 (prev_type == tokenize.NAME and iskeyword(prev_text)))
    [all...]
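
pep8.py classifies each token against the tokenize constants matched above (OP, NAME, NL, NEWLINE, ERRORTOKEN) and keeps the previous token around to judge spacing. A stripped-down sketch of that style of check (this toy rule flags a missing space after a comma, roughly pep8's E231; the real checks are far more involved):

    import io
    import tokenize

    def missing_space_after_comma(source):
        # Walk the (type, text, start, end, line) stream the way pep8.py
        # does, remembering where the previous comma ended.
        prev_end = None
        for ttype, ttext, start, end, _ in tokenize.generate_tokens(
                io.StringIO(source).readline):
            if prev_end is not None and start == prev_end and \
                    ttype not in (tokenize.NL, tokenize.NEWLINE):
                yield start
            prev_end = end if (ttype == tokenize.OP and ttext == ',') else None

    for row, col in missing_space_after_comma(u'f(a,b, c)\n'):
        print('E231 missing whitespace after comma at %d:%d' % (row, col))
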
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
driver.py 26 from . import grammar, parse, token, tokenize, pgen namespace
40 # XXX Move the prefix computation into a wrapper around tokenize.
59 if type in (tokenize.COMMENT, tokenize.NL):
88 tokens = tokenize.generate_tokens(stream.readline)
105 tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
grammar.py 10 token module; the Python tokenize module reports all operators as the
19 from . import token, tokenize namespace
129 # Map from operator to number (since tokenize doesn't do this)
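
The pgen2 driver tokenizes its input itself (the generate_tokens() calls above) and keeps COMMENT and NL tokens as node prefixes so the tree prints back byte-for-byte. A short usage sketch against lib2to3's public API (long part of the CPython stdlib, though deprecated and eventually removed in recent releases):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string('x = 1  # comment kept as prefix\n')
    # Comments and whitespace ride along as token prefixes, so the
    # tree round-trips the source exactly.
    assert str(tree) == 'x = 1  # comment kept as prefix\n'
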
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
driver.py 26 from . import grammar, parse, token, tokenize, pgen namespace
40 # XXX Move the prefix computation into a wrapper around tokenize.
59 if type in (tokenize.COMMENT, tokenize.NL):
88 tokens = tokenize.generate_tokens(stream.readline)
105 tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
grammar.py 10 token module; the Python tokenize module reports all operators as the
19 from . import token, tokenize namespace
129 # Map from operator to number (since tokenize doesn't do this)
  /external/apache-xml/src/main/java/org/apache/xpath/compiler/
Lexer.java 96 void tokenize(String pat) throws javax.xml.transform.TransformerException method in class:Lexer
98 tokenize(pat, null); method
109 void tokenize(String pat, Vector targetStrings) method in class:Lexer
  /frameworks/ex/common/java/com/android/common/
Rfc822Validator.java 66 Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(text);
124 Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(cs);
  /frameworks/ex/chips/src/com/android/ex/chips/
SingleRecipientArrayAdapter.java 61 destination.setText(Rfc822Tokenizer.tokenize(entry.getDestination())[0].getAddress());
  /packages/providers/ContactsProvider/src/com/android/providers/contacts/
HanziToPinyin.java 97 private void tokenize(char character, Token token) { method in class:HanziToPinyin
152 tokenize(character, token); method
  /packages/apps/UnifiedEmail/src/com/android/mail/
EmailAddress.java 81 // Try and tokenize the string
82 final Rfc822Token[] tokens = Rfc822Tokenizer.tokenize(rawAddress);
  /frameworks/base/core/java/android/text/util/
Rfc822Tokenizer.java 45 public static void tokenize(CharSequence text, Collection<Rfc822Token> out) { method in class:Rfc822Tokenizer
171 public static Rfc822Token[] tokenize(CharSequence text) { method in class:Rfc822Tokenizer
173 tokenize(text, out); method
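
Rfc822Tokenizer.tokenize() turns an address header into Rfc822Token name/address/comment triples; the two matches above are the Collection-filling core and its array convenience wrapper. Python's stdlib offers the comparable operation as email.utils.getaddresses(), shown here for contrast:

    from email.utils import getaddresses

    # Each RFC 822 token becomes a (display-name, address) pair.
    pairs = getaddresses(['Alice <alice@example.com>, bob@example.com'])
    print(pairs)  # [('Alice', 'alice@example.com'), ('', 'bob@example.com')]
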
  /packages/apps/QuickSearchBox/tests/src/com/android/quicksearchbox/
LevenshteinFormatterTest.java 44 Token[] tokens = mFormatter.tokenize(input);
94 Token[] sourceTokens = mFormatter.tokenize(source);
95 Token[] targetTokens = mFormatter.tokenize(target);
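
These tests tokenize both the query and the suggestion into word tokens and then, as the class name suggests, align the two lists by Levenshtein distance to decide what to highlight. A compact token-level edit distance for reference (standard dynamic programming; the formatter's alignment and highlighting logic is not reproduced here):

    def levenshtein(a, b):
        # Edit distance over token lists: prev/cur are rows of the DP table.
        prev = list(range(len(b) + 1))
        for i, ta in enumerate(a, 1):
            cur = [i]
            for j, tb in enumerate(b, 1):
                cur.append(min(prev[j] + 1,                 # delete ta
                               cur[j - 1] + 1,              # insert tb
                               prev[j - 1] + (ta != tb)))   # substitute
            prev = cur
        return prev[-1]

    print(levenshtein('one too three'.split(), 'one two three'.split()))  # 1
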
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/idlelib/
ScriptBinding.py 24 import tokenize namespace
72 tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
73 except tokenize.TokenError, msg:
