    Searched refs:generate_tokens (Results 1 - 24 of 24)

  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
driver.py 88 tokens = tokenize.generate_tokens(stream.readline)
105 tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
tokenize.py 6 generate_tokens(readline) is a generator that breaks a stream of
38 "generate_tokens", "untokenize"]
171 tuples generated by generate_tokens().
180 for token_info in generate_tokens(readline):
336 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
339 t2 = [tok[:2] for tok in generate_tokens(readline)]
345 def generate_tokens(readline): function
347 The generate_tokens() generator requires one argument, readline, which
pgen.py 19 self.generator = tokenize.generate_tokens(stream.readline)
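
Both pgen2 call sites above follow the same contract: tokenize.generate_tokens() takes any zero-argument readline callable and yields 5-tuples of (type, string, start, end, line). The linux-x86 tree below repeats the same hits. A minimal sketch of the driver.py line 105 pattern, in the Python 2.7 syntax these prebuilts ship; the source string is illustrative:

    import tokenize
    import StringIO

    # Tokenize an in-memory string by wrapping it in StringIO, as
    # driver.py line 105 does above.
    source = "x = 1\n"
    tokens = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    for toknum, tokval, start, end, line in tokens:
        print toknum, repr(tokval), start, end
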
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
driver.py 88 tokens = tokenize.generate_tokens(stream.readline)
105 tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
tokenize.py 6 generate_tokens(readline) is a generator that breaks a stream of
38 "generate_tokens", "untokenize"]
171 tuples generated by generate_tokens().
180 for token_info in generate_tokens(readline):
336 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
339 t2 = [tok[:2] for tok in generate_tokens(readline)]
345 def generate_tokens(readline): function
347 The generate_tokens() generator requires one argument, readline, which
pgen.py 19 self.generator = tokenize.generate_tokens(stream.readline)
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/
tokenize.py 3 generate_tokens(readline) is a generator that breaks a stream of
34 __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
166 tuples generated by generate_tokens().
175 for token_info in generate_tokens(readline):
255 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
258 t2 = [tok[:2] for tok in generate_tokens(readline)]
264 def generate_tokens(readline): function
266 The generate_tokens() generator requires one argument, readline, which
pyclbr.py 153 g = tokenize.generate_tokens(f.readline)
tabnanny.py 106 process_tokens(tokenize.generate_tokens(f.readline))
cgitb.py 84 for ttype, token, start, end, line in tokenize.generate_tokens(reader):
gettext.py 85 tokens = tokenize.generate_tokens(StringIO(plural).readline)
trace.py 427 for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
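
The standard-library consumers above all unpack the same 5-tuple; trace.py and cgitb.py additionally filter on the token type. A minimal sketch of that filtering pattern (Python 2.7); 'example.py' is a placeholder path:

    import token
    import tokenize

    # Scan a file and report NAME tokens, the way trace.py line 427
    # walks the token stream above.
    with open('example.py') as f:
        for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
            if ttype == token.NAME:
                print "NAME %r at line %d, col %d" % (tstr, start[0], start[1])
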
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/
tokenize.py 3 generate_tokens(readline) is a generator that breaks a stream of
34 __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
166 tuples generated by generate_tokens().
175 for token_info in generate_tokens(readline):
255 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
258 t2 = [tok[:2] for tok in generate_tokens(readline)]
264 def generate_tokens(readline): function
266 The generate_tokens() generator requires one argument, readline, which
pyclbr.py 153 g = tokenize.generate_tokens(f.readline)
tabnanny.py 106 process_tokens(tokenize.generate_tokens(f.readline))
cgitb.py 84 for ttype, token, start, end, line in tokenize.generate_tokens(reader):
gettext.py 85 tokens = tokenize.generate_tokens(StringIO(plural).readline)
trace.py 427 for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/idlelib/
ScriptBinding.py 72 tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
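
ScriptBinding.py pipes the generator straight into tabnanny.process_tokens(), which raises tabnanny.NannyNag on ambiguous indentation. A hedged sketch of that call (Python 2.7); the path is a placeholder:

    import tabnanny
    import tokenize

    f = open('example.py')  # placeholder path
    try:
        # process_tokens() consumes the token stream and raises
        # NannyNag when tabs and spaces disagree about indentation.
        tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
    except tabnanny.NannyNag as nag:
        print "ambiguous indentation at line %d" % nag.get_lineno()
    finally:
        f.close()
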
  /prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/
patcomp.py 36 tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
refactor.py 132 gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
659 tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
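
refactor.py line 659 shows that readline need not come from a file or StringIO: any zero-argument callable that returns successive lines will do, here a generator's .next method. A minimal sketch (Python 2.7); gen_lines below is a hypothetical stand-in for refactor.py's line producer:

    import tokenize

    def gen_lines():
        # Hypothetical stand-in for refactor.py's gen_lines();
        # generate_tokens() treats StopIteration from readline as EOF.
        yield "if x:\n"
        yield "    y = 2\n"

    for tok in tokenize.generate_tokens(gen_lines().next):
        print tok[:2]
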
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/idlelib/
ScriptBinding.py 72 tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
  /prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/
patcomp.py 36 tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
refactor.py 132 gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
659 tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
