
Lines Matching refs:lines

163 def StartsWith(lines, pos, string):
164 """Returns True iff the given position in lines starts with 'string'."""
166 return lines[pos.line][pos.column:].startswith(string)
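
Here `pos` is a cursor carrying a line index and a column offset into lines. A minimal runnable sketch of the check, assuming a simple (line, column) Cursor pair; the script's own cursor type is not visible in these matches:

  from collections import namedtuple

  # Hypothetical stand-in for the script's cursor: a position into lines.
  Cursor = namedtuple('Cursor', ['line', 'column'])

  def StartsWith(lines, pos, string):
    """Returns True iff the given position in lines starts with 'string'."""
    return lines[pos.line][pos.column:].startswith(string)

  lines = ['$var n = 3\n', 'Hello $n\n']
  print(StartsWith(lines, Cursor(0, 0), '$var'))  # True
  print(StartsWith(lines, Cursor(1, 6), '$n'))    # True
  print(StartsWith(lines, Cursor(1, 0), '$var'))  # False
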
174 # We found a match for regex in lines
186 def FindFirst(lines, token_table, cursor):
187 """Finds the first occurrence of any string in strings in lines."""
191 for line in lines[start.line:]:
202 return MakeToken(lines, found_start, found_end, token_type)
204 # We failed to find any matching token in lines
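
Pieced together, FindFirst walks lines forward from the cursor, tries every regex in the token table on each line, and wraps the earliest hit in a token (202). A sketch under those assumptions; the TOKEN_TABLE below is a hypothetical stand-in, not the script's own table:

  import re
  from collections import namedtuple

  Cursor = namedtuple('Cursor', ['line', 'column'])
  Token = namedtuple('Token', ['start', 'end', 'value', 'token_type'])

  # Hypothetical table mapping each regex to the token type it produces.
  TOKEN_TABLE = [
      (re.compile(r'\$var\s'), '$var'),
      (re.compile(r'\[\['), '[['),
  ]

  def FindFirst(lines, token_table, cursor):
    """Returns the first token at or after cursor, or None."""
    start = cursor
    for line_index in range(start.line, len(lines)):
      # Only the first scanned line is constrained by the start column.
      column = start.column if line_index == start.line else 0
      best = None
      for regex, token_type in token_table:
        m = regex.search(lines[line_index], column)
        if m and (best is None or m.start() < best[0].start()):
          best = (m, token_type)
      if best:
        m, token_type = best
        found_start = Cursor(line_index, m.start())
        found_end = Cursor(line_index, m.end())
        return Token(found_start, found_end,
                     lines[line_index][m.start():m.end()], token_type)
    return None  # We failed to find any token in lines.

  lines = ['Hello\n', '$var n = 3\n']
  print(FindFirst(lines, TOKEN_TABLE, Cursor(0, 0)).token_type)  # $var
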
208 def SubString(lines, start, end):
209 """Returns a substring in lines."""
212 end = Cursor(len(lines) - 1, len(lines[-1]))
218 return lines[start.line][start.column:end.column]
220 result_lines = ([lines[start.line][start.column:]] +
221 lines[start.line + 1:end.line] +
222 [lines[end.line][:end.column]])
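
Lines 220-222 show the multi-line case: the tail of the start line, the whole lines in between, and the head of the end line, joined back together. A self-contained sketch, treating a missing end cursor as end-of-input as line 212 suggests:

  from collections import namedtuple

  Cursor = namedtuple('Cursor', ['line', 'column'])

  def SubString(lines, start, end):
    """Returns the text in lines between the start and end cursors."""
    if end is None:  # No end cursor means "through the end of the input".
      end = Cursor(len(lines) - 1, len(lines[-1]))
    if start.line == end.line:
      return lines[start.line][start.column:end.column]
    result_lines = ([lines[start.line][start.column:]] +
                    lines[start.line + 1:end.line] +
                    [lines[end.line][:end.column]])
    return ''.join(result_lines)

  lines = ['abc\n', 'def\n', 'ghi\n']
  print(repr(SubString(lines, Cursor(0, 1), Cursor(2, 2))))  # 'bc\ndef\ngh'
  print(repr(SubString(lines, Cursor(1, 0), None)))          # 'def\nghi\n'
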
229 # First, completely remove lines containing nothing but a meta
233 # Then, remove meta comments from contentful lines.
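
A sketch of that two-pass strip, assuming pump-style `$$` meta comments; the script's actual marker and regexes are not visible in these matches:

  import re

  def StripMetaComments(text):
    """Removes meta comments from the given text."""
    # First, completely remove lines containing nothing but a meta
    # comment, trailing newline included.
    text = re.sub(r'^\s*\$\$.*\n', '', text, flags=re.MULTILINE)
    # Then, remove meta comments from contentful lines.
    return re.sub(r'\s*\$\$.*', '', text)

  source = '$$ a comment-only line\nx = 1  $$ trailing comment\n'
  print(repr(StripMetaComments(source)))  # 'x = 1\n'
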
237 def MakeToken(lines, start, end, token_type):
240 return Token(start, end, SubString(lines, start, end), token_type)
243 def ParseToken(lines, pos, regex, token_type):
244 line = lines[pos.line][pos.column:]
247 return MakeToken(lines, pos, pos + m.end(), token_type)
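
MakeToken (237-240) bundles two cursors, their substring, and a type; ParseToken (243-247) anchors a regex at the cursor with match() rather than search() and advances the cursor by the match length, hence `pos + m.end()`. A sketch assuming cursor addition offsets the column:

  import re
  from collections import namedtuple

  Token = namedtuple('Token', ['start', 'end', 'value', 'token_type'])

  class Cursor(object):
    """A (line, column) position; adding an int offsets the column."""

    def __init__(self, line, column):
      self.line = line
      self.column = column

    def __add__(self, offset):
      return Cursor(self.line, self.column + offset)

  def SubString(lines, start, end):
    # Single-line case only, which is all ParseToken needs here.
    return lines[start.line][start.column:end.column]

  def MakeToken(lines, start, end, token_type):
    """Creates a new instance of Token."""
    return Token(start, end, SubString(lines, start, end), token_type)

  def ParseToken(lines, pos, regex, token_type):
    """Parses a token of the given type starting at pos."""
    line = lines[pos.line][pos.column:]
    m = regex.match(line)  # Anchored at the cursor, unlike search().
    if m and m.end() > 0:
      return MakeToken(lines, pos, pos + m.end(), token_type)
    raise RuntimeError('expected %s on line %d' % (token_type, pos.line))

  ID_REGEX = re.compile(r'[_A-Za-z]\w*')
  lines = ['$var foo = 3\n']
  tok = ParseToken(lines, Cursor(0, 5), ID_REGEX, 'id')
  print(tok.value, tok.token_type)  # foo id
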
261 def Skip(lines, pos, regex):
262 line = lines[pos.line][pos.column:]
270 def SkipUntil(lines, pos, regex, token_type):
271 line = lines[pos.line][pos.column:]
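
Skip and SkipUntil differ in anchoring and failure mode: Skip match()es at the cursor and leaves the cursor alone when nothing matches, while SkipUntil search()es ahead and must find its target. A single-line sketch, consistent with the fragments:

  import re

  class Cursor(object):
    """A (line, column) position; adding an int offsets the column."""

    def __init__(self, line, column):
      self.line = line
      self.column = column

    def __add__(self, offset):
      return Cursor(self.line, self.column + offset)

    def __repr__(self):
      return 'Cursor(%d, %d)' % (self.line, self.column)

  def Skip(lines, pos, regex):
    """Returns the cursor just past a regex match at pos, else pos."""
    line = lines[pos.line][pos.column:]
    m = re.match(regex, line)  # Anchored at the cursor.
    return pos + m.end() if m else pos

  def SkipUntil(lines, pos, regex, token_type):
    """Returns the cursor of the next regex match at or after pos."""
    line = lines[pos.line][pos.column:]
    m = re.search(regex, line)
    if m is None:
      raise RuntimeError('expected %s on line %d' % (token_type, pos.line))
    return pos + m.start()

  lines = ['  ( x + 1 )\n']
  pos = Skip(lines, Cursor(0, 0), r'\s*')      # Past the leading spaces.
  pos = Skip(lines, pos, r'\(')                # Consume the open paren.
  print(SkipUntil(lines, pos, r'\(|\)', ')'))  # Cursor(0, 10)
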
281 def ParseExpTokenInParens(lines, pos):
283 pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
284 pos = Skip(lines, pos, r'\(')
286 pos = Skip(lines, pos, r'\)')
290 pos = SkipUntil(lines, pos, r'\(|\)', ')')
291 if SubString(lines, pos, pos + 1) == '(':
293 pos = Skip(lines, pos, r'\)')
300 return MakeToken(lines, start, pos, 'exp')
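
Lines 281-300 consume a parenthesized expression: skip optional white space, eat '(', then repeatedly SkipUntil the next paren, descending on '(' and closing on ')'. The nesting logic reduces to depth counting, sketched here over a single string for brevity:

  import re

  _PAREN_RE = re.compile(r'\(|\)')

  def ParseExpInParens(text, start):
    """Returns (expression, end_index) for '(...)' at start.

    Assumes well-formed input; the script's version works on a cursor
    over multiple lines and wraps the result in an 'exp' token.
    """
    pos = start
    while text[pos].isspace():  # OPTIONAL_WHITE_SPACES_REGEX, roughly.
      pos += 1
    if text[pos] != '(':
      raise RuntimeError('expected ( at index %d' % pos)
    depth, pos = 1, pos + 1
    exp_start = pos
    while depth > 0:
      m = _PAREN_RE.search(text, pos)
      if m is None:
        raise RuntimeError('unbalanced parentheses')
      depth += 1 if m.group() == '(' else -1
      pos = m.end()
    return text[exp_start:pos - 1], pos

  print(ParseExpInParens('$if (x == (y + 1)) [[', 3))
  # ('x == (y + 1)', 18)
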
310 def TokenizeLines(lines, pos):
312 found = FindFirst(lines, TOKEN_TABLE, pos)
314 yield MakeToken(lines, pos, Eof(), 'code')
321 prev_token = MakeToken(lines, pos, found.start, 'code')
328 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
330 pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
332 eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
334 pos = Skip(lines, eq_token.end, r'\s*')
336 if SubString(lines, pos, pos + 2) != '[[':
337 exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
344 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
346 pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
351 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
353 pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
355 dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
356 yield MakeToken(lines, pos, dots_pos, 'exp')
357 yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
360 yield MakeToken(lines, pos, new_pos, 'exp')
366 exp_token = ParseExpTokenInParens(lines, found.end)
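
Taken together, lines 310-366 outline one generator loop: FindFirst locates the next directive (312), everything before it is yielded as a 'code' token (314, 321), and the directive's kind selects what to parse next: an id, '=', and an expression or '[[' block (328-337); an id plus white space (344-346); an id and an exp..exp range (351-360); or a parenthesized expression (366). A heavily reduced sketch of that loop shape, handling only a '$var id = exp' directive on one line, with hypothetical regexes:

  import re
  from collections import namedtuple

  Token = namedtuple('Token', ['value', 'token_type'])

  VAR_RE = re.compile(r'\$var\s+')
  ID_RE = re.compile(r'[_A-Za-z]\w*')
  EQ_RE = re.compile(r'\s*=\s*')

  def TokenizeLine(line):
    """Yields tokens for one well-formed line of '$var id = exp'."""
    found = VAR_RE.search(line)
    if found is None:
      yield Token(line, 'code')  # No directive: the whole line is code.
      return
    if found.start() > 0:
      yield Token(line[:found.start()], 'code')
    yield Token('$var', '$var')
    id_m = ID_RE.match(line, found.end())
    yield Token(id_m.group(), 'id')
    eq_m = EQ_RE.match(line, id_m.end())
    yield Token('=', '=')
    # Rest of the line is the expression; the script also accepts a
    # multi-line [[ ]] block here (336-337).
    yield Token(line[eq_m.end():].rstrip('\n'), 'exp')

  for token in TokenizeLine('$var n = 3\n'):
    print(token)
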
385 lines = s.splitlines(True)
386 for token in TokenizeLines(lines, Cursor(0, 0)):
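
Note splitlines(True) at 385: it keeps the line terminators, so cursor arithmetic over the resulting lines reproduces the original text exactly, and 'code' tokens keep their newlines. Compare:

  s = 'a\nb\n'
  print(s.splitlines())                    # ['a', 'b'] - newlines dropped
  print(s.splitlines(True))                # ['a\n', 'b\n'] - newlines kept
  print(''.join(s.splitlines(True)) == s)  # True: lossless round trip
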
790 # The style guide made an exception to allow long header guard lines
797 # The style guide made an exception to allow long header guard lines
807 lines = string.splitlines()
809 for line in lines:
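
The duplicated comment at 790 and 797 guards the same exception in two branches of the line wrapper, and 807-809 drive it: the beautifier re-splits the generated text and wraps each line, leaving over-long header guards and includes alone. A sketch with hypothetical helper names, assuming the usual 80-column limit; the script's own predicates are more involved:

  def IsHeaderGuardOrInclude(line):
    """Hypothetical stand-in for the header-guard/include test."""
    stripped = line.lstrip()
    return stripped.startswith(('#ifndef ', '#define ', '#include '))

  def WrapLongLine(line, output):
    line = line.rstrip()
    if len(line) <= 80 or IsHeaderGuardOrInclude(line):
      # The style guide made an exception to allow long header guard
      # lines and includes, so they are emitted unwrapped.
      output.append(line)
    else:
      # Naive fallback: hard-wrap at column 80. The real wrapper is
      # smarter about comments and preprocessor directives.
      while len(line) > 80:
        output.append(line[:80])
        line = line[80:]
      output.append(line)

  def BeautifyCode(string):
    lines = string.splitlines()
    output = []
    for line in lines:
      WrapLongLine(line, output)
    return '\n'.join(output) + '\n'

  print(BeautifyCode('#define GUARD_' + 'X' * 80))
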