Lines matching refs:lines (every reference to the symbol 'lines')
164 def StartsWith(lines, pos, string):
167 return lines[pos.line][pos.column:].startswith(string)
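These matches trace through a line-based tokenizer in the style of googletest's scripts/pump.py: 'lines' is the input split into a list of lines, and positions in it are tracked by a (line, column) cursor. StartsWith (line 164) tests whether the text at a cursor begins with a given string. A minimal sketch, assuming a Cursor class that the file defines elsewhere (it is not part of the matches above):

    from dataclasses import dataclass

    @dataclass(frozen=True, order=True)
    class Cursor:
        """A (line, column) position in a list of text lines."""
        line: int = -1
        column: int = -1

        def __add__(self, offset):
            # Cursor arithmetic stays within a single line in this tokenizer.
            return Cursor(self.line, self.column + offset)

    def StartsWith(lines, pos, string):
        """Returns True iff lines, read from pos onward, starts with string."""
        return lines[pos.line][pos.column:].startswith(string)

    lines = ['$var n = 3\n', 'Hello $n\n']
    print(StartsWith(lines, Cursor(0, 0), '$var'))  # True
    print(StartsWith(lines, Cursor(1, 6), '$n'))    # True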
187 def FindFirst(lines, token_table, cursor):
192 for line in lines[start.line:]:
203 return MakeToken(lines, found_start, found_end, token_type)
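FindFirst (lines 187-203) scans forward from a cursor for the earliest match of any regex in a token table and wraps the matched span in a token. A sketch of the control flow implied by the matches, reusing Cursor from above and the MakeToken helper sketched after the SubString lines below; FindFirstInLine is a hypothetical per-line helper, not one of the matches:

    def FindFirstInLine(line, token_table):
        """Returns (start, length, token_type) of the leftmost match, or None."""
        best = None
        for regex, token_type in token_table:
            m = regex.search(line)
            if m and (best is None or m.start() < best[0]):
                best = (m.start(), m.end() - m.start(), token_type)
        return best

    def FindFirst(lines, token_table, cursor):
        """Finds the first token-table match in lines at or after cursor."""
        for line_number in range(cursor.line, len(lines)):
            line = lines[line_number]
            if line_number == cursor.line:
                line = line[cursor.column:]        # search only past the cursor
            found = FindFirstInLine(line, token_table)
            if found:
                start_column, length, token_type = found
                if line_number == cursor.line:
                    start_column += cursor.column  # undo the slice offset
                start = Cursor(line_number, start_column)
                return MakeToken(lines, start, start + length, token_type)
        return None  # no match before end-of-file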
209 def SubString(lines, start, end):
213 end = Cursor(len(lines) - 1, len(lines[-1]))
219 return lines[start.line][start.column:end.column]
221 result_lines = ([lines[start.line][start.column:]] +
222 lines[start.line + 1:end.line] +
223 [lines[end.line][:end.column]])
227 def MakeToken(lines, start, end, token_type):
230 return Token(start, end, SubString(lines, start, end), token_type)
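SubString (lines 209-223) extracts the raw text between two cursors, mapping an end-of-file sentinel to the position just past the last character, and MakeToken (lines 227-230) packages that text with its span and type. A sketch, assuming Token is a simple record and Eof() returns a (-1, -1) sentinel cursor; both are inferred from how they are used above:

    import collections

    Token = collections.namedtuple('Token', ['start', 'end', 'value', 'token_type'])

    def Eof():
        """The sentinel cursor denoting end-of-file."""
        return Cursor(-1, -1)

    def SubString(lines, start, end):
        """Returns the text between cursors start and end."""
        if end == Eof():
            end = Cursor(len(lines) - 1, len(lines[-1]))
        if start >= end:
            return ''
        if start.line == end.line:                 # both ends on one line
            return lines[start.line][start.column:end.column]
        result_lines = ([lines[start.line][start.column:]] +   # tail of first line
                        lines[start.line + 1:end.line] +       # whole middle lines
                        [lines[end.line][:end.column]])        # head of last line
        return ''.join(result_lines)

    def MakeToken(lines, start, end, token_type):
        """Creates a Token carrying the text between start and end."""
        return Token(start, end, SubString(lines, start, end), token_type)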
233 def ParseToken(lines, pos, regex, token_type):
234 line = lines[pos.line][pos.column:]
237 return MakeToken(lines, pos, pos + m.end(), token_type)
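ParseToken (lines 233-237) parses a token that must begin exactly at pos; the unmatched lines between 234 and 237 presumably anchor the match and handle failure. A sketch under that assumption, with ID_REGEX shown as one example of the regexes the callers below pass in:

    import re
    import sys

    ID_REGEX = re.compile(r'[_A-Za-z]\w*')    # an identifier, as parsed by the callers

    def ParseToken(lines, pos, regex, token_type):
        """Parses a token that must start exactly at pos; aborts otherwise."""
        line = lines[pos.line][pos.column:]
        m = regex.search(line)
        if m and not m.start():               # match must be anchored at pos
            return MakeToken(lines, pos, pos + m.end(), token_type)
        print('ERROR: %s expected at %s.' % (token_type, pos))
        sys.exit(1)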
251 def Skip(lines, pos, regex):
252 line = lines[pos.line][pos.column:]
260 def SkipUntil(lines, pos, regex, token_type):
261 line = lines[pos.line][pos.column:]
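Skip (line 251) and SkipUntil (line 260) both search the current line from pos, but with different contracts: Skip advances past a regex only if it matches right at pos and otherwise returns pos unchanged, while SkipUntil advances to the start of the next match and treats its absence as a parse error. A sketch of both; note re.search accepts either a pattern string or a precompiled regex, and the callers above pass both kinds:

    def Skip(lines, pos, regex):
        """Returns the position just past regex if it matches at pos, else pos."""
        line = lines[pos.line][pos.column:]
        m = re.search(regex, line)
        if m and not m.start():
            return pos + m.end()
        return pos

    def SkipUntil(lines, pos, regex, token_type):
        """Returns the position of the next match of regex; aborts if none."""
        line = lines[pos.line][pos.column:]
        m = re.search(regex, line)
        if m:
            return pos + m.start()
        print('ERROR: %s expected on line %s after column %s.'
              % (token_type, pos.line + 1, pos.column))
        sys.exit(1)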
271 def ParseExpTokenInParens(lines, pos):
273 pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
274 pos = Skip(lines, pos, r'\(')
276 pos = Skip(lines, pos, r'\)')
280 pos = SkipUntil(lines, pos, r'\(|\)', ')')
281 if SubString(lines, pos, pos + 1) == '(':
283 pos = Skip(lines, pos, r'\)')
290 return MakeToken(lines, start, pos, 'exp')
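ParseExpTokenInParens (lines 271-290) consumes a parenthesized expression, and the matches at 280-283 show the trick: scan to the next '(' or ')', and on '(' recurse so that nested pairs balance before the closing ')' is skipped. A sketch restoring the two mutually recursive helpers implied by the matched lines:

    OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')

    def ParseExpTokenInParens(lines, pos):
        """Parses a '(...)' expression, balancing nested parentheses."""
        def ParseInParens(pos):
            pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
            pos = Skip(lines, pos, r'\(')
            pos = Parse(pos)
            pos = Skip(lines, pos, r'\)')
            return pos

        def Parse(pos):
            pos = SkipUntil(lines, pos, r'\(|\)', ')')
            if SubString(lines, pos, pos + 1) == '(':
                pos = ParseInParens(pos)   # recurse into the nested pair
                pos = Parse(pos)           # keep scanning after it
            return pos

        start = pos
        pos = ParseInParens(pos)
        return MakeToken(lines, start, pos, 'exp')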
300 def TokenizeLines(lines, pos):
302 found = FindFirst(lines, TOKEN_TABLE, pos)
304 yield MakeToken(lines, pos, Eof(), 'code')
311 prev_token = MakeToken(lines, pos, found.start, 'code')
322 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
324 pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
326 eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
328 pos = Skip(lines, eq_token.end, r'\s*')
330 if SubString(lines, pos, pos + 2) != '[[':
331 exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
338 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
340 pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
345 id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
347 pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
349 dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
350 yield MakeToken(lines, pos, dots_pos, 'exp')
351 yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
354 yield MakeToken(lines, pos, new_pos, 'exp')
360 exp_token = ParseExpTokenInParens(lines, found.end)
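TokenizeLines (lines 300-360) is the core generator: find the next meta token, yield any plain text before it as a 'code' token, then parse the pieces specific to that construct ($var id = exp at 322-331, $for id at 338-340, $range id lo..hi at 345-354, and a parenthesized expression at 360). A condensed sketch of that loop, with a hypothetical two-entry token table and only the single-line $var branch spelled out; the real dispatch has more branches, including the multi-line [[ ... ]] form probed at line 330:

    TOKEN_TABLE = [
        # (regex, token_type) pairs; order matters, most specific first.
        (re.compile(r'\$var\s+'), '$var'),
        (re.compile(r'\$'), '$'),
    ]
    EQ_REGEX = re.compile(r'=')
    REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # up to line end or a $$ comment

    def TokenizeLines(lines, pos):
        """Yields the tokens in lines, starting at pos."""
        while True:
            found = FindFirst(lines, TOKEN_TABLE, pos)
            if not found:
                # No meta token left: the rest of the input is plain code.
                yield MakeToken(lines, pos, Eof(), 'code')
                return
            if found.start != pos:
                yield MakeToken(lines, pos, found.start, 'code')
            yield found
            if found.token_type == '$var':
                # $var id = exp
                id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
                yield id_token
                pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
                eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
                yield eq_token
                pos = Skip(lines, eq_token.end, r'\s*')
                exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
                yield exp_token
                pos = Cursor(exp_token.end.line + 1, 0)  # resume on the next line
            else:
                pos = found.end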
377 lines = s.splitlines(True)
378 return TokenizeLines(lines, Cursor(0, 0))
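Tokenize (lines 377-378) is the public entry point. Note splitlines(True): line terminators are kept, so cursor columns index into the raw text and SubString can reproduce the input exactly. A sketch plus a short run against the sketches above:

    def Tokenize(s):
        """A generator that yields the tokens in the given string."""
        if s != '':
            lines = s.splitlines(True)   # keepends=True, so '\n' stays in each line
            return TokenizeLines(lines, Cursor(0, 0))

    for token in Tokenize('$var n = 3\nHello\n'):
        print(token.token_type, repr(token.value))
    # $var '$var '
    # id 'n'
    # = '='
    # exp '3'
    # code 'Hello\n'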
800 lines = string.splitlines()
802 for line in lines:
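The last two matches (file lines 800 and 802) come from a later helper that post-processes output line by line. It uses splitlines() without keepends, unlike Tokenize, since it only needs the line contents. The difference in one snippet:

    text = 'a\nb\n'
    print(text.splitlines())       # ['a', 'b']       terminators dropped
    print(text.splitlines(True))   # ['a\n', 'b\n']   terminators kept, as the tokenizer needs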