# -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H edit descriptors (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
#   nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens.
# -----------------------------------------------------------------------------
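# For example, the descriptor "3Habc" has n = 3, so the three characters
# "abc" following the "H" are the token's value.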
import sys
if ".." not in sys.path: sys.path.insert(0, "..")

import ply.lex as lex

tokens = (
    'H_EDIT_DESCRIPTOR',
    )

# Tokens
t_ignore = " \t\n"

def t_H_EDIT_DESCRIPTOR(t):
    r"\d+H.*"                     # This grabs all of the remaining text
    i = t.value.index('H')
    n = int(t.value[:i])          # The regex guarantees digits, so int() suffices

    # Adjust the tokenizing position
    t.lexer.lexpos -= len(t.value) - (i+1+n)
    t.value = t.value[i+1:i+1+n]
    return t
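
# Worked example, using the first descriptor of the sample input at the
# bottom of this file: the rule matches all of "3Habc 10Habcdefghij 2Hxy"
# (24 characters), so i = 1 and n = 3.  lexpos is wound back by
# 24 - (1+1+3) = 19, leaving the lexer just past "3Habc", and the token
# value becomes "abc".  Scanning then resumes at the following space.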

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
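
# A minimal manual scan of the same sample input; a sketch of the
# token-by-token interface, shown as an alternative to lex.runmain() above.
lexer = lex.lex()
lexer.input("3Habc 10Habcdefghij 2Hxy")
while True:
    tok = lexer.token()
    if tok is None:
        break                     # No more input
    print(tok.type, repr(tok.value))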