/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package parser.elements

import lexer.Token
import lexer.TokenGrammar
import parser.peekPreviousToken
import parser.peekToken
import java.text.ParseException

/**
 * Starts parsing at the iterator's current position, finds the end of the element, and collects the results.
 * @param iter An iterator over a list of tokens, positioned at the element to parse.
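 *
 * A minimal usage sketch with a hypothetical subclass (the names below are illustrative only,
 * not part of this package):
 *
 * ```
 * class DocCommentParser(iter: ListIterator<Token>) : AbstractParser(iter) {
 *     override fun scanTokens(iter: ListIterator<Token>): List<Token> = scanDocTokens(iter)
 *     override fun parseTokens(tokens: List<Token>) {
 *         // e.g. build this element's model from the collected tokens
 *     }
 * }
 *
 * val parser = DocCommentParser(iter)
 * parser.parseTokens(parser.scanTokens(iter))
 * ```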
 */
abstract class AbstractParser(iter: ListIterator<Token>) {

    val indexStart: Int

    init {
        while (iter.hasNext() && peekToken(iter)?.identifier == TokenGrammar.EMPTY_LINE) {
            iter.next() // skip over beginning empty lines
        }
        require(iter.hasNext()) { "Iterator is empty" }
        indexStart = iter.nextIndex()
    }

    /**
     * Process the tokens collected by [scanTokens].
     */
    abstract fun parseTokens(tokens: List<Token>)

    /**
     * Determine where this element's token sequence ends and collect the tokens from the current position to that end.
     */
    abstract fun scanTokens(iter: ListIterator<Token>): List<Token>

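    /**
     * Collect a doc comment block: from the [TokenGrammar.DOC_START] token through the matching
     * [TokenGrammar.DOC_END] token. The iterator may be positioned on the doc start token or just after it.
     */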
    protected fun scanDocTokens(iter: ListIterator<Token>): List<Token> {
        val tokens = mutableListOf<Token>()
        var token: Token

        // ignore any empty lines that start the doc block (if called after doc_start)
        while (peekPreviousToken(iter)?.identifier == TokenGrammar.EMPTY_LINE) iter.previous()

        // back up to doc_start if called just after it
        if (peekPreviousToken(iter)?.identifier == TokenGrammar.DOC_START) iter.previous()

        if (peekToken(iter)?.identifier != TokenGrammar.DOC_START)
            throw ParseException("Doc comment blocks must begin with ${TokenGrammar.DOC_START.value}", this.indexStart)

        tokens.add(iter.next()) // doc_start

        while (iter.hasNext()) {
            token = iter.next()
            tokens.add(token)
            if (token.identifier == TokenGrammar.DOC_END) {
                break
            } else if (token.identifier == TokenGrammar.DOC_START) {
                throw ParseException("Nested doc comments not allowed", this.indexStart)
            }
        }

        if (peekPreviousToken(iter)?.identifier != TokenGrammar.DOC_END) {
            throw ParseException("Unable to find doc comment end", this.indexStart)
        }
        return tokens
    }

    /**
     * Collect tokens from the optional leading annotations through the end of the declaration,
     * i.e. up to the first semicolon outside any braces; the code body may contain nested braces.
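     *
     * A hypothetical illustration: for input like `struct Foo { vec<Bar> bars; };`, scanning
     * consumes everything through the final semicolon and ignores the semicolon inside the braces.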
     */
    protected fun scanDeclarationTokens(iter: ListIterator<Token>): List<Token> {
        val tokens = mutableListOf<Token>()
        var token: Token
        var nestLevel = 0
        var inDoc = false

        while (iter.hasNext()) {
            token = iter.next()
            tokens.add(token)

            // track whether we're inside a doc comment block
            if (token.identifier == TokenGrammar.DOC_START) {
                inDoc = true
            } else if (token.identifier == TokenGrammar.DOC_END) {
                inDoc = false
            }

            if (inDoc) {
                continue // braces and semicolons inside doc comments don't count
            } else if (token.identifier == TokenGrammar.BRACE_OPEN) {
                nestLevel++
            } else if (token.identifier == TokenGrammar.BRACE_CLOSE) {
                nestLevel--
            } else if (token.identifier == TokenGrammar.SEMICOLON && nestLevel == 0) {
                break // end of declaration: semicolon at the top nesting level
            }
        }
        assert(tokens.last().identifier == TokenGrammar.SEMICOLON)
        return tokens
    }

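    /**
     * Rewinds the iterator back to this parser's starting position, [indexStart].
     */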
    fun resetIterator(iter: ListIterator<Token>) {
        while (iter.hasPrevious() && iter.previousIndex() >= indexStart) {
            iter.previous()
        }
        assert(iter.nextIndex() == this.indexStart)
    }
}