parsergen: split out reduce step of parser
diff --git a/csrc/parsergen.mdc b/csrc/parsergen.mdc
index 0f4bf1f4eb301b3ed9a5109b0307e8ee95d77871..6eb43181006fca10c080b545a6f47b85173636ac 100644
--- a/csrc/parsergen.mdc
+++ b/csrc/parsergen.mdc
@@ -171,7 +171,11 @@ is treated as an error.
 Symbols can be either `TK_ident` or `TK_mark`.  They are saved in a
 table of known symbols and the resulting parser will report them as
 `TK_reserved + N`.  A small set of identifiers are reserved for the
-different token types that `scanner` can report.
+different token types that `scanner` can report, and an even smaller set
+is reserved for a special token that the parser can generate (`EOL`), as
+will be described later.  The symbols in this latter set cannot use
+predefined numbers, so they are marked as `isspecial` for now and will be
+assigned numbers along with the non-terminals later.
 
 ###### declarations
 
@@ -186,9 +190,12 @@ different token types that `scanner` can report.
                { TK_out,          "OUT" },
                { TK_newline,      "NEWLINE" },
                { TK_eof,          "$eof" },
+               { -1,              "EOL" },
        };
+
 ###### symbol fields
        short num;
+       unsigned int isspecial:1;
 
 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
 recognised.  The former is automatically expected at the end of the text
@@ -246,6 +253,7 @@ symbol, but its type might be `Unknown`.
                        s = sym_find(g, t);
                        s->type = Terminal;
                        s->num = reserved_words[i].num;
+                       s->isspecial = 1;
                }
        }
 
@@ -1481,8 +1489,10 @@ a report.
 
 Once we have built everything we allocate arrays for the two lists:
 symbols and itemsets.  This allows more efficient access during
-reporting.  The symbols are grouped as terminals and then non-terminals,
-and we record the changeover point in `first_nonterm`.
+reporting.  The symbols are grouped as terminals, then non-terminals,
+then virtual symbols, with the start of the non-terminals recorded in
+`first_nonterm`.  Special terminals -- meaning just EOL -- are numbered
+with the non-terminals, so they are never expected from the scanner.
 
 ###### grammar fields
        struct symbol **symtab;
@@ -1497,7 +1507,7 @@ and we record the changeover point in `first_nonterm`.
                struct itemset *is;
                int snum = TK_reserved;
                for (s = g->syms; s; s = s->next)
-                       if (s->num < 0 && s->type == Terminal) {
+                       if (s->num < 0 && s->type == Terminal && !s->isspecial) {
                                s->num = snum;
                                snum++;
                        }
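
To make the effect of this numbering pass concrete, here is a rough
sketch for a hypothetical grammar (all names and values below are
invented for illustration and do not come from the generator): ordinary
terminals are numbered from `TK_reserved` upward, while `EOL`, the
non-terminals, and the virtual symbols all end up at or above
`first_nonterm`, so the scanner is never asked to produce `EOL`.

	/* Illustration only: one possible final numbering for a small,
	 * invented grammar.  The exact values depend on the grammar. */
	enum { EXAMPLE_TK_reserved = 12 };      /* stand-in for the scanner's value */
	enum example_symbol_numbers {
		EX_if = EXAMPLE_TK_reserved,    /* ordinary terminals come first */
		EX_else,
		EX_number,
		EX_first_nonterm,               /* == g->first_nonterm in this sketch */
		EX_Statement = EX_first_nonterm,/* non-terminals follow the terminals */
		EX_Expression,
		EX_EOL,                         /* special terminal, numbered here so
						 * the scanner never reports it */
		EX_Virtual_NEWLINE,             /* virtual symbols come last */
	};
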
@@ -1775,11 +1785,6 @@ terminals to items where that terminal could be shifted and another
 which maps terminals to items that could be reduced when the terminal
 is in look-ahead.  We report when we get conflicts between the two.
 
-As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE
-terminal, we ignore it.  NEWLINES are handled specially with its own
-rules for when to shift and when to reduce.  Conflicts are expected,
-but handled internally.
-
        static int conflicts_slr(struct grammar *g, enum grammar_type type)
        {
                int i;
@@ -1831,7 +1836,7 @@ but handled internally.
                                int k;
                                for (k = 0; k < la.cnt; k++) {
                                        int pos = symset_find(&shifts, la.syms[k]);
-                                       if (pos >= 0 && la.syms[k] != TK_newline) {
+                                       if (pos >= 0) {
                                                printf("  State %d has SHIFT/REDUCE conflict on ", i);
                                                cnt++;
                                                        prtxt(g->symtab[la.syms[k]]->name);
@@ -1927,7 +1932,8 @@ The table of nonterminals used for tracing is a similar array.
                for (i = TK_reserved;
                     i < g->num_syms;
                     i++)
-                       if (g->symtab[i]->type == Nonterminal)
+                       if (g->symtab[i]->type == Nonterminal ||
+                           g->symtab[i]->isspecial)
                                fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
                                        g->symtab[i]->name.txt);
                fprintf(f, "};\n\n");
@@ -2273,7 +2279,15 @@ appropriate for tokens) on any terminal symbol.
                fprintf(f, "static void do_free(short sym, void *asn)\n");
                fprintf(f, "{\n");
                fprintf(f, "\tif (!asn) return;\n");
-               fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
+               fprintf(f, "\tif (sym < %d", g->first_nonterm);
+               /* Need to handle special terminals too */
+               for (i = 0; i < g->num_syms; i++) {
+                       struct symbol *s = g->symtab[i];
+                       if (i >= g->first_nonterm && s->type == Terminal &&
+                           s->isspecial)
+                               fprintf(f, " || sym == %d", s->num);
+               }
+               fprintf(f, ") {\n");
                fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
                fprintf(f, "\tswitch(sym) {\n");
 
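For context, here is roughly what the emitted `do_free()` then looks
like.  This sketch is purely illustrative: it assumes, for the sake of
the example, that `first_nonterm` is 43 and that `EOL` was assigned
number 44; the real numbers depend on the grammar.

	#include <stdlib.h>     /* for free(); only to keep this sketch self-contained */

	/* Sketch of generator output (numbers invented): terminals and the
	 * special EOL token hold plain allocations and are simply freed;
	 * everything else falls through to the per-symbol cases. */
	static void do_free(short sym, void *asn)
	{
		if (!asn) return;
		if (sym < 43 || sym == 44) {
			free(asn);
			return;
		}
		switch(sym) {
		/* ... per-non-terminal cases emitted from the grammar ... */
		}
	}
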
@@ -2712,14 +2726,6 @@ to be handled.
 
 We return whatever `asn` was returned by reducing production zero.
 
-If we can neither shift nor reduce we have an error to handle.  We pop
-single entries off the stack until we can shift the `TK_error` symbol,
-then drop input tokens until we find one we can shift into the new error
-state.  We need to ensure that something is dropped or shifted after an
-error, or we could get into an infinite loop, only shifting in
-`TK_error`, then reducing.  So we track if there has been a shift since
-the last error, and if not the next error always discards one token.
-
 When we find `TK_in` and `TK_out` tokens which report indents we need
 to handle them directly as the grammar cannot express what we want to
 do with them.
@@ -2762,32 +2768,93 @@ in the thing that precedes:
 Here the NEWLINE will be shifted because nothing can be reduced until
 the `if` is seen.
 
-When during error handling we discard tokens read in, we want to keep
-discarding until we see one that is recognised.  If we had a full set
-of LR(1) grammar states, this would mean looking in the look-ahead set,
-but we don't keep a full look-ahead set.  We only record the subset
-that leads to SHIFT.  We can, however, deduce the look-ahead set by
-looking at the SHIFT subsets for all states that we can get to by
-reducing zero or more times.  So we need a little function which
-checks if a given token is in any of these look-ahead sets.
+We have already discussed the bulk of the handling of a "reduce" action,
+with the `pop()` and `shift()` functions doing much of the work.  There
+is a little more complexity needed to manage storage for the asn (Abstract
+Syntax Node), and also a test of whether the reduction is permitted.
 
-###### parser includes
-       #include "parser.h"
+When we try to shift the result of reducing production-zero, it will
+fail because there is no next state.  In this case the asn will not have
+been stored on the stack, so it is stored in the `ret` variable instead,
+and we report that the input has been accepted.
 
-###### parser_run
+###### parser vars
 
-       static int in_lookahead(struct token *tk, const struct state *states, int state)
-       {
-               while (state >= 0) {
-                       if (search(&states[state], tk->num) >= 0)
-                               return 1;
-                       if (states[state].reduce_prod < 0)
-                               return 0;
-                       state = search(&states[state], states[state].reduce_sym);
+       void *ret = NULL;
+
+###### try reduce
+
+       if (states[tos->state].reduce_prod >= 0) {
+               void **body;
+               void *res;
+               const struct state *nextstate = &states[tos->state];
+               int prod = nextstate->reduce_prod;
+               int size = nextstate->reduce_size;
+               int res_size = nextstate->result_size;
+
+               body = p.asn_stack + (p.tos - size);
+               res = res_size ? calloc(1, res_size) : NULL;
+               res_size = do_reduce(prod, body, config, res);
+               if (res_size != nextstate->result_size)
+                       abort();
+               pop(&p, size, do_free);
+               if (!shift(&p, nextstate->reduce_sym, res, states)) {
+                       accepted = 1;
+                       ret = res;
+                       parser_trace_action(trace, "Accept");
+               } else
+                       parser_trace_action(trace, "Reduce");
+               continue;
+       }
+
+If we can neither shift nor reduce we have an error to handle.  There
+are two possible responses to an error: we can pop single frames off the
+stack until we find a frame that can shift `TK_error`, or we can discard
+the current look-ahead symbol.
+
+When we first see an error we do the first of these and set a flag to
+record that we are processing an error.  If the normal shift/reduce
+tests fail to find that anything can be done from that state, we start
+dropping tokens until either we manage to shift one, or reach end-of-file.
+
+###### parser vars
+
+       int in_err = 0;
+
+###### did shift
+
+       in_err = 0;
+
+###### handle error
+
+       if (in_err) {
+               parser_trace_action(trace, "DISCARD");
+               if (tk->num == TK_eof)
+                       break;
+               free(tk);
+               tk = NULL;
+       } else {
+               struct token *err_tk;
+
+               parser_trace_action(trace, "ERROR");
+
+               err_tk = tok_copy(*tk);
+               while (p.tos > 0 && shift(&p, TK_error, err_tk, states) == 0)
+                       // discard this state
+                       pop(&p, 1, do_free);
+               if (p.tos == 0) {
+                       free(err_tk);
+                       // no state accepted TK_error
+                       break;
                }
-               return 0;
+               in_err = 1;
        }
 
+###### parser includes
+       #include "parser.h"
+
+###### parser_run
+
        void *parser_run(struct token_state *tokens,
                         const struct state states[],
                         int (*do_reduce)(int, void**, struct token_config*, void*),
@@ -2798,12 +2865,10 @@ checks if a given token is in any of these look-ahead sets.
                struct parser p = { 0 };
                struct token *tk = NULL;
                int accepted = 0;
-               int shift_since_err = 1;
-               void *ret = NULL;
+               ## parser vars
 
                shift(&p, TK_eof, NULL, states);
                while (!accepted && p.tos > 0) {
-                       struct token *err_tk;
                        struct frame *tos = &p.stack[p.tos-1];
                        if (!tk)
                                tk = tok_copy(token_next(tokens));
@@ -2837,73 +2902,14 @@ checks if a given token is in any of these look-ahead sets.
                                }
                        }
                        if (shift(&p, tk->num, tk, states)) {
-                               shift_since_err = 1;
                                tk = NULL;
                                parser_trace_action(trace, "Shift");
+                               ## did shift
                                continue;
                        }
 
-                       if (states[tos->state].reduce_prod >= 0) {
-                               void **body;
-                               void *res;
-                               const struct state *nextstate = &states[tos->state];
-                               int prod = nextstate->reduce_prod;
-                               int size = nextstate->reduce_size;
-                               int res_size = nextstate->result_size;
-
-                               body = p.asn_stack + (p.tos - size);
-                               res = res_size ? calloc(1, res_size) : NULL;
-                               res_size = do_reduce(prod, body, config, res);
-                               if (res_size != nextstate->result_size)
-                                       abort();
-
-                               pop(&p, size, do_free);
-
-                               if (!shift(&p, nextstate->reduce_sym,
-                                          res, states)) {
-                                       if (prod != 0) abort();
-                                       accepted = 1;
-                                       ret = res;
-                               }
-                               parser_trace_action(trace, "Reduce");
-                               continue;
-                       }
-                       /* Error. We walk up the stack until we
-                        * find a state which will accept TK_error.
-                        * We then shift in TK_error and see what state
-                        * that takes us too.
-                        * Then we discard input tokens until
-                        * we find one that is acceptable.
-                        */
-                       parser_trace_action(trace, "ERROR");
-
-                       err_tk = tok_copy(*tk);
-                       while (p.tos > 0 &&
-                              shift(&p, TK_error, err_tk, states) == 0)
-                               // discard this state
-                               pop(&p, 1, do_free);
-                       if (p.tos == 0) {
-                               free(err_tk);
-                               // no state accepted TK_error
-                               break;
-                       }
-                       if (!shift_since_err) {
-                               /* must discard at least one token to avoid
-                                * infinite loop.
-                                */
-                               if (tk->num == TK_eof)
-                                       break;
-                               free(tk);
-                               tk = tok_copy(token_next(tokens));
-                       }
-                       shift_since_err = 0;
-                       tos = &p.stack[p.tos-1];
-                       while (!in_lookahead(tk, states, tos->state) &&
-                              tk->num != TK_eof) {
-                               free(tk);
-                               tk = tok_copy(token_next(tokens));
-                               shift_since_err = 1;
-                       }
+                       ## try reduce
+                       ## handle error
                }
                free(tk);
                pop(&p, p.tos, do_free);