X-Git-Url: https://ocean-lang.org/code/?p=ocean;a=blobdiff_plain;f=csrc%2Fparsergen.mdc;h=d9467442eeab1dfec4720f7a2b4f5c6666d43867;hp=f2bbe0151d75977dd2b2ae9f16031f4c171a5949;hb=10db06aed6af588a0ccd05e80a0f50286949d56c;hpb=5f8aaec6eb5315fff28c9c55c15620f43d483f97 diff --git a/csrc/parsergen.mdc b/csrc/parsergen.mdc index f2bbe01..d946744 100644 --- a/csrc/parsergen.mdc +++ b/csrc/parsergen.mdc @@ -5,6 +5,9 @@ fragments, analyses it, and can report details about the analysis and write out C code files which can be compiled to make a parser. "2D support" means that indentation and line breaks can be significant. +Indent tokens (IN and OUT) and end-of-line tokens (EOL and NEWLINE) can +be used to describe the grammar and the parser can selectively ignore +these where they aren't relevant. There are several distinct sections. @@ -105,7 +108,6 @@ symbol. struct production { unsigned short precedence; enum assoc assoc; - char line_like; ## production fields }; struct grammar { @@ -150,11 +152,11 @@ because that is what we need to detect tags. Productions are comprised primarily of symbols - terminal and non-terminal. We do not make a syntactic distinction between the two, -though non-terminals must be identifiers. Non-terminal symbols are -those which appear in the head of a production, terminal symbols are -those which don't. There are also "virtual" symbols used for precedence -marking discussed later, and sometimes we won't know what type a symbol -is yet. +though non-terminals must be identifiers while terminals can also be +marks. Non-terminal symbols are those which appear in the head of a +production, terminal symbols are those which don't. There are also +"virtual" symbols used for precedence marking discussed later, and +sometimes we won't know what type a symbol is yet. To help with code safety it is possible to declare the terminal symbols. If this is done, then any symbol used in a production that does not @@ -172,7 +174,11 @@ is treated as an error. Symbols can be either `TK_ident` or `TK_mark`. They are saved in a table of known symbols and the resulting parser will report them as `TK_reserved + N`. A small set of identifiers are reserved for the -different token types that `scanner` can report. +different token types that `scanner` can report, and an even smaller set +are reserved for a special token that the parser can generate (`EOL`) as +will be described later. This latter set cannot use predefined numbers, +so they are marked as `isspecial` for now and will get assigned a number +with the non-terminals later. ###### declarations @@ -187,9 +193,12 @@ different token types that `scanner` can report. { TK_out, "OUT" }, { TK_newline, "NEWLINE" }, { TK_eof, "$eof" }, + { -1, "EOL" }, }; + ###### symbol fields short num; + unsigned int isspecial:1; Note that `TK_eof` and the two `TK_*_comment` tokens cannot be recognised. The former is automatically expected at the end of the text @@ -247,6 +256,7 @@ symbol, but its type might be `Unknown`. s = sym_find(g, t); s->type = Terminal; s->num = reserved_words[i].num; + s->isspecial = 1; } } @@ -281,7 +291,7 @@ carry precedence information but are not included in the grammar. A production can declare that it inherits the precedence of a given virtual symbol. -This use for `$$` precludes it from being used as a symbol in the +This use of `$$` precludes it from being used as a symbol in the described language. Two other symbols: `${` and `}$` are also unavailable. @@ -383,9 +393,10 @@ here is told which was found in the `isref` argument. 
found += 1; t = token_next(ts); } - if (found == 0) + if (found == 0) { err = "No symbols given on precedence/TERM line"; goto abort; + } return NULL; abort: while (t.num != TK_newline && t.num != TK_eof) @@ -504,10 +515,6 @@ Now we have all the bits we need to parse a full production. tk = token_next(state); while (tk.num == TK_ident || tk.num == TK_mark) { struct symbol *bs = sym_find(g, tk.txt); - if (bs->type == Unknown) { - if (!g->terminals_declared) - bs->type = Terminal; - } if (bs->type == Virtual) { err = "Virtual symbol not permitted in production"; goto abort; @@ -527,11 +534,7 @@ Now we have all the bits we need to parse a full production. goto abort; } vs = sym_find(g, tk.txt); - if (vs->num == TK_newline) - p.line_like = 1; - else if (vs->num == TK_out) - p.line_like = 2; - else if (vs->precedence == 0) { + if (vs->precedence == 0) { err = "symbol after $$ must have precedence"; goto abort; } else { @@ -685,20 +688,22 @@ used as a terminal anywhere that a terminal is expected. goto abort; } token_close(state); - if (g->terminals_declared) { - struct symbol *s; - int errs = 0; - for (s = g->syms; s; s = s->next) { - if (s->type != Unknown) - continue; - errs += 1; - fprintf(stderr, "Token %.*s not declared\n", - s->name.len, s->name.txt); - } - if (errs) { - free(g); // FIXME free content - g = NULL; + + struct symbol *s; + for (s = g->syms; s; s = s->next) { + if (s->type != Unknown) + continue; + if (!g->terminals_declared) { + s->type = Terminal; + continue; } + err = "not declared"; + fprintf(stderr, "Token %.*s not declared\n", + s->name.len, s->name.txt); + } + if (err) { + free(g); // FIXME free content + g = NULL; } return g; abort: @@ -722,8 +727,8 @@ and to simplify some comparisons of sets, these sets will be stored in a list of unique sets, each assigned a number. Once we have the data structures in hand to manage these sets and lists, -we can start setting the 'nullable' flag, build the 'FIRST' sets, and -then create the item sets which define the various states. +we can start setting the 'nullable' flag, build the 'FIRST' and 'FOLLOW' +sets, and then create the item sets which define the various states. ### Symbol sets. @@ -940,54 +945,6 @@ changes happen. } } -### Setting `line_like` - -In order to be able to ignore newline tokens when not relevant, but -still include them in the parse when needed, we will need to know -which states can start a "line-like" section of code. We ignore -newlines when there is an indent since the most recent start of a -line-like symbol. - -A "line_like" symbol is simply any symbol that can derive a NEWLINE. -If a symbol cannot derive a NEWLINE, then it is only part of a line - -so is word-like. If it can derive a NEWLINE, then we consider it to -be like a line. - -Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which -is the head of a production that contains a line_like symbol is also a -line-like symbol. We use a new field `line_like` to record this -attribute of symbols, and compute it in a repetitive manner similar to -`set_nullable`. 
- -###### symbol fields - int line_like; - -###### functions - static void set_line_like(struct grammar *g) - { - int check_again = 1; - g->symtab[TK_newline]->line_like = 1; - while (check_again) { - int p; - check_again = 0; - for (p = 0; p < g->production_count; p++) { - struct production *pr = g->productions[p]; - int s; - - if (pr->head->line_like) - continue; - - for (s = 0 ; s < pr->body_size; s++) { - if (pr->body[s]->line_like) { - pr->head->line_like = 1; - check_again = 1; - break; - } - } - } - } - } - ### Building the `first` sets When calculating what can follow a particular non-terminal, we will need @@ -1072,6 +1029,9 @@ and we find the set of possible "first" symbols after there. This is done using `add_first` above and only needs to be done once as the "first" sets are now stable and will not change. +###### grammar fields + struct symset *follow; + ###### follow code for (p = 0; p < g->production_count; p++) { @@ -1121,9 +1081,6 @@ combine these two functions into a single loop. We now just need to create and initialise the `follow` list to get a complete function. -###### grammar fields - struct symset *follow; - ###### functions static void build_follow(struct grammar *g) { @@ -1230,22 +1187,6 @@ particularly for LALR where itemsets get merged, at which point they need to be consider for completion again. So a `completed` flag is needed. -For correct handling of `TK_newline` when parsing, we will need to -know which states (itemsets) can occur at the start of a line, so we -will record a `starts_line` flag too whenever DOT is at the start of a -`line_like` symbol. - -Finally, for handling `TK_out` we need to know whether productions in the -current state started *before* the most recent indent. A state -doesn't usually keep details of individual productions, so we need to -add one extra detail. `min_prefix` is the smallest non-zero number of -symbols *before* DOT in any production in an itemset. This will allow -us to determine if the the most recent indent is sufficiently recent -to cancel it against a `TK_out`. If it was seen longer ago than the -`min_prefix`, and if the current state cannot be reduced, then the -indented section must have ended in the middle of a syntactic unit, so -an error must be signaled. - And now we can build the list of itemsets. The lookup routine returns both a success flag and a pointer to where in the list an insert should happen, so we don't need to search a second time. @@ -1259,8 +1200,6 @@ happen, so we don't need to search a second time. enum assoc assoc; unsigned short precedence; char completed; - char starts_line; - int min_prefix; }; ###### grammar fields @@ -1357,9 +1296,7 @@ may be supplemented by the LA set for the item which produced the new item. We also collect a set of all symbols which follow "DOT" (in `done`) as -this is used in the next stage. If any of these symbols are flagged as -`line_like`, then this state must be a `starts_line` state so now is a -good time to record that. +this is used in the next stage. When itemsets are created we assign a precedence to the itemset from the complete item, if there is one. We ignore the possibility of there @@ -1380,12 +1317,7 @@ so the item is ineffective. struct symbol *s; struct symset LA = INIT_SYMSET; unsigned short sn = 0; - struct symset LAnl = INIT_SYMSET; - unsigned short snnl = 0; - if (is->min_prefix == 0 || - (bs > 0 && bs < is->min_prefix)) - is->min_prefix = bs; if (bs == pr->body_size) continue; s = pr->body[bs]; @@ -1401,13 +1333,11 @@ so the item is ineffective. 
* not Right-associative, so we mustn't shift it. */ continue; - if (symset_find(&done, s->num) < 0) { + if (symset_find(&done, s->num) < 0) symset_add(&done, s->num, 0); - } + if (s->type != Nonterminal) continue; - if (s->line_like) - is->starts_line = 1; check_again = 1; if (type >= LALR) { // Need the LA set. @@ -1419,10 +1349,6 @@ so the item is ineffective. } sn = save_set(g, LA); LA = set_find(g, sn); - if (symset_find(&LA, TK_newline)) - symset_add(&LAnl, TK_newline, 0); - snnl = save_set(g, LAnl); - LAnl = set_find(g, snnl); } /* Add productions for this symbol */ @@ -1433,10 +1359,7 @@ so the item is ineffective. int itm = item_num(p2, 0); int pos = symset_find(&is->items, itm); if (pos < 0) { - if (g->productions[p2]->line_like) - symset_add(&is->items, itm, snnl); - else - symset_add(&is->items, itm, sn); + symset_add(&is->items, itm, sn); /* Will have re-ordered, so start * from beginning again */ i = -1; @@ -1445,8 +1368,6 @@ so the item is ineffective. struct symset tmp = INIT_SYMSET; struct symset *la = &LA; - if (g->productions[p2]->line_like) - la = &LAnl; symset_union(&tmp, &ss); if (symset_union(&tmp, la)) { is->items.data[pos] = save_set(g, tmp); @@ -1488,18 +1409,21 @@ itemsets (or merged with a pre-existing itemset). continue; if (pr->body[bp] != sym) continue; + + bp += 1; if (type >= LALR) la = is->items.data[j]; - pos = symset_find(&newitemset, pr->head->num); - if (bp + 1 == pr->body_size && + if (bp == pr->body_size && pr->precedence > 0 && pr->precedence > precedence) { // new itemset is reducible and has a precedence. precedence = pr->precedence; assoc = pr->assoc; } + pos = symset_find(&newitemset, item_num(p, bp)); + if (pos < 0) - symset_add(&newitemset, item_num(p, bp+1), la); + symset_add(&newitemset, item_num(p, bp), la); else if (type >= LALR) { // Need to merge la set. int la2 = newitemset.data[pos]; @@ -1567,8 +1491,10 @@ a report. Once we have built everything we allocate arrays for the two lists: symbols and itemsets. This allows more efficient access during -reporting. The symbols are grouped as terminals and then non-terminals, -and we record the changeover point in `first_nonterm`. +reporting. The symbols are grouped as terminals, then non-terminals, +then virtual, with the start of non-terminals recorded as `first_nonterm`. +Special terminals -- meaning just EOL -- are included with the +non-terminals so that they are not expected by the scanner. ###### grammar fields struct symbol **symtab; @@ -1583,7 +1509,7 @@ and we record the changeover point in `first_nonterm`. struct itemset *is; int snum = TK_reserved; for (s = g->syms; s; s = s->next) - if (s->num < 0 && s->type == Terminal) { + if (s->num < 0 && s->type == Terminal && !s->isspecial) { s->num = snum; snum++; } @@ -1604,7 +1530,6 @@ and we record the changeover point in `first_nonterm`. g->symtab[s->num] = s; set_nullable(g); - set_line_like(g); if (type >= SLR) build_first(g); @@ -1636,7 +1561,7 @@ all the tables that have been generated, plus a description of any conflicts. Firstly we have the complete list of symbols, together with the "FIRST" set if that was generated. We add a mark to each symbol to -show if it is considered to be "line-like" (`<`), or if it is nullable (`.`). +show if it is nullable (`.`). ###### functions @@ -1653,9 +1578,8 @@ show if it is considered to be "line-like" (`<`), or if it is nullable (`.`). if (!s) continue; - printf(" %c%c%3d%c: ", + printf(" %c%3d%c: ", s->nullable ? '.':' ', - s->line_like ? 
'<':' ', s->num, symtypes[s->type]); prtxt(s->name); if (s->precedence) @@ -1726,10 +1650,6 @@ it up a bit. First the items, with production number and associativity. printf(" [%d%s]", s->precedence, assoc_names[s->assoc]); } - if (pr->line_like == 1) - printf(" $$NEWLINE"); - else if (pr->line_like) - printf(" $$OUT"); printf("\n"); } @@ -1773,8 +1693,7 @@ Now we can report all the item sets complete with items, LA sets, and GO TO. for (s = 0; s < g->states; s++) { int j; struct itemset *is = g->statetab[s]; - printf(" Itemset %d:%s min prefix=%d", - s, is->starts_line?" (startsline)":"", is->min_prefix); + printf(" Itemset %d:", s); if (is->precedence) printf(" %d%s", is->precedence, assoc_names[is->assoc]); printf("\n"); @@ -1868,11 +1787,6 @@ terminals to items where that terminal could be shifted and another which maps terminals to items that could be reduced when the terminal is in look-ahead. We report when we get conflicts between the two. -As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE -terminal, we ignore it. NEWLINES are handled specially with its own -rules for when to shift and when to reduce. Conflicts are expected, -but handled internally. - static int conflicts_slr(struct grammar *g, enum grammar_type type) { int i; @@ -1924,7 +1838,7 @@ but handled internally. int k; for (k = 0; k < la.cnt; k++) { int pos = symset_find(&shifts, la.syms[k]); - if (pos >= 0 && la.syms[k] != TK_newline) { + if (pos >= 0) { printf(" State %d has SHIFT/REDUCE conflict on ", i); cnt++; prtxt(g->symtab[la.syms[k]]->name); @@ -1962,9 +1876,10 @@ optional `FILE` to send tracing to. The `token_config` gets the list of known words added and then is used with the `code_node` to initialize the scanner. -`parse_XX` then calls the library function `parser_run` to actually complete -the parse. This needs the `states` table and functions to call the various -pieces of code provided in the grammar file, so they are generated first. +`parse_XX` then calls the library function `parser_run` to actually +complete the parse. This needs the `states` table, the `reductions` +table and functions to call the various pieces of code provided in the +grammar file, so they are generated first. ###### parser_generate @@ -1975,6 +1890,7 @@ pieces of code provided in the grammar file, so they are generated first. gen_non_term(f, g); gen_goto(f, g); gen_states(f, g); + gen_reductions(f, g); gen_reduce(f, g, file, pre_reduce); gen_free(f, g); @@ -1986,7 +1902,7 @@ pieces of code provided in the grammar file, so they are generated first. fprintf(f, "\tconfig->words_marks = known;\n"); fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n"); fprintf(f, "\ttokens = token_open(code, config);\n"); - fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n"); + fprintf(f, "\tvoid *rv = parser_run(tokens, states, reductions, do_reduce, do_free, trace, non_term, config);\n"); fprintf(f, "\ttoken_close(tokens);\n"); fprintf(f, "\treturn rv;\n"); fprintf(f, "}\n\n"); @@ -2020,20 +1936,23 @@ The table of nonterminals used for tracing is a similar array. for (i = TK_reserved; i < g->num_syms; i++) - if (g->symtab[i]->type == Nonterminal) + if (g->symtab[i]->type == Nonterminal || + g->symtab[i]->isspecial) fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len, g->symtab[i]->name.txt); fprintf(f, "};\n\n"); } -### States and the goto tables. +### States, reductions, and the go to tables. 
-For each state we record the goto table and details of the reducible -production if there is one. -Some of the details of the reducible production are stored in the -`do_reduce` function to come later. Here we store the production -number, the body size (useful for stack management), and the resulting -symbol (useful for knowing how to free data later). +For each state we record the go to table and the reducible production if +there is one, the details of which are in a separate table of +reductions. Some of the details of the reducible production are stored +in the `do_reduce` function to come later. In the go to table we store +the production number and in the reductions table: the body size (useful +for stack management), the resulting symbol (useful for knowing how to +free data later), and the size of the resulting asn object (useful for +preallocation space. The go to table is stored in a simple array of `sym` and corresponding `state`. @@ -2044,15 +1963,15 @@ The go to table is stored in a simple array of `sym` and corresponding short sym; short state; }; + struct reduction { + short size; + short sym; + short result_size; + }; struct state { + short reduce_prod; short go_to_cnt; const struct lookup * go_to; - short reduce_prod; - short reduce_size; - short reduce_sym; - char starts_line; - char newline_only; - short min_prefix; }; ###### functions @@ -2062,10 +1981,13 @@ The go to table is stored in a simple array of `sym` and corresponding int i; fprintf(f, "#line 0 \"gen_goto\"\n"); for (i = 0; i < g->states; i++) { + struct symset gt = g->statetab[i]->go_to; int j; + + if (gt.cnt == 0) + continue; fprintf(f, "static const struct lookup goto_%d[] = {\n", i); - struct symset gt = g->statetab[i]->go_to; for (j = 0; j < gt.cnt; j++) fprintf(f, "\t{ %d, %d },\n", gt.syms[j], gt.data[j]); @@ -2073,7 +1995,25 @@ The go to table is stored in a simple array of `sym` and corresponding } } -###### functions + static void gen_reductions(FILE *f, struct grammar *g) + { + int i; + fprintf(f, "#line 0 \"gen_reductions\"\n"); + fprintf(f, "static const struct reduction reductions[] = {\n"); + for (i = 0; i < g->production_count; i++) { + struct production *pr = g->productions[i]; + struct symbol *hd = pr->head; + fprintf(f, "\t{%d, %d, ", pr->body_size, hd->num); + if (hd->struct_name.txt == NULL) + fprintf(f, "0 },\n"); + else + fprintf(f, "sizeof(struct %.*s%s) },\n", + hd->struct_name.len, + hd->struct_name.txt, + hd->isref ? "*" : ""); + } + fprintf(f, "};\n\n"); + } static void gen_states(FILE *f, struct grammar *g) { @@ -2098,19 +2038,11 @@ The go to table is stored in a simple array of `sym` and corresponding prod_len = pr->body_size; } } - - if (prod >= 0) - fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n", - i, is->go_to.cnt, i, prod, - g->productions[prod]->body_size, - g->productions[prod]->head->num, - is->starts_line, - g->productions[prod]->line_like, - is->min_prefix); + if (is->go_to.cnt) + fprintf(f, "\t[%d] = { %d, %d, goto_%d },\n", + i, prod, is->go_to.cnt, i); else - fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n", - i, is->go_to.cnt, i, - is->starts_line, is->min_prefix); + fprintf(f, "\t[%d] = { %d, 0, NULL },\n", i, prod); } fprintf(f, "};\n\n"); } @@ -2121,9 +2053,9 @@ When the parser engine decides to reduce a production, it calls `do_reduce` which runs the code that was included with the production, if any. -This code needs to be able to store data somewhere. 
Rather than -requiring `do_reduce` to `malloc` that "somewhere", we pass in a large -buffer and have `do_reduce` return the size to be saved. +This code needs to be able to store data somewhere so we record the size +of the data expected with each state so it can be allocated before +`do_reduce` is called. In order for the code to access "global" context, we pass in the "config" pointer that was passed to the parser function. If the `struct @@ -2206,7 +2138,7 @@ transformed, and will cause an error when the code is compiled. c = *name; } if (namlen == 0) { - if (name == *namep) + if (name == *namep || n > p->body_size) return -1; *namep = name; return n; @@ -2215,8 +2147,10 @@ transformed, and will cause an error when the code is compiled. for (i = 0; i < p->body_size; i++) { if (!subseq_match(nam, namlen, p->body[i]->name)) continue; - if (slen == 0 || p->body[i]->name.len < slen) + if (slen == 0 || p->body[i]->name.len < slen) { s = i; + slen = p->body[i]->name.len; + } if (s >= 0 && p->body[i] != p->body[s] && p->body[i]->name.len == p->body[s]->name.len) /* not unique, so s cannot be used */ @@ -2224,7 +2158,7 @@ transformed, and will cause an error when the code is compiled. } if (s < 0) return -1; - if (n == 0); + if (n == 0) n = 1; for (i = 0; i < p->body_size; i++) if (p->body[i] == p->body[s]) { @@ -2232,7 +2166,7 @@ transformed, and will cause an error when the code is compiled. if (n == 0) break; } - if (n > 1) + if (n > 0 || i == p->body_size) return -1; *namep = name; return i + 1; @@ -2303,15 +2237,15 @@ transformed, and will cause an error when the code is compiled. ###### functions static void gen_reduce(FILE *f, struct grammar *g, char *file, - struct code_node *code) + struct code_node *pre_reduce) { int i; fprintf(f, "#line 1 \"gen_reduce\"\n"); fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n"); fprintf(f, "{\n"); fprintf(f, "\tint ret_size = 0;\n"); - if (code) - code_node_print(f, code, file); + if (pre_reduce) + code_node_print(f, pre_reduce, file); fprintf(f, "#line 4 \"gen_reduce\"\n"); fprintf(f, "\tswitch(prod) {\n"); @@ -2359,7 +2293,15 @@ appropriate for tokens) on any terminal symbol. fprintf(f, "static void do_free(short sym, void *asn)\n"); fprintf(f, "{\n"); fprintf(f, "\tif (!asn) return;\n"); - fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm); + fprintf(f, "\tif (sym < %d", g->first_nonterm); + /* Need to handle special terminals too */ + for (i = 0; i < g->num_syms; i++) { + struct symbol *s = g->symtab[i]; + if (i >= g->first_nonterm && s->type == Terminal && + s->isspecial) + fprintf(f, " || sym == %d", s->num); + } + fprintf(f, ") {\n"); fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n"); fprintf(f, "\tswitch(sym) {\n"); @@ -2446,7 +2388,7 @@ grammar file). case 't': tag = optarg; break; default: - fprintf(stderr, "Usage: parsergen ...\n"); + fprintf(stderr, "Usage: parsergen -[05SL1R] [-t tag] [-o output] input\n"); exit(1); } } @@ -2634,9 +2576,9 @@ recognised properly, and link with `libicuuc` as `libmdcode` requires that. Having analysed the grammar and generated all the tables, we only need the shift/reduce engine to bring it all together. -### Goto table lookup +### Go to table lookup -The parser generator has nicely provided us with goto tables sorted by +The parser generator has nicely provided us with go to tables sorted by symbol number. We need a binary search function to find a symbol in the table. @@ -2662,6 +2604,24 @@ table. 
return -1; } +### Memory allocation + +The `scanner` returns tokens in a local variable - we want them in allocated +memory so they can live in the `asn_stack`. So we provide `tok_copy` to +make an allocated copy as required. + +###### parser includes + #include + +###### parser functions + + static struct token *tok_copy(struct token tk) + { + struct token *new = malloc(sizeof(*new)); + *new = tk; + return new; + } + ### The state stack. The core data structure for the parser is the stack. This tracks all @@ -2671,33 +2631,20 @@ The stack usually won't grow very large - maybe a few tens of entries. So we dynamically grow an array as required but never bother to shrink it down again. -We keep the stack as two separate allocations. One, `asn_stack` stores +We keep the stack as two separate allocations. One, `asn_stack`, stores the "abstract syntax nodes" which are created by each reduction. When we call `do_reduce` we need to pass an array of the `asn`s of the body of the production, and by keeping a separate `asn` stack, we can just pass a pointer into this stack. The other allocation stores all other stack fields of which there are -several. The `state` is the most important one and guides the parsing +two. The `state` is the most important one and guides the parsing process. The `sym` is nearly unnecessary as it is implicit in the state. However when we want to free entries from the `asn_stack`, it helps to know what type they are so we can call the right freeing function. The symbol leads us to the right free function through `do_free`. -The `indents` count tracks the line indents with-in the symbol or -immediately follow it. These are used to allow indent information to -guide parsing and error recovery. - -`since_newline` tracks how many stack frames since the last -start-of-line (whether indented or not). So if `since_newline` is -zero, then this symbol is at the start of a line. Similarly -`since_indent` counts the number of states since an indent, it is zero -precisely when `indents` is not zero. - -`newline_permitted` keeps track of whether newlines should be ignored -or not. - The stack is most properly seen as alternating states and symbols - states, like the 'DOT' in items, are between symbols. Each frame in our stack holds a state and the symbol that was before it. The @@ -2710,16 +2657,13 @@ to mark the beginning of the file as well as the end. struct parser { struct frame { short state; - short newline_permitted; - short sym; - short indents; - short since_newline; - short since_indent; } *stack; void **asn_stack; int stack_size; int tos; + + ## parser state }; #### Shift and pop @@ -2730,13 +2674,10 @@ Shift applies not only to terminals but also to non-terminals. When we reduce a production we will pop off frames corresponding to the body symbols, then push on a frame for the head of the production. This last is exactly the same process as shifting in a terminal so we use the same -function for both. In both cases we provide the symbol, the number of -indents the symbol contains (which will be zero for a terminal symbol) -and a flag indicating the the symbol was at (or was reduced from a -symbol which was at) the start of a line. The state is deduced from the -current top-of-stack state and the new symbol. +function for both. In both cases we provide the symbol. The state is +deduced from the current top-of-stack state and the new symbol. 
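
As a rough sketch of that deduction (illustrative only: the helper
`next_state_for` is invented for this example and is not part of the
parser; `shift` below performs exactly this lookup inline, using the
`search` function and the `struct state` table described earlier):

###### Example: deducing the next state - sketch

	static int next_state_for(struct parser *p, short sym,
				  const struct state states[])
	{
		/* An empty stack always selects state zero; otherwise the
		 * new state is whatever the go to table of the current
		 * top-of-stack state maps the symbol to, or -1 if there
		 * is no entry for it.
		 */
		if (p->tos == 0)
			return 0;
		return search(&states[p->stack[p->tos-1].state], sym);
	}
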
-To simplify other code we arrange for `shift` to fail if there is no `goto` +To simplify other code we arrange for `shift` to fail if there is no `go to` state for the symbol. This is useful in basic parsing due to our design that we shift when we can, and reduce when we cannot. So the `shift` function reports if it could. @@ -2747,19 +2688,12 @@ stack is empty, it always chooses zero as the next state. So `shift` finds the next state. If that succeeds it extends the allocations if needed and pushes all the information onto the stacks. -Newlines are permitted after a `starts_line` state until an internal -indent. If the new frame has neither a `starts_line` state nor an -indent, newlines are permitted if the previous stack frame permitted -them. - ###### parser functions static int shift(struct parser *p, - short sym, short indents, short start_of_line, - void *asn, + short sym, void *asn, const struct state states[]) { - // Push an entry onto the stack struct frame next = {0}; int newstate = p->tos ? search(&states[p->stack[p->tos-1].state], @@ -2767,6 +2701,7 @@ them. : 0; if (newstate < 0) return 0; + if (p->tos >= p->stack_size) { p->stack_size += 10; p->stack = realloc(p->stack, p->stack_size @@ -2775,30 +2710,7 @@ them. * sizeof(p->asn_stack[0])); } next.sym = sym; - next.indents = indents; next.state = newstate; - if (states[newstate].starts_line) - next.newline_permitted = 1; - else if (indents) - next.newline_permitted = 0; - else if (p->tos) - next.newline_permitted = - p->stack[p->tos-1].newline_permitted; - else - next.newline_permitted = 0; - - if (!start_of_line) { - if (p->tos) - next.since_newline = p->stack[p->tos-1].since_newline + 1; - else - next.since_newline = 1; - } - if (indents) - next.since_indent = 0; - else if (p->tos) - next.since_indent = p->stack[p->tos-1].since_indent + 1; - else - next.since_indent = 1; p->stack[p->tos] = next; p->asn_stack[p->tos] = asn; @@ -2807,322 +2719,228 @@ them. } `pop` primarily moves the top of stack (`tos`) back down the required -amount and frees any `asn` entries that need to be freed. It also -collects a summary of the indents and line starts in the symbols that -are being removed. It is called _after_ we reduce a production, just -before we `shift` the nonterminal in. +amount and frees any `asn` entries that need to be freed. It is called +_after_ we reduce a production, just before we `shift` the nonterminal +in. ###### parser functions - static int pop(struct parser *p, int num, - short *start_of_line, - void(*do_free)(short sym, void *asn)) + static void pop(struct parser *p, int num, + void(*do_free)(short sym, void *asn)) { int i; - short indents = 0; - int sol = 0; p->tos -= num; - for (i = 0; i < num; i++) { - sol |= !p->stack[p->tos+i].since_newline; - indents += p->stack[p->tos+i].indents; - do_free(p->stack[p->tos+i].sym, - p->asn_stack[p->tos+i]); - } - if (start_of_line) - *start_of_line = sol; - return indents; + for (i = 0; i < num; i++) + do_free(p->stack[p->tos+i].sym, p->asn_stack[p->tos+i]); } -### Memory allocation +### The heart of the parser. -The `scanner` returns tokens in a local variable - we want them in allocated -memory so they can live in the `asn_stack`. Similarly the `asn` produced by -a reduce is in a large buffer. Both of these require some allocation and -copying, hence `memdup` and `tokcopy`. +Now we have the parser. For each token we might shift it, trigger a +reduction, or start error handling. 2D tokens (IN, OUT, NEWLINE, EOL) +might also be ignored. 
Ignoring tokens is combined with shifting. -###### parser includes - #include +###### parser vars -###### parser functions + struct parser p = { 0 }; + struct token *tk = NULL; + int accepted = 0; - void *memdup(void *m, int len) - { - void *ret; - if (len == 0) - return NULL; - ret = malloc(len); - memcpy(ret, m, len); - return ret; +###### heart of parser + + shift(&p, TK_eof, NULL, states); + while (!accepted && p.tos > 0) { + struct frame *tos = &p.stack[p.tos-1]; + if (!tk) + tk = tok_copy(token_next(tokens)); + parser_trace(trace, &p, + tk, states, non_term, config->known_count); + + ## try shift or ignore + ## try reduce + ## handle error } - static struct token *tok_copy(struct token tk) - { - struct token *new = malloc(sizeof(*new)); - *new = tk; - return new; +Indents are ignored unless they can be shifted onto the stack +immediately or nothing can be shifted (in which case we reduce, and try +again). The end of an indented section - the OUT token - is ignored +precisely when the indent was ignored. To keep track of this we need a +small stack of flags, which is easily stored as bits in an `unsigned +long`. This will never overflow and the scanner only allows 20 levels +of indentation. + +###### parser state + unsigned long ignored_indents; + +NEWLINE is ignored when in an indented section of text which was not +explicitly expected by the grammar. So if the most recent indent is +ignored, so is any NEWLINE token. + +If a NEWLINE is seen but it cannot be shifted, we try to shift an EOL +token instead. If that succeeds, we make a new copy of the NEWLINE +token and continue. This allows a NEWLINE to appear to be preceded by +an indefinite number of EOL tokens. + +The token number for `EOL` cannot be statically declared, so when the +parser starts we need to look through the array of non-terminals to find +the EOL. + +###### parser state + int tk_eol; + +###### find eol + p.tk_eol = 0; + while (strcmp(non_term[p.tk_eol], "EOL") != 0) + p.tk_eol += 1; + p.tk_eol += TK_reserved + config->known_count; + +For other tokens, we shift the next token if that is possible, otherwise +we try to reduce a production. + +###### try shift or ignore + + if ((tk->num == TK_newline || tk->num == TK_out) && + (p.ignored_indents & 1)) { + /* indented, so ignore OUT and NEWLINE */ + if (tk->num == TK_out) + p.ignored_indents >>= 1; + free(tk); + tk = NULL; + parser_trace_action(trace, "Ignore"); + continue; + } + + if (shift(&p, tk->num, tk, states)) { + if (tk->num == TK_out) + p.ignored_indents >>= 1; + if (tk->num == TK_in) + p.ignored_indents <<= 1; + + parser_trace_action(trace, "Shift"); + tk = NULL; + ## did shift + continue; + } else if (tk->num == TK_newline && + shift(&p, p.tk_eol, tk, states)) { + tk = tok_copy(*tk); + parser_trace_action(trace, "ShiftEOL"); + continue; + } + + if (tk->num == TK_in && states[p.stack[p.tos-1].state].go_to_cnt > 0) { + /* No indent expected here and reduce is not mandatory, so ignore IN */ + free(tk); + tk = NULL; + p.ignored_indents <<= 1; + p.ignored_indents |= 1; + parser_trace_action(trace, "Ignore"); + continue; } -### The heart of the parser. +We have already discussed the bulk of the handling of a "reduce" action, +with the `pop()` and `shift()` functions doing much of the work. There +is a little more complexity needed to manage storage for the asn (Abstract +Syntax Node), and also a test of whether the reduction is permitted. -Now we have the parser. For each token we might shift it, trigger a -reduction, or start error handling. 
2D tokens (IN, OUT, EOL) also need -to be handled. - -We return whatever `asn` was returned by reducing production zero. - -If we can neither shift nor reduce we have an error to handle. We pop -single entries off the stack until we can shift the `TK_error` symbol, -then drop input tokens until we find one we can shift into the new error -state. We need to ensure that something is dropped or shifted after an -error, or we could get into an infinite loop, only shifting in -`TK_error`, then reducing. So we track if there has been a shift since -the last error, and if not the next error always discards one token. - -When we find `TK_in` and `TK_out` tokens which report indents we need -to handle them directly as the grammar cannot express what we want to -do with them. - -`TK_in` tokens are easy: we simply update indent count in the top stack frame to -record how many indents there are following the previous token. - -`TK_out` tokens must be canceled against an indent count -within the stack. If we can reduce some symbols that are all since -the most recent indent, then we do that first. If the minimum prefix -of the current state then extends back before the most recent indent, -that indent can be cancelled. If the minimum prefix is shorter then -the indent had ended prematurely and we must start error handling, which -is still a work-in-progress. - -`TK_newline` tokens are ignored unless the top stack frame records -that they are permitted. In that case they will not be considered for -shifting if it is possible to reduce some symbols that are all since -the most recent start of line. This is how a newline forcibly -terminates any line-like structure - we try to reduce down to at most -one symbol for each line where newlines are allowed. -A consequence of this is that a rule like - -###### Example: newlines - broken - - Newlines -> - | NEWLINE Newlines - IfStatement -> Newlines if .... - -cannot work, as the NEWLINE will never be shifted as the empty string -will be reduced first. Optional sets of newlines need to be include -in the thing that preceed: - -###### Example: newlines - works - - If -> if - | NEWLINE If - IfStatement -> If .... - -Here the NEWLINE will be shifted because nothing can be reduced until -the `if` is seen. - -When during error handling we discard tokens read in, we want to keep -discarding until we see one that is recognised. If we had a full set -of LR(1) grammar states, this would mean looking in the look-ahead set, -but we don't keep a full look-ahead set. We only record the subset -that leads to SHIFT. We can, however, deduce the look-ahead set by -looking at the SHIFT subsets for all states that we can get to by -reducing zero or more times. So we need a little function which -checks if a given token is in any of these look-ahead sets. +When we try to shift the result of reducing production-zero, it will +fail because there is no next state. In this case the asn will not have +been stored on the stack, so it get stored in the `ret` variable, and we +report that that input has been accepted. 
-###### parser includes - #include "parser.h" +###### parser vars -###### parser_run + void *ret = NULL; - static int in_lookahead(struct token *tk, const struct state *states, int state) - { - while (state >= 0) { - if (search(&states[state], tk->num) >= 0) - return 1; - if (states[state].reduce_prod < 0) - return 0; - state = search(&states[state], states[state].reduce_sym); +###### try reduce + + if (states[tos->state].reduce_prod >= 0) { + void **body; + void *res; + const struct state *nextstate = &states[tos->state]; + int prod = nextstate->reduce_prod; + int size = reductions[prod].size; + int res_size = reductions[prod].result_size; + + body = p.asn_stack + (p.tos - size); + res = res_size ? calloc(1, res_size) : NULL; + res_size = do_reduce(prod, body, config, res); + if (res_size != reductions[prod].result_size) + abort(); + pop(&p, size, do_free); + if (!shift(&p, reductions[prod].sym, res, states)) { + accepted = 1; + ret = res; + parser_trace_action(trace, "Accept"); + } else + parser_trace_action(trace, "Reduce"); + continue; + } + +If we can neither shift nor reduce we have an error to handle. There +are two possible responses to an error: we can pop single frames off the +stack until we find a frame that can shift `TK_error`, or we can discard +the current look-ahead symbol. + +When we first see an error we do the first of these and set a flag to +record that we are processing an error. If the normal shift/reduce +tests fail to find that anything can be done from that state, we start +dropping tokens until either we manage to shift one, or reach end-of-file. + +###### parser vars + + int in_err = 0; + +###### did shift + + in_err = 0; + +###### handle error + + if (in_err) { + parser_trace_action(trace, "DISCARD"); + if (tk->num == TK_eof) + break; + free(tk); + tk = NULL; + } else { + struct token *err_tk; + + parser_trace_action(trace, "ERROR"); + + err_tk = tok_copy(*tk); + while (p.tos > 0 && shift(&p, TK_error, err_tk, states) == 0) + // discard this state + pop(&p, 1, do_free); + if (p.tos == 0) { + free(err_tk); + // no state accepted TK_error + break; } - return 0; + in_err = 1; } +###### parser includes + #include "parser.h" + +###### parser_run + void *parser_run(struct token_state *tokens, const struct state states[], + const struct reduction reductions[], int (*do_reduce)(int, void**, struct token_config*, void*), void (*do_free)(short, void*), FILE *trace, const char *non_term[], struct token_config *config) { - struct parser p = { 0 }; - struct token *tk = NULL; - int accepted = 0; - int shift_since_err = 1; - void *ret = NULL; - - shift(&p, TK_eof, 0, 1, NULL, states); - while (!accepted && p.tos > 0) { - struct token *err_tk; - struct frame *tos = &p.stack[p.tos-1]; - if (!tk) - tk = tok_copy(token_next(tokens)); - parser_trace(trace, &p, - tk, states, non_term, config->known_count); - - if (tk->num == TK_in) { - tos->indents += 1; - tos->since_newline = 0; - tos->since_indent = 0; - if (!states[tos->state].starts_line) - tos->newline_permitted = 0; - free(tk); - tk = NULL; - parser_trace_action(trace, "Record"); - continue; - } - if (tk->num == TK_out) { - if (states[tos->state].reduce_size >= 0 && - states[tos->state].reduce_size <= tos->since_indent) - goto force_reduce; - if (states[tos->state].min_prefix >= tos->since_indent) { - // OK to cancel - struct frame *in = tos - tos->since_indent; - in->indents -= 1; - if (in->indents == 0) { - /* Reassess since_indent and newline_permitted */ - if (in > p.stack) { - in->since_indent = in[-1].since_indent + 1; 
- in->newline_permitted = in[-1].newline_permitted; - } else { - in->since_indent = 0; - in->newline_permitted = 0; - } - if (states[in->state].starts_line) - in->newline_permitted = 1; - while (in < tos) { - in += 1; - in->since_indent = in[-1].since_indent + 1; - if (states[in->state].starts_line) - in->newline_permitted = 1; - else - in->newline_permitted = in[-1].newline_permitted; - } - } - free(tk); - tk = NULL; - parser_trace_action(trace, "Cancel"); - continue; - } - // fall through to error handling as both SHIFT and REDUCE - // will fail. - } - if (tk->num == TK_newline) { - if (!tos->newline_permitted) { - free(tk); - tk = NULL; - parser_trace_action(trace, "Discard"); - continue; - } - if (tos->since_newline > 1 && - states[tos->state].reduce_size >= 0 && - states[tos->state].reduce_size <= tos->since_newline) - goto force_reduce; - } - if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) { - shift_since_err = 1; - tk = NULL; - parser_trace_action(trace, "Shift"); - continue; - } - force_reduce: - if (states[tos->state].reduce_prod >= 0 && - states[tos->state].newline_only && - !(tk->num == TK_newline || - tk->num == TK_eof || - tk->num == TK_out || - (tos->indents == 0 && tos->since_newline == 0))) { - /* Anything other than newline or out or eof - * in an error unless we are already at start - * of line, as this production must end at EOL. - */ - } else if (states[tos->state].reduce_prod >= 0) { - void **body; - void *res; - const struct state *nextstate = &states[tos->state]; - int prod = nextstate->reduce_prod; - int size = nextstate->reduce_size; - int bufsize; - static char buf[16*1024]; - short indents, start_of_line; - - body = p.asn_stack + (p.tos - size); - - bufsize = do_reduce(prod, body, config, buf); - - indents = pop(&p, size, &start_of_line, - do_free); - res = memdup(buf, bufsize); - memset(buf, 0, bufsize); - if (!shift(&p, nextstate->reduce_sym, - indents, start_of_line, - res, states)) { - if (prod != 0) abort(); - accepted = 1; - ret = res; - } - parser_trace_action(trace, "Reduce"); - continue; - } - /* Error. We walk up the stack until we - * find a state which will accept TK_error. - * We then shift in TK_error and see what state - * that takes us too. - * Then we discard input tokens until - * we find one that is acceptable. - */ - parser_trace_action(trace, "ERROR"); - short indents = 0, start_of_line; - - err_tk = tok_copy(*tk); - while (p.tos > 0 && - shift(&p, TK_error, 0, 0, - err_tk, states) == 0) - // discard this state - indents += pop(&p, 1, &start_of_line, do_free); - if (p.tos == 0) { - free(err_tk); - // no state accepted TK_error - break; - } - if (!shift_since_err) { - /* must discard at least one token to avoid - * infinite loop. - */ - if (tk->num == TK_eof) - break; - free(tk); - tk = tok_copy(token_next(tokens)); - } - shift_since_err = 0; - tos = &p.stack[p.tos-1]; - while (!in_lookahead(tk, states, tos->state) && - tk->num != TK_eof) { - free(tk); - tk = tok_copy(token_next(tokens)); - shift_since_err = 1; - if (tk->num == TK_in) - indents += 1; - if (tk->num == TK_out) { - if (indents == 0) - break; - indents -= 1; - // FIXME update since_indent here - } - } - tos->indents += indents; - } + ## parser vars + + ## find eol + + ## heart of parser + free(tk); - pop(&p, p.tos, NULL, do_free); + pop(&p, p.tos, do_free); free(p.asn_stack); free(p.stack); return ret; @@ -3131,6 +2949,7 @@ checks if a given token is in any of these look-ahead sets. 
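
To see how the generated code hands control to this library function,
here is roughly the wrapper that `parser_generate` prints for a grammar.
The tag `calc` and the exact signature are hypothetical (the real text
comes from the `fprintf` calls shown earlier), so treat this purely as
an illustrative sketch:

###### Example: generated parse_XX wrapper - sketch

	void *parse_calc(struct code_node *code, struct token_config *config,
			 FILE *trace)
	{
		struct token_state *tokens;

		config->words_marks = known;
		config->known_count = sizeof(known)/sizeof(known[0]);
		tokens = token_open(code, config);
		void *rv = parser_run(tokens, states, reductions, do_reduce,
				      do_free, trace, non_term, config);
		token_close(tokens);
		return rv;
	}
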
###### exported functions void *parser_run(struct token_state *tokens, const struct state states[], + const struct reduction reductions[], int (*do_reduce)(int, void**, struct token_config*, void*), void (*do_free)(short, void*), FILE *trace, const char *non_term[], @@ -3162,15 +2981,6 @@ end inside square brackets. [TK_newline] = "NEWLINE", [TK_eof] = "$eof", }; - static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[]) - { - fprintf(trace, "(%d", f->state); - if (states[f->state].starts_line) - fprintf(trace, "s"); - if (f->newline_permitted) - fprintf(trace, "n%d", f->since_newline); - fprintf(trace, ") "); - } void parser_trace(FILE *trace, struct parser *p, struct token *tk, const struct state states[], @@ -3192,13 +3002,9 @@ end inside square brackets. } else fputs(non_term[sym - TK_reserved - knowns], trace); - if (f->indents) - fprintf(trace, ".%d", f->indents); - if (f->since_newline == 0) - fputs("/", trace); fputs(" ", trace); } - parser_trace_state(trace, f, states); + fprintf(trace, "(%d) ", f->state); } fprintf(trace, "["); if (tk->num < TK_reserved &&