X-Git-Url: https://ocean-lang.org/code/?p=ocean;a=blobdiff_plain;f=csrc%2Fparsergen.mdc;h=742996e16222854cecedd235f9449987cc6c5e08;hp=1bb813e78318d20e6285db273abdae2d0086c7b4;hb=d7f2c9af259a43cbdb8def0ebe8040deed480848;hpb=ad07ebb60c5e771f4c00f2dae948c5148272a3e7 diff --git a/csrc/parsergen.mdc b/csrc/parsergen.mdc index 1bb813e..742996e 100644 --- a/csrc/parsergen.mdc +++ b/csrc/parsergen.mdc @@ -17,7 +17,9 @@ There are several distinct sections. 5. `parser_run` is a library function intended to be linked together with the generated parser tables to complete the implementation of a parser. -6. `calc.cgm` is a test grammar for a simple calculator. +6. Finally `calc` is a test grammar for a simple calculator. The + `parsergen` program built from the C code in this file can extract + that grammar directly from this file and process it. ###### File: parsergen.c #include @@ -42,12 +44,10 @@ There are several distinct sections. ## parser includes ## parser functions ## parser_run -###### File: calc.cgm - ## demo grammar ###### File: parsergen.mk CFLAGS += -Wall -g all :: parsergen calc - parsergen.c parsergen.mk calc.cgm libparser.c parser.h : parsergen.mdc + parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc ./md2c parsergen.mdc ## Reading the grammar @@ -58,6 +58,13 @@ sections: `header`, `code`, and `grammar`. The first two will be literally copied into the generated `.c` and `.h`. files. The last contains the grammar. This is tokenised with "[scanner][]". +If the `--tag` option is given, then any top level header that doesn't +start with the tag is ignored, and the tag is striped from the rest. So +`--tag Foo` +means that the three needed sections must be `Foo: header`, `Foo: code`, +and `Foo: grammar`. The tag `calc` is used to extract the same calculator +from this file. + [mdcode]: mdcode.html [scanner]: scanner.html @@ -74,9 +81,10 @@ declarations, and data type declarations. These are all parsed with _ad hoc_ parsing as we don't have a parser generator yet. The precedence and associativity can be set for each production, but -can be inherited from symbols. The data type is potentially defined -for each non-terminal and describes what C structure is used to store -information about each symbol. +can be inherited from symbols. The data type (either structure or a +reference to a structure) is potentially defined for each non-terminal +and describes what C structure is used to store information about each +symbol. ###### declarations enum assoc {Left, Right, Non}; @@ -84,6 +92,7 @@ information about each symbol. struct symbol { struct text struct_name; + int isref; enum assoc assoc; unsigned short precedence; ## symbol fields @@ -91,6 +100,7 @@ information about each symbol. struct production { unsigned short precedence; enum assoc assoc; + char line_like; ## production fields }; struct grammar { @@ -103,6 +113,9 @@ comparing we define `text_is` and `prtxt`, which should possibly go in `mdcode`. `scanner` does provide `text_dump` which is useful for strings which might contain control characters. +`strip_tag` is a bit like `strncmp`, but adds a test for a colon, +because that is what we need to detect tags. + ###### functions static int text_is(struct text t, char *s) { @@ -114,6 +127,20 @@ which might contain control characters. 
printf("%.*s", t.len, t.txt); } + static int strip_tag(struct text *t, char *tag) + { + int skip = strlen(tag) + 1; + if (skip >= t->len || + strncmp(t->txt, tag, skip-1) != 0 || + t->txt[skip-1] != ':') + return 0; + while (skip < t->len && t->txt[skip] == ' ') + skip++; + t->len -= skip; + t->txt += skip; + return 1; + } + ### Symbols Productions are comprised primarily of symbols - terminal and @@ -124,11 +151,17 @@ those which don't. There are also "virtual" symbols used for precedence marking discussed later, and sometimes we won't know what type a symbol is yet. +To help with code safety it is possible to declare the terminal symbols. +If this is done, then any symbol used in a production that does not +appear in a head and is not declared is treated as an error. + ###### forward declarations enum symtype { Unknown, Virtual, Terminal, Nonterminal }; char *symtypes = "UVTN"; ###### symbol fields enum symtype type; +###### grammar fields + int terminals_declared; Symbols can be either `TK_ident` or `TK_mark`. They are saved in a table of known symbols and the resulting parser will report them as @@ -175,18 +208,6 @@ symbol, but its type might be `Unknown`. int num_syms; ###### functions - static int text_cmp(struct text a, struct text b) - { - int len = a.len; - if (a.len > b.len) - len = b.len; - int cmp = strncmp(a.txt, b.txt, len); - if (cmp) - return cmp; - else - return a.len - b.len; - } - static struct symbol *sym_find(struct grammar *g, struct text s) { struct symbol **l = &g->syms; @@ -226,20 +247,24 @@ symbol, but its type might be `Unknown`. ### Data types and precedence. -Data type specification and precedence specification are both -introduced by a dollar sign at the start of the line. If the next -word is `LEFT`, `RIGHT` or `NON`, then the line specifies a +Data type specification, precedence specification, and declaration of +terminals are all introduced by a dollar sign at the start of the line. +If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a +precedence, if it is `TERM` the the line declares terminals without precedence, otherwise it specifies a data type. The data type name is simply stored and applied to the head of all -subsequent productions. It must be the name of a structure, so `$expr` -maps to `struct expr`. - -Any productions given before the first data type will have no data type -and can carry no information. In order to allow other non-terminals to -have no type, the data type `$void` can be given. This does *not* mean -that `struct void` will be used, but rather than no type will be -associated with future non-terminals. +subsequent productions. It must be the name of a structure optionally +preceded by an asterisk which means a reference or "pointer". So +`$expression` maps to `struct expression` and `$*statement` maps to +`struct statement *`. + +Any productions given before the first data type declaration will have +no data type associated with them and can carry no information. In +order to allow other non-terminals to have no type, the data type +`$void` can be given. This does *not* mean that `struct void` will be +used, but rather than no type will be associated with future +non-terminals. The precedence line must contain a list of symbols - typically terminal symbols, but not necessarily. It can only contain symbols @@ -260,8 +285,12 @@ declares how it associates. This level is stored in each symbol listed and may be inherited by any production which uses the symbol. 
A production inherits from the last symbol which has a precedence. +The symbols on the first precedence line have the lowest precedence. +Subsequent lines introduce symbols with higher precedence. + ###### grammar fields struct text current_type; + int type_isref; int prec_levels; ###### declarations @@ -269,11 +298,12 @@ production inherits from the last symbol which has a precedence. static const char *known[] = { "$$", "${", "}$" }; ###### functions - static char *dollar_line(struct token_state *ts, struct grammar *g) + static char *dollar_line(struct token_state *ts, struct grammar *g, int isref) { struct token t = token_next(ts); char *err; enum assoc assoc; + int term = 0; int found; if (t.num != TK_ident) { @@ -286,8 +316,12 @@ production inherits from the last symbol which has a precedence. assoc = Right; else if (text_is(t.txt, "NON")) assoc = Non; - else { + else if (text_is(t.txt, "TERM")) { + term = 1; + g->terminals_declared = 1; + } else { g->current_type = t.txt; + g->type_isref = isref; if (text_is(t.txt, "void")) g->current_type.txt = NULL; t = token_next(ts); @@ -298,7 +332,12 @@ production inherits from the last symbol which has a precedence. return NULL; } - // This is a precedence line, need some symbols. + if (isref) { + err = "$* cannot be followed by a precedence"; + goto abort; + } + + // This is a precedence or TERM line, need some symbols. found = 0; g->prec_levels += 1; t = token_next(ts); @@ -312,6 +351,10 @@ production inherits from the last symbol which has a precedence. err = "$$ must be followed by a word"; goto abort; } + if (term) { + err = "Virtual symbols not permitted on $TERM line"; + goto abort; + } } else if (t.num != TK_ident && t.num != TK_mark) { err = "Illegal token in precedence line"; @@ -319,16 +362,19 @@ production inherits from the last symbol which has a precedence. } s = sym_find(g, t.txt); if (s->type != Unknown) { - err = "Symbols in precedence line must not already be known."; + err = "Symbols in precedence/TERM line must not already be known."; goto abort; } s->type = type; - s->precedence = g->prec_levels; - s->assoc = assoc; + if (!term) { + s->precedence = g->prec_levels; + s->assoc = assoc; + } found += 1; + t = token_next(ts); } if (found == 0) - err = "No symbols given on precedence line"; + err = "No symbols given on precedence/TERM line"; goto abort; return NULL; abort: @@ -361,12 +407,31 @@ collected and forms the code-fragment for the production. It must all be in one `code_node` of the literate code. The `}$` must be at the end of a line. -Text in the code fragment will undergo substitutions where `$N` for -some numeric `N` will be replaced with a variable holding the parse -information for the particular symbol in the production. `$0` is the -head of the production, `$1` is the first symbol of the body, etc. -The type of `$N` for a terminal symbol is `struct token`. For -a non-terminal, it is whatever has been declared for that symbol. +Text in the code fragment will undergo substitutions where `$N` or +`$ @@ -424,9 +489,11 @@ Now we have all the bit we need to parse a full production. struct symbol **body; int body_size; struct text code; + int code_line; ###### symbol fields int first_production; + int left_recursive; ###### functions static char *parse_production(struct grammar *g, @@ -443,8 +510,10 @@ Now we have all the bit we need to parse a full production. 
tk = token_next(state); while (tk.num == TK_ident || tk.num == TK_mark) { struct symbol *bs = sym_find(g, tk.txt); - if (bs->type == Unknown) - bs->type = Terminal; + if (bs->type == Unknown) { + if (!g->terminals_declared) + bs->type = Terminal; + } if (bs->type == Virtual) { err = "Virtual symbol not permitted in production"; goto abort; @@ -464,15 +533,21 @@ Now we have all the bit we need to parse a full production. goto abort; } vs = sym_find(g, tk.txt); - if (vs->type != Virtual) { - err = "symbol after $$ must be virtual"; + if (vs->num == TK_newline) + p.line_like = 1; + else if (vs->num == TK_out) + p.line_like = 2; + else if (vs->precedence == 0) { + err = "symbol after $$ must have precedence"; goto abort; + } else { + p.precedence = vs->precedence; + p.assoc = vs->assoc; } - p.precedence = vs->precedence; - p.assoc = vs->assoc; tk = token_next(state); } if (tk.num == TK_open) { + p.code_line = tk.line; p.code = collect_code(state, tk); if (p.code.txt == NULL) { err = "code fragment not closed properly"; @@ -480,6 +555,11 @@ Now we have all the bit we need to parse a full production. } tk = token_next(state); } + if (p.body_size >= 2 && + p.body[0] == p.head && + p.body[1]->num != TK_newline) + p.head->left_recursive = 1; + if (tk.num != TK_newline && tk.num != TK_eof) { err = "stray tokens at end of line"; goto abort; @@ -513,18 +593,23 @@ where `START` is the first non-terminal given. struct production *p = calloc(1,sizeof(*p)); struct text start = {"$start",6}; struct text eof = {"$eof",4}; + struct text code = {"$0 = $<1;", 9}; p->head = sym_find(g, start); p->head->type = Nonterminal; + p->head->struct_name = g->current_type; + p->head->isref = g->type_isref; + if (g->current_type.txt) + p->code = code; array_add(&p->body, &p->body_size, head); array_add(&p->body, &p->body_size, sym_find(g, eof)); - g->start = p->head->num; p->head->first_production = g->production_count; array_add(&g->productions, &g->production_count, p); -Now we are ready to read in the grammar. - -###### grammar fields - int start; // the 'start' symbol. +Now we are ready to read in the grammar. We ignore comments +and strings so that the marks which introduce them can be +used as terminals in the grammar. We don't ignore numbers +even though we don't allow them as that causes the scanner +to produce errors that the parser is better positioned to handle. ###### grammar_read static struct grammar *grammar_read(struct code_node *code) @@ -545,7 +630,7 @@ Now we are ready to read in the grammar. }; struct token_state *state = token_open(code, &conf); - struct token tk = token_next(state); + struct token tk; struct symbol *head = NULL; struct grammar *g; char *err = NULL; @@ -567,7 +652,8 @@ Now we are ready to read in the grammar. else { head->type = Nonterminal; head->struct_name = g->current_type; - if (g->start == 0) { + head->isref = g->type_isref; + if (g->production_count == 0) { ## create production zero } head->first_production = g->production_count; @@ -587,7 +673,15 @@ Now we are ready to read in the grammar. 
err = "First production must have a head"; } else if (tk.num == TK_mark && text_is(tk.txt, "$")) { - err = dollar_line(state, g); + err = dollar_line(state, g, 0); + } else if (tk.num == TK_mark + && text_is(tk.txt, "$*")) { + err = dollar_line(state, g, 1); + } else if (tk.num == TK_mark + && text_is(tk.txt, "//")) { + while (tk.num != TK_newline && + tk.num != TK_eof) + tk = token_next(state); } else { err = "Unrecognised token at start of line."; } @@ -595,6 +689,21 @@ Now we are ready to read in the grammar. goto abort; } token_close(state); + if (g->terminals_declared) { + struct symbol *s; + int errs = 0; + for (s = g->syms; s; s = s->next) { + if (s->type != Unknown) + continue; + errs += 1; + fprintf(stderr, "Token %.*s not declared\n", + s->name.len, s->name.txt); + } + if (errs) { + free(g); + g = NULL; + } + } return g; abort: fprintf(stderr, "Error at line %d: %s\n", @@ -693,10 +802,10 @@ or `-1` to indicate failure. } We will often want to form the union of two symsets. When we do, we -will often want to know if anything changed (as they might mean there +will often want to know if anything changed (as that might mean there is more work to do). So `symset_union` reports whether anything was added to the first set. We use a slow quadratic approach as these -sets don't really get very big. If profiles shows this to be a problem is +sets don't really get very big. If profiles shows this to be a problem it can be optimised later. static int symset_union(struct symset *a, struct symset *b) @@ -799,7 +908,6 @@ array like the productions. return sl->ss; } - ### Setting `nullable` We set `nullable` on the head symbol for any production for which all @@ -837,34 +945,33 @@ changes happen. } } -### Setting `can_eol` +### Setting `line_like` In order to be able to ignore newline tokens when not relevant, but still include them in the parse when needed, we will need to know which states can start a "line-like" section of code. We ignore newlines when there is an indent since the most recent start of a -line-like section. +line-like symbol. -To know what is line-like, we first need to know which symbols can end -a line-like section, which is precisely those which can end with a -newline token. These symbols don't necessarily alway end with a -newline, but they can. Hence they are not described as "lines" but -only "line-like". +A "line_like" symbol is simply any symbol that can derive a NEWLINE. +If a symbol cannot derive a NEWLINE, then it is only part of a line - +so is word-like. If it can derive a NEWLINE, then we consider it to +be like a line. -Clearly the `TK_newline` token can end with a newline. Any symbol -which is the head of a production that contains a line-ending symbol -followed only by nullable symbols is also a line-ending symbol. We -use a new field `can_eol` to record this attribute of symbols, and -compute it in a repetitive manner similar to `set_nullable`. +Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which +is the head of a production that contains a line_like symbol is also a +line-like symbol. We use a new field `line_like` to record this +attribute of symbols, and compute it in a repetitive manner similar to +`set_nullable`. 
###### symbol fields - int can_eol; + int line_like; ###### functions - static void set_can_eol(struct grammar *g) + static void set_line_like(struct grammar *g) { int check_again = 1; - g->symtab[TK_newline]->can_eol = 1; + g->symtab[TK_newline]->line_like = 1; while (check_again) { int p; check_again = 0; @@ -872,17 +979,15 @@ compute it in a repetitive manner similar to `set_nullable`. struct production *pr = g->productions[p]; int s; - if (pr->head->can_eol) + if (pr->head->line_like) continue; - for (s = pr->body_size - 1; s >= 0; s--) { - if (pr->body[s]->can_eol) { - pr->head->can_eol = 1; + for (s = 0 ; s < pr->body_size; s++) { + if (pr->body[s]->line_like) { + pr->head->line_like = 1; check_again = 1; break; } - if (!pr->body[s]->nullable) - break; } } } @@ -901,7 +1006,7 @@ we repeat the calculations until no changes happen, just like with `set_nullable`. This makes use of the fact that `symset_union` reports if any change happens. -The core of this which finds the "first" of part of a production body +The core of this, which finds the "first" of part of a production body, will be reused for computing the "follow" sets, so we split it out into a separate function. @@ -1117,6 +1222,26 @@ can just compare the symset and the data values together. a.data[i] - b.data[i]; } +It will be helpful to know if an itemset has been "completed" or not, +particularly for LALR where itemsets get merged, at which point they +need to be consider for completion again. So a `completed` flag is needed. + +For correct handling of `TK_newline` when parsing, we will need to +know which states (itemsets) can occur at the start of a line, so we +will record a `starts_line` flag too whenever DOT is at the start of a +`line_like` symbol. + +Finally, for handling `TK_out` we need to know whether productions in the +current state started *before* the most recent indent. A state +doesn't usually keep details of individual productions, so we need to +add one extra detail. `min_prefix` is the smallest non-zero number of +symbols *before* DOT in any production in an itemset. This will allow +us to determine if the the most recent indent is sufficiently recent +to cancel it against a `TK_out`. If it was seen longer ago than the +`min_prefix`, and if the current state cannot be reduced, then the +indented section must have ended in the middle of a syntactic unit, so +an error must be signaled. + And now we can build the list of itemsets. The lookup routine returns both a success flag and a pointer to where in the list an insert should happen, so we don't need to search a second time. @@ -1127,8 +1252,11 @@ should happen, so we don't need to search a second time. short state; struct symset items; struct symset go_to; + enum assoc assoc; + unsigned short precedence; char completed; char starts_line; + int min_prefix; }; ###### grammar fields @@ -1158,7 +1286,7 @@ should happen, so we don't need to search a second time. } Adding an itemset may require merging the LA sets if LALR analysis is -happening. If any new LA set add symbol that weren't in the old LA set, we +happening. If any new LA set adds any symbols that weren't in the old LA set, we clear the `completed` flag so that the dependants of this itemset will be recalculated and their LA sets updated. @@ -1166,7 +1294,8 @@ recalculated and their LA sets updated. them to a data structure, of freeing them. 
static int add_itemset(struct grammar *g, struct symset ss, - enum grammar_type type, int starts_line) + enum assoc assoc, unsigned short precedence, + enum grammar_type type) { struct itemset **where, *is; int i; @@ -1176,9 +1305,10 @@ them to a data structure, of freeing them. is->state = g->states; g->states += 1; is->items = ss; + is->assoc = assoc; + is->precedence = precedence; is->next = *where; is->go_to = INIT_DATASET; - is->starts_line = starts_line; *where = is; return is->state; } @@ -1206,9 +1336,9 @@ them to a data structure, of freeing them. #### The build -To build all the itemsets, we first insert the initial itemset made from the -start symbol, complete each itemset, and then generate new itemsets from old -until no new ones can be made. +To build all the itemsets, we first insert the initial itemset made +from production zero, complete each itemset, and then generate new +itemsets from old until no new ones can be made. Completing an itemset means finding all the items where "DOT" is followed by a nonterminal and adding "DOT=0" items for every production from that @@ -1220,9 +1350,18 @@ be supplemented by the LA set for the item which produce the new item. We also collect a set of all symbols which follow "DOT" (in `done`) as this is used in the next stage. - -NOTE: precedence handling should happen here - I haven't written this yet -though. +If any of these symbols are flagged as `line_like`, then this +state must be a `starts_line` state so now is a good time to record that. + +When itemsets are created we assign a precedence to the itemset from +the complete item, if there is one. We ignore the possibility of +there being two and don't (currently) handle precedence in such +grammars. When completing a grammar we ignore any item where DOT is +followed by a terminal with a precedence lower than that for the +itemset. Unless the terminal has right associativity, we also ignore +items where the terminal has the same precedence. The result is that +unwanted items are still in the itemset, but the terminal doesn't get +into the go to set, so the item is ineffective. ###### complete itemset for (i = 0; i < is->items.cnt; i++) { @@ -1233,14 +1372,34 @@ though. struct symbol *s; struct symset LA = INIT_SYMSET; unsigned short sn = 0; + struct symset LAnl = INIT_SYMSET; + unsigned short snnl = 0; + if (is->min_prefix == 0 || + (bs > 0 && bs < is->min_prefix)) + is->min_prefix = bs; if (bs == pr->body_size) continue; s = pr->body[bs]; - if (symset_find(&done, s->num) < 0) + if (s->precedence && is->precedence && + is->precedence > s->precedence) + /* This terminal has a low precedence and + * shouldn't be shifted + */ + continue; + if (s->precedence && is->precedence && + is->precedence == s->precedence && s->assoc != Right) + /* This terminal has a matching precedence and is + * not Right-associative, so we mustn't shift it. + */ + continue; + if (symset_find(&done, s->num) < 0) { symset_add(&done, s->num, 0); + } if (s->type != Nonterminal) continue; + if (s->line_like) + is->starts_line = 1; again = 1; if (type >= LALR) { // Need the LA set. @@ -1252,6 +1411,10 @@ though. } sn = save_set(g, LA); LA = set_find(g, sn); + if (symset_find(&LA, TK_newline)) + symset_add(&LAnl, TK_newline, 0); + snnl = save_set(g, LAnl); + LAnl = set_find(g, snnl); } /* Add productions for this symbol */ @@ -1262,19 +1425,25 @@ though. 
int itm = item_num(p2, 0); int pos = symset_find(&is->items, itm); if (pos < 0) { - symset_add(&is->items, itm, sn); + if (g->productions[p2]->line_like) + symset_add(&is->items, itm, snnl); + else + symset_add(&is->items, itm, sn); /* Will have re-ordered, so start * from beginning again */ i = -1; } else if (type >= LALR) { struct symset ss = set_find(g, is->items.data[pos]); struct symset tmp = INIT_SYMSET; + struct symset *la = &LA; + if (g->productions[p2]->line_like) + la = &LAnl; symset_union(&tmp, &ss); - if (symset_union(&tmp, &LA)) { + if (symset_union(&tmp, la)) { is->items.data[pos] = save_set(g, tmp); i = -1; - }else + } else symset_free(tmp); } } @@ -1292,15 +1461,13 @@ with a pre-existing itemset). for (i = 0; i < done.cnt; i++) { int j; unsigned short state; - int starts_line = 0; struct symbol *sym = g->symtab[done.syms[i]]; + enum assoc assoc = Non; + unsigned short precedence = 0; struct symset newitemset = INIT_SYMSET; if (type >= LALR) newitemset = INIT_DATASET; - if (sym->can_eol || - (sym->nullable && is->starts_line)) - starts_line = 1; for (j = 0; j < is->items.cnt; j++) { int itm = is->items.syms[j]; int p = item_prod(itm); @@ -1316,6 +1483,13 @@ with a pre-existing itemset). if (type >= LALR) la = is->items.data[j]; pos = symset_find(&newitemset, pr->head->num); + if (bp + 1 == pr->body_size && + pr->precedence > 0 && + pr->precedence > precedence) { + // new itemset is reducible and has a precedence. + precedence = pr->precedence; + assoc = pr->assoc; + } if (pos < 0) symset_add(&newitemset, item_num(p, bp+1), la); else if (type >= LALR) { @@ -1333,12 +1507,12 @@ with a pre-existing itemset). } } } - state = add_itemset(g, newitemset, type, starts_line); + state = add_itemset(g, newitemset, assoc, precedence, type); if (symset_find(&is->go_to, done.syms[i]) < 0) symset_add(&is->go_to, done.syms[i], state); } -All that is left is to crate the initial itemset from production zero, and +All that is left is to create the initial itemset from production zero, and with `TK_eof` as the LA set. ###### functions @@ -1357,7 +1531,7 @@ with `TK_eof` as the LA set. } // production 0, offset 0 (with no data) symset_add(&first, item_num(0, 0), la); - add_itemset(g, first, type, g->productions[0]->body[0]->can_eol); + add_itemset(g, first, Non, 0, type); for (again = 0, is = g->items; is; is = is->next ?: again ? (again = 0, g->items) : NULL) { @@ -1406,6 +1580,11 @@ changeover point in `first_nonterm`. snum++; } g->first_nonterm = snum; + for (s = g->syms; s; s = s->next) + if (s->num < 0 && s->type != Virtual) { + s->num = snum; + snum++; + } for (s = g->syms; s; s = s->next) if (s->num < 0) { s->num = snum; @@ -1417,7 +1596,7 @@ changeover point in `first_nonterm`. g->symtab[s->num] = s; set_nullable(g); - set_can_eol(g); + set_line_like(g); if (type >= SLR) build_first(g); @@ -1435,7 +1614,7 @@ changeover point in `first_nonterm`. The purpose of the report is to give the grammar developer insight into how the grammar parser will work. It is basically a structured dump of -all the tables that have been generated, plus an description of any conflicts. +all the tables that have been generated, plus a description of any conflicts. ###### grammar_report static int grammar_report(struct grammar *g, enum grammar_type type) @@ -1444,11 +1623,13 @@ all the tables that have been generated, plus an description of any conflicts. 
if (g->follow) report_follow(g); report_itemsets(g); - return report_conflicts(g, type); + return report_conflicts(g, type) + report_left_recursive(g); } -Firstly we have the complete list of symbols, together with the "FIRST" -set if that was generated. +Firstly we have the complete list of symbols, together with the +"FIRST" set if that was generated. We add a mark to each symbol to +show if it can end in a newline (`>`), if it is considered to be +"line-like" (`<`), or if it is nullable (`.`). ###### functions @@ -1467,7 +1648,7 @@ set if that was generated. printf(" %c%c%3d%c: ", s->nullable ? '.':' ', - s->can_eol ? '>':' ', + s->line_like ? '<':' ', s->num, symtypes[s->type]); prtxt(s->name); if (s->precedence) @@ -1487,7 +1668,7 @@ set if that was generated. } } -Then we have to follow sets if they were computed. +Then we have the follow sets if they were computed. static void report_follow(struct grammar *g) { @@ -1529,9 +1710,19 @@ it up a bit. First the items, with production number and associativity. if (dot == pr->body_size) printf(" ."); printf(" [%d]", p); - if (pr->precedence) + if (pr->precedence && dot == pr->body_size) printf(" (%d%s)", pr->precedence, assoc_names[pr->assoc]); + if (dot < pr->body_size && + pr->body[dot]->precedence) { + struct symbol *s = pr->body[dot]; + printf(" [%d%s]", s->precedence, + assoc_names[s->assoc]); + } + if (pr->line_like == 1) + printf(" $$NEWLINE"); + else if (pr->line_like) + printf(" $$OUT"); printf("\n"); } @@ -1554,7 +1745,6 @@ The LA sets which are (possibly) reported with each item: Then the go to sets: - static void report_goto(struct grammar *g, struct symset gt) { int i; @@ -1576,7 +1766,11 @@ Now we can report all the item sets complete with items, LA sets, and GO TO. for (s = 0; s < g->states; s++) { int j; struct itemset *is = g->statetab[s]; - printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":""); + printf(" Itemset %d:%s min prefix=%d", + s, is->starts_line?" (startsline)":"", is->min_prefix); + if (is->precedence) + printf(" %d%s", is->precedence, assoc_names[is->assoc]); + printf("\n"); for (j = 0; j < is->items.cnt; j++) { report_item(g, is->items.syms[j]); if (is->items.data != NO_DATA) @@ -1610,9 +1804,9 @@ LR1 are also similar as they have FOLLOW or LA sets. } LR0 conflicts are any state which have both a reducible item and -a shiftable item. +a shiftable item, or two reducible items. -LR05 conflicts only occurs if two possibly reductions exist, +LR05 conflicts only occur if two possible reductions exist, as shifts always over-ride reductions. ###### conflict functions @@ -1659,12 +1853,18 @@ as shifts always over-ride reductions. } SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping -look ahead, or if a symbol in a look-ahead can be shifted. The differ only +look ahead, or if a symbol in a look-ahead can be shifted. They differ only in the source of the look ahead set. -We build a dataset mapping terminal to item for possible SHIFTs and then -another for possible REDUCE operations. We report when we get conflicts -between the two. +We build two datasets to reflect the "action" table: one which maps +terminals to items where that terminal could be shifted and another +which maps terminals to items that could be reduced when the terminal +is in look-ahead. We report when we get conflicts between the two. + +As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE +terminal, we ignore it. 
NEWLINES are handled specially with its own +rules for when to shift and when to reduce. Conflicts are expected, +but handled internally. static int conflicts_slr(struct grammar *g, enum grammar_type type) { @@ -1684,16 +1884,22 @@ between the two. int p = item_prod(itm); int bp = item_index(itm); struct production *pr = g->productions[p]; + struct symbol *s; - if (bp < pr->body_size && - pr->body[bp]->type == Terminal) { - /* shiftable */ - int sym = pr->body[bp]->num; - if (symset_find(&shifts, sym) < 0) - symset_add(&shifts, sym, itm); - } + if (bp >= pr->body_size || + pr->body[bp]->type != Terminal) + /* not shiftable */ + continue; + + s = pr->body[bp]; + if (s->precedence && is->precedence) + /* Precedence resolves this, so no conflict */ + continue; + + if (symset_find(&shifts, s->num) < 0) + symset_add(&shifts, s->num, itm); } - /* Now look for reduction and conflicts */ + /* Now look for reductions and conflicts */ for (j = 0; j < is->items.cnt; j++) { unsigned short itm = is->items.syms[j]; int p = item_prod(itm); @@ -1711,13 +1917,13 @@ between the two. int k; for (k = 0; k < la.cnt; k++) { int pos = symset_find(&shifts, la.syms[k]); - if (pos >= 0) { + if (pos >= 0 && la.syms[k] != TK_newline) { printf(" State %d has SHIFT/REDUCE conflict on ", i); - prtxt(g->symtab[la.syms[k]]->name); + cnt++; + prtxt(g->symtab[la.syms[k]]->name); printf(":\n"); report_item(g, shifts.data[pos]); report_item(g, itm); - cnt++; } pos = symset_find(&reduce, la.syms[k]); if (pos < 0) { @@ -1739,9 +1945,45 @@ between the two. } +### Reporting non-final left-recursive symbols. + +Left recursive symbols are a problem for parses that honour indentation +when they appear other than at the end of the production. So we need to +report these when asked. + +###### functions + + static int report_left_recursive(struct grammar *g) + { + int p; + int bad_left_recursive = 0; + + for (p = 0; p < g->production_count; p++) { + struct production *pr = g->productions[p]; + int sn; + + for (sn = 0; sn < pr->body_size - 1; sn++) { + struct symbol *s = pr->body[sn]; + + if (s->left_recursive == 1 && + s != pr->head) { + if (!bad_left_recursive) { + bad_left_recursive = 1; + printf("Misplaced left recursive symbols.\n"); + } + printf(" "); + prtxt(s->name); + printf(" in production [%d]\n", p); + s->left_recursive = 2; + } + } + } + return bad_left_recursive; + } + ## Generating the parser -The export part of the parser is the `parse_XX` function, where the name +The exported part of the parser is the `parse_XX` function, where the name `XX` is based on the name of the parser files. This takes a `code_node`, a partially initialized `token_config`, and an @@ -1749,19 +1991,20 @@ optional `FILE` to send tracing to. The `token_config` gets the list of known words added and then is used with the `code_node` to initialize the scanner. -`parse_XX` then call the library function `parser_run` to actually complete +`parse_XX` then calls the library function `parser_run` to actually complete the parse. This needs the `states` table and function to call the various pieces of code provided in the grammar file, so they are generated first. 
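+As a minimal usage sketch (not part of the generated code), a caller
+might drive a generated parser like this, assuming the output file was
+named `calc` so the entry point is `parse_calc`, and that `gram` is a
+`struct code_node` already extracted by `mdcode` from the input
+document:
+
+	struct token_config config = {0};
+	// Fill in any scanner options (ignored tokens, word characters,
+	// etc.) that the grammar expects before starting the parse.
+	void *tree = parse_calc(gram, &config, NULL); // pass stderr to trace
+	if (tree) {
+		// `tree` is a heap copy of the structure attached to the
+		// start symbol; the caller owns it and should free it with
+		// whatever free_XX routine suits that structure.
+	}
+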
###### parser_generate - static void gen_parser(FILE *f, struct grammar *g, char *file, char *name) + static void gen_parser(FILE *f, struct grammar *g, char *file, char *name, + struct code_node *pre_reduce) { gen_known(f, g); gen_non_term(f, g); gen_goto(f, g); gen_states(f, g); - gen_reduce(f, g, file); + gen_reduce(f, g, file, pre_reduce); gen_free(f, g); fprintf(f, "#line 0 \"gen_parser\"\n"); @@ -1771,18 +2014,19 @@ pieces of code provided in the grammar file, so they are generated first. fprintf(f, "\tstruct token_state *tokens;\n"); fprintf(f, "\tconfig->words_marks = known;\n"); fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n"); - fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n"); fprintf(f, "\ttokens = token_open(code, config);\n"); - fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config->known_count);\n"); + fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n"); fprintf(f, "\ttoken_close(tokens);\n"); fprintf(f, "\treturn rv;\n"); fprintf(f, "}\n\n"); } -### Table words table +### Known words table -The know words is simply an array of terminal symbols. -The table of nonterminals used for tracing is a similar array. +The known words table is simply an array of terminal symbols. +The table of nonterminals used for tracing is a similar array. We +include virtual symbols in the table of non_terminals to keep the +numbers right. ###### functions @@ -1837,11 +2081,11 @@ The go to table is stored in a simple array of `sym` and corresponding short reduce_prod; short reduce_size; short reduce_sym; - short shift_sym; - short starts_line; + char starts_line; + char newline_only; + short min_prefix; }; - ###### functions static void gen_goto(FILE *f, struct grammar *g) @@ -1870,23 +2114,15 @@ The go to table is stored in a simple array of `sym` and corresponding for (i = 0; i < g->states; i++) { struct itemset *is = g->statetab[i]; int j, prod = -1, prod_len; - int shift_sym = -1; - int shift_len = 0, shift_remain = 0; + for (j = 0; j < is->items.cnt; j++) { int itm = is->items.syms[j]; int p = item_prod(itm); int bp = item_index(itm); struct production *pr = g->productions[p]; - if (bp < pr->body_size) { - if (shift_sym < 0 || - (shift_len == bp && shift_remain > pr->body_size - bp)) { - shift_sym = pr->body[bp]->num; - shift_len = bp; - shift_remain = pr->body_size - bp; - } + if (bp < pr->body_size) continue; - } /* This is what we reduce */ if (prod < 0 || prod_len < pr->body_size) { prod = p; @@ -1895,15 +2131,17 @@ The go to table is stored in a simple array of `sym` and corresponding } if (prod >= 0) - fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n", + fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n", i, is->go_to.cnt, i, prod, g->productions[prod]->body_size, g->productions[prod]->head->num, - is->starts_line); + is->starts_line, + g->productions[prod]->line_like, + is->min_prefix); else - fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n", - i, is->go_to.cnt, i, shift_sym, - is->starts_line); + fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n", + i, is->go_to.cnt, i, + is->starts_line, is->min_prefix); } fprintf(f, "};\n\n"); } @@ -1921,21 +2159,126 @@ This code needs to be able to store data somewhere. Rather than requiring `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have `do_reduce` return the size to be saved. 
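+To make that convention concrete, here is a hand-written sketch (not
+generated output) of how a reduce function and its caller cooperate:
+the reduce code builds the head's structure directly in the supplied
+buffer and returns the number of bytes used, and the caller copies just
+that much to the heap (the `config` argument is explained below).
+
+	struct example { int val; };
+
+	static int do_reduce_sketch(int prod, void **body,
+				    struct token_config *config, void *ret)
+	{
+		struct example *out = ret;
+		out->val = 42;		// build the result in place
+		return sizeof(*out);	// tell the caller how much to save
+	}
+
+	// The caller then does, much as `parser_run` does below:
+	//	bufsize = do_reduce(prod, body, config, buf);
+	//	res = memdup(buf, bufsize);
+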
+In order for the code to access "global" context, we pass in the +"config" pointer that was passed to parser function. If the `struct +token_config` is embedded in some larger structure, the reducing code +can access the larger structure using pointer manipulation. + The code fragment requires translation when written out. Any `$N` needs to be converted to a reference either to that buffer (if `$0`) or to the -structure returned by a previous reduction. These pointer need to be cast -to the appropriate type for each access. All this is handling in +structure returned by a previous reduction. These pointers need to be cast +to the appropriate type for each access. All this is handled in `gen_code`. +`gen_code` also allows symbol references to contain a '`<`' as in +'`$<2`'. This is particularly useful for references (or pointers), but +can be used with structures too. The `<` implies that the value it +being moved out, so the object will not be automatically freed. It is +equivalent to assigning `NULL` to the pointer or filling a structure +with zeros. + +Instead of a number `N`, the `$` or `$<` can be followed by some letters +and possibly a number. A number by itself (other than zero) selects a +symbol from the body of the production. A sequence of letters selects +the shortest symbol in the body which contains those letters in the given +order. If a number follows the letters, then a later occurrence of +that symbol is chosen. So "`$AB2`" will refer to the structure attached +to the second occurrence of the shortest symbol which contains an `A` +followed by a `B`. If there is no unique shortest system, or if the +number given is too large, then the symbol reference is not transformed, +and will cause an error when the code is compiled. ###### functions + static int textchr(struct text t, char c, int s) + { + int i; + for (i = s; i < t.len; i++) + if (t.txt[i] == c) + return i; + return -1; + } + + static int subseq_match(char *seq, int slen, struct text name) + { + int st = 0; + while (slen > 0) { + st = textchr(name, *seq, st); + if (st < 0) + return 0; + slen -= 1; + seq += 1; + st += 1; + } + return 1; + } + + static int choose_sym(char **namep, int len, struct production *p) + { + char *name = *namep; + char *nam = name; + int namlen; + int n = 0; + int i, s, slen; + char c; + + c = *name; + while (len > 0 && + ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) { + name += 1; + len -= 1; + c = *name; + } + namlen = name-nam; + while (len > 0 && (c >= '0' && c <= '9')) { + name += 1; + len -= 1; + n = n * 10 + (c - '0'); + c = *name; + } + if (namlen == 0) { + if (name == *namep) + return -1; + *namep = name; + return n; + } + slen = 0; s = -1; + for (i = 0; i < p->body_size; i++) { + if (!subseq_match(nam, namlen, p->body[i]->name)) + continue; + if (slen == 0 || p->body[i]->name.len < slen) + s = i; + if (s >= 0 && p->body[i] != p->body[s] && + p->body[i]->name.len == p->body[s]->name.len) + /* not unique, so s cannot be used */ + s = -1; + } + if (s < 0) + return -1; + if (n == 0); + n = 1; + for (i = 0; i < p->body_size; i++) + if (p->body[i] == p->body[s]) { + n -= 1; + if (n == 0) + break; + } + if (n > 1) + return -1; + *namep = name; + return i + 1; + } + static void gen_code(struct production *p, FILE *f, struct grammar *g) { char *c; + char *used = calloc(1, p->body_size); + int i; + fprintf(f, "\t\t\t"); for (c = p->code.txt; c < p->code.txt + p->code.len; c++) { int n; + int use = 0; if (*c != '$') { fputc(*c, f); if (*c == '\n') @@ -1943,56 +2286,80 @@ to the 
appropriate type for each access. All this is handling in continue; } c++; - if (*c < '0' || *c > '9') { + if (*c == '<') { + use = 1; + c++; + } + n = choose_sym(&c, p->code.txt + p->code.len - c, p); + if (n < 0) { + fputc('$', f); + if (use) + fputc('<', f); fputc(*c, f); continue; } - n = *c - '0'; - while (c[1] >= '0' && c[1] <= '9') { - c += 1; - n = n * 10 + *c - '0'; - } if (n == 0) - fprintf(f, "(*(struct %.*s*)ret)", + fprintf(f, "(*(struct %.*s*%s)ret)", p->head->struct_name.len, - p->head->struct_name.txt); - else if (n > p->body_size) - fprintf(f, "$%d", n); + p->head->struct_name.txt, + p->head->isref ? "*":""); else if (p->body[n-1]->type == Terminal) fprintf(f, "(*(struct token *)body[%d])", n-1); else if (p->body[n-1]->struct_name.txt == NULL) fprintf(f, "$%d", n); - else - fprintf(f, "(*(struct %.*s*)body[%d])", + else { + fprintf(f, "(*(struct %.*s*%s)body[%d])", p->body[n-1]->struct_name.len, - p->body[n-1]->struct_name.txt, n-1); + p->body[n-1]->struct_name.txt, + p->body[n-1]->isref ? "*":"", n-1); + used[n-1] = use; + } + c -= 1; } fputs("\n", f); + for (i = 0; i < p->body_size; i++) { + if (p->body[i]->struct_name.txt && + used[i]) { + // assume this has been copied out + if (p->body[i]->isref) + fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i); + else + fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt); + } + } + free(used); } ###### functions - static void gen_reduce(FILE *f, struct grammar *g, char *file) + static void gen_reduce(FILE *f, struct grammar *g, char *file, + struct code_node *code) { int i; - fprintf(f, "#line 0 \"gen_reduce\"\n"); - fprintf(f, "static int do_reduce(int prod, void **body, void *ret)\n"); + fprintf(f, "#line 1 \"gen_reduce\"\n"); + fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n"); fprintf(f, "{\n"); fprintf(f, "\tint ret_size = 0;\n"); + if (code) + code_node_print(f, code, file); + fprintf(f, "#line 4 \"gen_reduce\"\n"); fprintf(f, "\tswitch(prod) {\n"); for (i = 0; i < g->production_count; i++) { struct production *p = g->productions[i]; fprintf(f, "\tcase %d:\n", i); - if (p->code.txt) + if (p->code.txt) { + fprintf(f, "#line %d \"%s\"\n", p->code_line, file); gen_code(p, f, g); + } if (p->head->struct_name.txt) - fprintf(f, "\t\tret_size = sizeof(struct %.*s);\n", + fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n", p->head->struct_name.len, - p->head->struct_name.txt); + p->head->struct_name.txt, + p->head->isref ? "*":""); fprintf(f, "\t\tbreak;\n"); } @@ -2008,10 +2375,10 @@ done. It is particularly important to have fine control over freeing during error recovery where individual stack frames might need to be freed. -For this, the grammar author required to defined a `free_XX` function for -each structure that is used by a non-terminal. `do_free` all call whichever +For this, the grammar author is required to defined a `free_XX` function for +each structure that is used by a non-terminal. `do_free` will call whichever is appropriate given a symbol number, and will call `free` (as is -appropriate for tokens` on any terminal symbol. +appropriate for tokens) on any terminal symbol. ###### functions @@ -2035,9 +2402,15 @@ appropriate for tokens` on any terminal symbol. 
continue; fprintf(f, "\tcase %d:\n", s->num); - fprintf(f, "\t\tfree_%.*s(asn);\n", - s->struct_name.len, - s->struct_name.txt); + if (s->isref) { + fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n", + s->struct_name.len, + s->struct_name.txt); + fprintf(f, "\t\tfree(asn);\n"); + } else + fprintf(f, "\t\tfree_%.*s(asn);\n", + s->struct_name.len, + s->struct_name.txt); fprintf(f, "\t\tbreak;\n"); } fprintf(f, "\t}\n}\n\n"); @@ -2069,17 +2442,19 @@ grammar file). { "SLR", 0, NULL, 'S' }, { "LALR", 0, NULL, 'L' }, { "LR1", 0, NULL, '1' }, + { "tag", 1, NULL, 't' }, { "report", 0, NULL, 'R' }, { "output", 1, NULL, 'o' }, { NULL, 0, NULL, 0 } }; - const char *options = "05SL1Ro:"; + const char *options = "05SL1t:Ro:"; ###### process arguments int opt; char *outfile = NULL; char *infile; char *name; + char *tag = NULL; int report = 1; enum grammar_type type = LR05; while ((opt = getopt_long(argc, argv, options, @@ -2099,6 +2474,8 @@ grammar file). report = 2; break; case 'o': outfile = optarg; break; + case 't': + tag = optarg; break; default: fprintf(stderr, "Usage: parsergen ...\n"); exit(1); @@ -2126,8 +2503,9 @@ grammar file). To be able to run `mdcode` and `scanner` on the grammar we need to memory map it. -One we have extracted the code (with `mdcode`) we expect to file three -sections: header, code, and grammar. Anything else is an error. +Once we have extracted the code (with `mdcode`) we expect to find three +sections: header, code, and grammar. Anything else that is not +excluded by the `--tag` option is an error. "header" and "code" are optional, though it is hard to build a working parser with neither. "grammar" must be provided. @@ -2163,13 +2541,19 @@ parser with neither. "grammar" must be provided. struct code_node *hdr = NULL; struct code_node *code = NULL; struct code_node *gram = NULL; + struct code_node *pre_reduce = NULL; for (s = table; s; s = s->next) { - if (text_is(s->section, "header")) + struct text sec = s->section; + if (tag && !strip_tag(&sec, tag)) + continue; + if (text_is(sec, "header")) hdr = s->code; - else if (text_is(s->section, "code")) + else if (text_is(sec, "code")) code = s->code; - else if (text_is(s->section, "grammar")) + else if (text_is(sec, "grammar")) gram = s->code; + else if (text_is(sec, "reduce")) + pre_reduce = s->code; else { fprintf(stderr, "Unknown content section: %.*s\n", s->section.len, s->section.txt); @@ -2216,7 +2600,7 @@ the report finds conflicts we will exit with an error status. rv |= 1; } -If a headers section is defined, we write it out and include a declaration +If a "headers" section is defined, we write it out and include a declaration for the `parse_XX` function so it can be used from separate code. if (rv == 0 && hdr && outfile) { @@ -2241,7 +2625,7 @@ file with the code section (if any) and the parser tables and function. if (f) { if (code) code_node_print(f, code, infile); - gen_parser(f, g, infile, name); + gen_parser(f, g, infile, name, pre_reduce); fclose(f); } else { fprintf(stderr, "Cannot create %s.c\n", @@ -2251,7 +2635,7 @@ file with the code section (if any) and the parser tables and function. } And that about wraps it up. We need to set the locale so that UTF-8 is -recognised properly, and link with `libicuuc` is `libmdcode` requires that. +recognised properly, and link with `libicuuc` as `libmdcode` requires that. ###### File: parsergen.mk parsergen : parsergen.o libscanner.o libmdcode.o @@ -2278,7 +2662,7 @@ recognised properly, and link with `libicuuc` is `libmdcode` requires that. 
## The SHIFT/REDUCE parser -Having analysed the grammar and generated all the table, we only need the +Having analysed the grammar and generated all the tables, we only need the shift/reduce engine to bring it all together. ### Goto table lookup @@ -2324,31 +2708,44 @@ We keep the stack as two separate allocations. One, `asn_stack` stores the production, and by keeping a separate `asn` stack, we can just pass a pointer into this stack. -The other allocation stores all other stack fields of which there are four. +The other allocation stores all other stack fields of which there are six. The `state` is the most important one and guides the parsing process. The `sym` is nearly unnecessary. However when we want to free entries from the `asn_stack`, it helps to know what type they are so we can call the right freeing function. The symbol leads us to the right free function through `do_free`. -The `indents` count and the `starts_indented` flag track the line -indents in the symbol. These are used to allow indent information to +The `indents` count tracks the line indents with-in the symbol or +immediately follow it. These are used to allow indent information to guide parsing and error recovery. -As well as the stack of frames we have a `next` frame which is -assembled from the incoming token and other information prior to -pushing it onto the stack. +`since_newline` tracks how many stack frames since the last +start-of-line (whether indented or not). So if `since_newline` is +zero, then this symbol is at the start of a line. Similarly +`since_indent` counts the number of states since an indent, it is zero +precisely when `indents` is not zero. + +`newline_permitted` keeps track of whether newlines should be ignored +or not. + +The stack is most properly seen as alternating states and symbols - +states, like the 'DOT' in items, are between symbols. Each frame in +our stack holds a state and the symbol that was before it. The +bottom of stack holds the start state but no symbol, as nothing came +before the beginning. ###### parser functions struct parser { struct frame { short state; + short newline_permitted; + short sym; - short starts_indented; short indents; - short newline_permitted; - } *stack, next; + short since_newline; + short since_indent; + } *stack; void **asn_stack; int stack_size; int tos; @@ -2358,28 +2755,45 @@ pushing it onto the stack. Two operations are needed on the stack - shift (which is like push) and pop. -Shift applies not only to terminals but also to non-terminals. When we -reduce a production we will pop off entries corresponding to the body -symbols, then push on an item for the head of the production. This last is -exactly the same process as shifting in a terminal so we use the same -function for both. +Shift applies not only to terminals but also to non-terminals. When +we reduce a production we will pop off entries corresponding to the +body symbols, then push on an item for the head of the production. +This last is exactly the same process as shifting in a terminal so we +use the same function for both. In both cases we provide the symbol, +the number of indents the symbol contains (which will be zero for a +terminal symbol) and a flag indicating the the symbol was at (or was +reduced from a symbol which was at) the start of a line. The state is +deduced from the current top-of-stack state and the new symbol. To simplify other code we arrange for `shift` to fail if there is no `goto` state for the symbol. 
This is useful in basic parsing due to our design that we shift when we can, and reduce when we cannot. So the `shift` function reports if it could. -So `shift` finds the next state. If that succeed it extends the allocations -if needed and pushes all the information onto the stacks. +`shift` is also used to push state zero onto the stack, so if the +stack is empty, it always chooses zero as the next state. + +So `shift` finds the next state. If that succeeds it extends the +allocations if needed and pushes all the information onto the stacks. + +Newlines are permitted after a `starts_line` state until an internal +indent. If the new frame has neither a `starts_line` state nor an +indent, newlines are permitted if the previous stack frame permitted +them. ###### parser functions static int shift(struct parser *p, + short sym, short indents, short start_of_line, void *asn, const struct state states[]) { // Push an entry onto the stack - int newstate = search(&states[p->next.state], p->next.sym); + struct frame next = {0}; + int newstate = p->tos + ? search(&states[p->stack[p->tos-1].state], + sym) + : 0; if (newstate < 0) return 0; if (p->tos >= p->stack_size) { @@ -2389,42 +2803,64 @@ if needed and pushes all the information onto the stacks. p->asn_stack = realloc(p->asn_stack, p->stack_size * sizeof(p->asn_stack[0])); } - p->stack[p->tos] = p->next; + next.sym = sym; + next.indents = indents; + next.state = newstate; + if (states[newstate].starts_line) + next.newline_permitted = 1; + else if (indents) + next.newline_permitted = 0; + else if (p->tos) + next.newline_permitted = + p->stack[p->tos-1].newline_permitted; + else + next.newline_permitted = 0; + + if (!start_of_line) { + if (p->tos) + next.since_newline = p->stack[p->tos-1].since_newline + 1; + else + next.since_newline = 1; + } + if (indents) + next.since_indent = 0; + else if (p->tos) + next.since_indent = p->stack[p->tos-1].since_indent + 1; + else + next.since_indent = 1; + + p->stack[p->tos] = next; p->asn_stack[p->tos] = asn; p->tos++; - p->next.state = newstate; - p->next.indents = 0; - p->next.starts_indented = 0; - // if new state doesn't start a line, we inherit newline_permitted status - if (states[newstate].starts_line) - p->next.newline_permitted = 1; return 1; } -`pop` simply moves the top of stack (`tos`) back down the required amount -and frees any `asn` entries that need to be freed. It is called _after_ we -reduce a production, just before we `shift` the nonterminal in. +`pop` primarily moves the top of stack (`tos`) back down the required +amount and frees any `asn` entries that need to be freed. It also +collects a summary of the indents and line starts in the symbols that +are being removed. It is called _after_ we reduce a production, just +before we `shift` the nonterminal in. 
###### parser functions - static void pop(struct parser *p, int num, - void(*do_free)(short sym, void *asn)) + static int pop(struct parser *p, int num, + short *start_of_line, + void(*do_free)(short sym, void *asn)) { int i; + short indents = 0; + int sol = 0; + p->tos -= num; for (i = 0; i < num; i++) { - p->next.indents += p->stack[p->tos+i].indents; + sol |= !p->stack[p->tos+i].since_newline; + indents += p->stack[p->tos+i].indents; do_free(p->stack[p->tos+i].sym, p->asn_stack[p->tos+i]); } - - if (num) { - p->next.state = p->stack[p->tos].state; - p->next.starts_indented = p->stack[p->tos].starts_indented; - p->next.newline_permitted = p->stack[p->tos].newline_permitted; - if (p->next.indents > p->next.starts_indented) - p->next.newline_permitted = 0; - } + if (start_of_line) + *start_of_line = sol; + return indents; } ### Memory allocation @@ -2458,113 +2894,213 @@ copying, hence `memdup` and `tokcopy`. ### The heart of the parser. -Now we have the parser. If we can shift, we do. If not and we can reduce -we do. If the production we reduced was production zero, then we have +Now we have the parser. If we can shift we do, though newlines and +reducing indenting may block that. If not and we can reduce we do +that. If the production we reduced was production zero, then we have accepted the input and can finish. We return whatever `asn` was returned by reducing production zero. If we can neither shift nor reduce we have an error to handle. We pop -single entries off the stack until we can shift the `TK_error` symbol, then -drop input tokens until we find one we can shift into the new error state. +single entries off the stack until we can shift the `TK_error` symbol, +then drop input tokens until we find one we can shift into the new error +state. We need to ensure that something is dropped or shifted after an +error, or we could get into an infinite loop, only shifting in +`TK_error`, then reducing. So we track if there has been a shift since +the last error, and if not the next error always discards one token. When we find `TK_in` and `TK_out` tokens which report indents we need to handle them directly as the grammar cannot express what we want to do with them. -`TK_in` tokens are easy: we simply update the `next` stack frame to -record how many indents there are and that the next token started with -an indent. - -`TK_out` tokens must either be counted off against any pending indent, -or must force reductions until there is a pending indent which isn't -at the start of a production. +`TK_in` tokens are easy: we simply update indent count in the top stack frame to +record how many indents there are following the previous token. + +`TK_out` tokens must be canceled against an indent count +within the stack. If we can reduce some symbols that are all since +the most recent indent, then we do that first. If the minimum prefix +of the current state then extends back before the most recent indent, +that indent can be cancelled. If the minimum prefix is shorter then +the indent had ended prematurely and we must start error handling, which +is still a work-in-progress. + +`TK_newline` tokens are ignored unless the top stack frame records +that they are permitted. In that case they will not be considered for +shifting if it is possible to reduce some symbols that are all since +the most recent start of line. This is how a newline forcibly +terminates any line-like structure - we try to reduce down to at most +one symbol for each line where newlines are allowed. 
+A consequence of this is that a rule like
+
+###### Example: newlines - broken
+
+	Newlines ->
+		| NEWLINE Newlines
+	IfStatement -> Newlines if ....
+
+cannot work, as the NEWLINE will never be shifted because the empty
+string will be reduced first.  Optional sets of newlines need to be
+included in the thing that they precede:
+
+###### Example: newlines - works
+
+	If -> if
+		| NEWLINE If
+	IfStatement -> If ....
+
+Here the NEWLINE will be shifted because nothing can be reduced until
+the `if` is seen.
+
+When, during error handling, we discard tokens that have been read, we
+want to keep discarding until we see one that is recognised.  If we had
+a full set of LR(1) grammar states, this would mean looking in the
+look-ahead set, but we don't keep a full look-ahead set.  We only
+record the subset that leads to SHIFT.  We can, however, deduce the
+look-ahead set by looking at the SHIFT subsets for all states that we
+can get to by reducing zero or more times.  So we need a little
+function which checks if a given token is in any of these look-ahead
+sets.

 ###### parser includes
 	#include "parser.h"
+
 ###### parser_run
+
+	static int in_lookahead(struct token *tk, const struct state *states, int state)
+	{
+		while (state >= 0) {
+			if (search(&states[state], tk->num) >= 0)
+				return 1;
+			if (states[state].reduce_prod < 0)
+				return 0;
+			state = search(&states[state], states[state].reduce_sym);
+		}
+		return 0;
+	}
+
 	void *parser_run(struct token_state *tokens,
 	                 const struct state states[],
-	                 int (*do_reduce)(int, void**, void*),
+	                 int (*do_reduce)(int, void**, struct token_config*, void*),
 	                 void (*do_free)(short, void*),
-	                 FILE *trace, const char *non_term[], int knowns)
+	                 FILE *trace, const char *non_term[],
+	                 struct token_config *config)
 	{
 		struct parser p = { 0 };
 		struct token *tk = NULL;
 		int accepted = 0;
-		void *ret;
+		int shift_since_err = 1;
+		void *ret = NULL;

+		shift(&p, TK_eof, 0, 1, NULL, states);
 		while (!accepted) {
 			struct token *err_tk;
+			struct frame *tos = &p.stack[p.tos-1];
 			if (!tk)
 				tk = tok_copy(token_next(tokens));
-			p.next.sym = tk->num;
-			if (trace)
-				parser_trace(trace, &p, tk, states, non_term, knowns);
-
-			if (p.next.sym == TK_in) {
-				p.next.starts_indented = 1;
-				p.next.indents = 1;
+			parser_trace(trace, &p,
+				     tk, states, non_term, config->known_count);
+
+			if (tk->num == TK_in) {
+				tos->indents += 1;
+				tos->since_newline = 0;
+				tos->since_indent = 0;
+				if (!states[tos->state].starts_line)
+					tos->newline_permitted = 0;
 				free(tk);
 				tk = NULL;
+				parser_trace_action(trace, "Record");
 				continue;
 			}
-			if (p.next.sym == TK_out) {
-				if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
-				    (p.stack[p.tos-1].indents == 1 &&
-				     states[p.next.state].reduce_size > 1)) {
-					p.stack[p.tos-1].indents -= 1;
-					if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
-						// no internal indent any more, reassess 'newline_permitted'
-						if (states[p.stack[p.tos-1].state].starts_line)
-							p.stack[p.tos-1].newline_permitted = 1;
-						else if (p.tos > 1)
-							p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
+			if (tk->num == TK_out) {
+				if (states[tos->state].reduce_size >= 0 &&
+				    states[tos->state].reduce_size <= tos->since_indent)
+					goto force_reduce;
+				if (states[tos->state].min_prefix >= tos->since_indent) {
+					// OK to cancel
+					struct frame *in = tos - tos->since_indent;
+					in->indents -= 1;
+					if (in->indents == 0) {
+						/* Reassess since_indent and newline_permitted */
+						if (in > p.stack) {
+							in->since_indent = in[-1].since_indent + 1;
+							in->newline_permitted = in[-1].newline_permitted;
+						}
+						else {
+							in->since_indent = 0;
+							in->newline_permitted = 0;
+						}
+						if (states[in->state].starts_line)
+							in->newline_permitted = 1;
+						while (in < tos) {
+							in += 1;
+							in->since_indent = in[-1].since_indent + 1;
+							if (states[in->state].starts_line)
+								in->newline_permitted = 1;
+							else
+								in->newline_permitted = in[-1].newline_permitted;
+						}
 					}
 					free(tk);
 					tk = NULL;
+					parser_trace_action(trace, "Cancel");
 					continue;
 				}
-				// fall through and force a REDUCE (as 'shift'
-				// will fail).
+				// fall through to error handling as both SHIFT and REDUCE
+				// will fail.
 			}
-			if (p.next.sym == TK_newline) {
-				if (! p.stack[p.tos-1].newline_permitted) {
+			if (tk->num == TK_newline) {
+				if (!tos->newline_permitted) {
 					free(tk);
 					tk = NULL;
+					parser_trace_action(trace, "Discard");
 					continue;
 				}
+				if (tos->since_newline > 1 &&
+				    states[tos->state].reduce_size >= 0 &&
+				    states[tos->state].reduce_size <= tos->since_newline)
+					goto force_reduce;
 			}
-			if (shift(&p, tk, states)) {
+			if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
+				shift_since_err = 1;
 				tk = NULL;
+				parser_trace_action(trace, "Shift");
 				continue;
 			}
-			if (states[p.next.state].reduce_prod >= 0) {
+		force_reduce:
+			if (states[tos->state].reduce_prod >= 0 &&
+			    states[tos->state].newline_only &&
+			    !(tk->num == TK_newline ||
+			      tk->num == TK_eof ||
+			      tk->num == TK_out ||
+			      (tos->indents == 0 && tos->since_newline == 0))) {
+				/* Anything other than newline or out or eof
+				 * is an error unless we are already at the
+				 * start of a line, as this production must
+				 * end at EOL.
+				 */
+			} else if (states[tos->state].reduce_prod >= 0) {
 				void **body;
-				int prod = states[p.next.state].reduce_prod;
-				int size = states[p.next.state].reduce_size;
+				void *res;
+				const struct state *nextstate = &states[tos->state];
+				int prod = nextstate->reduce_prod;
+				int size = nextstate->reduce_size;
 				int bufsize;
 				static char buf[16*1024];
-				p.next.sym = states[p.next.state].reduce_sym;
+				short indents, start_of_line;

-				body = p.asn_stack +
-					(p.tos - states[p.next.state].reduce_size);
+				body = p.asn_stack + (p.tos - size);

-				bufsize = do_reduce(prod, body, buf);
+				bufsize = do_reduce(prod, body, config, buf);

-				pop(&p, size, do_free);
-				shift(&p, memdup(buf, bufsize), states);
-				if (prod == 0)
+				indents = pop(&p, size, &start_of_line,
+					      do_free);
+				res = memdup(buf, bufsize);
+				memset(buf, 0, bufsize);
+				if (!shift(&p, nextstate->reduce_sym,
+					   indents, start_of_line,
+					   res, states)) {
+					if (prod != 0) abort();
 					accepted = 1;
-				continue;
-			}
-			if (tk->num == TK_out) {
-				// Indent problem - synthesise tokens to get us
-				// out of here.
-				fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
-				p.next.sym = states[p.next.state].shift_sym;
-				shift(&p, tok_copy(*tk), states);
-				// FIXME need to report this error somehow
+					ret = res;
+				}
+				parser_trace_action(trace, "Reduce");
 				continue;
 			}
 			/* Error. We walk up the stack until we
@@ -2574,38 +3110,49 @@ at the start of a production.
 			 * Then we discard input tokens until
 			 * we find one that is acceptable.
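+			 * A token is always discarded if nothing has
+			 * been shifted since the last error, so that
+			 * error recovery is guaranteed to make progress.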
*/ + parser_trace_action(trace, "ERROR"); + short indents = 0, start_of_line; err_tk = tok_copy(*tk); - p.next.sym = TK_error; - while (shift(&p, err_tk, states) == 0 - && p.tos > 0) + while (p.tos > 0 && + shift(&p, TK_error, 0, 0, + err_tk, states) == 0) // discard this state - pop(&p, 1, do_free); + indents += pop(&p, 1, &start_of_line, do_free); if (p.tos == 0) { free(err_tk); // no state accepted TK_error break; } - while (search(&states[p.next.state], tk->num) < 0 && + if (!shift_since_err) { + /* must discard at least one token to avoid + * infinite loop. + */ + if (tk->num == TK_eof) + break; + free(tk); + tk = tok_copy(token_next(tokens)); + } + shift_since_err = 0; + tos = &p.stack[p.tos-1]; + while (!in_lookahead(tk, states, tos->state) && tk->num != TK_eof) { free(tk); tk = tok_copy(token_next(tokens)); + shift_since_err = 1; if (tk->num == TK_in) - p.next.indents += 1; + indents += 1; if (tk->num == TK_out) { - if (p.next.indents == 0) + if (indents == 0) break; - p.next.indents -= 1; + indents -= 1; + // FIXME update since_indent here } } - if (p.tos == 0 && tk->num == TK_eof) - break; + tos->indents += indents; } free(tk); - if (accepted) - ret = p.asn_stack[0]; - else - pop(&p, p.tos, do_free); + pop(&p, p.tos, NULL, do_free); free(p.asn_stack); free(p.stack); return ret; @@ -2614,9 +3161,10 @@ at the start of a production. ###### exported functions void *parser_run(struct token_state *tokens, const struct state states[], - int (*do_reduce)(int, void**, void*), + int (*do_reduce)(int, void**, struct token_config*, void*), void (*do_free)(short, void*), - FILE *trace, const char *non_term[], int knowns); + FILE *trace, const char *non_term[], + struct token_config *config); ### Tracing @@ -2647,13 +3195,10 @@ end inside square brackets. static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[]) { fprintf(trace, "(%d", f->state); - if (f->indents) - fprintf(trace, "%c%d", f->starts_indented?':':'.', - f->indents); if (states[f->state].starts_line) fprintf(trace, "s"); if (f->newline_permitted) - fprintf(trace, "n"); + fprintf(trace, "n%d", f->since_newline); fprintf(trace, ") "); } @@ -2662,28 +3207,42 @@ end inside square brackets. 
const char *non_term[], int knowns) { int i; + if (!trace) + return; for (i = 0; i < p->tos; i++) { - int sym = p->stack[i].sym; - parser_trace_state(trace, &p->stack[i], states); - if (sym < TK_reserved && - reserved_words[sym] != NULL) - fputs(reserved_words[sym], trace); - else if (sym < TK_reserved + knowns) { - struct token *t = p->asn_stack[i]; - text_dump(trace, t->txt, 20); - } else - fputs(non_term[sym - TK_reserved - knowns], - trace); - fputs(" ", trace); + struct frame *f = &p->stack[i]; + if (i) { + int sym = f->sym; + if (sym < TK_reserved && + reserved_words[sym] != NULL) + fputs(reserved_words[sym], trace); + else if (sym < TK_reserved + knowns) { + struct token *t = p->asn_stack[i]; + text_dump(trace, t->txt, 20); + } else + fputs(non_term[sym - TK_reserved - knowns], + trace); + if (f->indents) + fprintf(trace, ".%d", f->indents); + if (f->since_newline == 0) + fputs("/", trace); + fputs(" ", trace); + } + parser_trace_state(trace, f, states); } - parser_trace_state(trace, &p->next, states); - fprintf(trace, " ["); + fprintf(trace, "["); if (tk->num < TK_reserved && reserved_words[tk->num] != NULL) fputs(reserved_words[tk->num], trace); else text_dump(trace, tk->txt, 20); - fputs("]\n", trace); + fprintf(trace, ":%d:%d]", tk->line, tk->col); + } + + void parser_trace_action(FILE *trace, char *action) + { + if (trace) + fprintf(trace, " - %s\n", action); } # A Worked Example @@ -2693,26 +3252,22 @@ The obvious example for a parser is a calculator. As `scanner` provides number parsing function using `libgmp` is it not much work to perform arbitrary rational number calculations. -This calculator takes one expression, or an equality test per line. The -results are printed and in any equality test fails, the program exits with +This calculator takes one expression, or an equality test, per line. The +results are printed and if any equality test fails, the program exits with an error. -Embedding mdcode inside mdcode is rather horrible. I'd like to find a -better approach, but as the grammar file must have 3 components I need -something like this. - ###### File: parsergen.mk - calc.c : parsergen calc.cgm - ./parsergen -o calc calc.cgm + calc.c calc.h : parsergen parsergen.mdc + ./parsergen --tag calc -o calc parsergen.mdc calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp + calctest : calc + ./calc parsergen.mdc + demos :: calctest -###### demo grammar - - # header - ~~~~~ +# calc: header - #include "number.h" + #include "parse_number.h" // what do we use for a demo-grammar? A calculator of course. struct number { mpq_t val; @@ -2720,9 +3275,7 @@ something like this. int err; }; - ~~~~~ - # code - ~~~~~ +# calc: code #include #include @@ -2731,9 +3284,9 @@ something like this. #include #include #include + #include #include "mdcode.h" #include "scanner.h" - #include "number.h" #include "parser.h" #include "calc.h" @@ -2744,28 +3297,43 @@ something like this. 
free(n); } + static int text_is(struct text t, char *s) + { + return (strlen(s) == t.len && + strncmp(s, t.txt, t.len) == 0); + } + int main(int argc, char *argv[]) { int fd = open(argv[1], O_RDONLY); int len = lseek(fd, 0, 2); char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0); - struct section *s = code_extract(file, file+len, NULL); + struct section *table = code_extract(file, file+len, NULL); + struct section *s; struct token_config config = { .ignored = (1 << TK_line_comment) - | (1 << TK_block_comment) | (1 << TK_in) | (1 << TK_out), .number_chars = ".,_+-", .word_start = "", .word_cont = "", }; - parse_calc(s->code, &config, argc > 2 ? stderr : NULL); + for (s = table; s; s = s->next) + if (text_is(s->section, "example: input")) + parse_calc(s->code, &config, argc > 2 ? stderr : NULL); + while (table) { + struct section *t = table->next; + code_free(table->code); + free(table); + table = t; + } exit(0); } - ~~~~~ - # grammar - ~~~~~ +# calc: grammar + + $LEFT + - + $LEFT * / // Session -> Session Line | Line @@ -2789,13 +3357,32 @@ something like this. | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$ $number - Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$ - | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$ - | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$ + Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$ + | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$ + | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$ + | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$ + | Expression // Expression ${ { + mpz_t z0, z1, z2; + mpq_init($0.val); + mpz_init(z0); mpz_init(z1); mpz_init(z2); + mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val)); + mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val)); + mpz_tdiv_q(z0, z1, z2); + mpq_set_z($0.val, z0); + mpz_clear(z0); mpz_clear(z1); mpz_clear(z2); + } }$ + | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$ + | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$ - Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$ - | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$ - | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$ +# example: input - Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$ - | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$ + 355/113 + 3.1415926535 - 355/113 + 2 + 4 * 5 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 * 9 / 2 + 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1 + + 355//113 + + error
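+
+Note that this example input also doubles as a small test of the
+grammar: the `355//113` line exercises the truncated-division
+production (both operands are truncated towards zero before dividing,
+so it should evaluate to 3), and the final `error` line matches no
+production, so it should trigger error recovery and the `ERROR NEWLINE`
+action's "Skipped a bad line" message.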