1 # LR(1) Parser Generator with 2D support #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 "2D support" means that indentation and line breaks can be significant.
9 There are several distinct sections.
11 1. `grammar_read` will read a grammar from a literate-code file and
12 store the productions, symbols, and code fragments.
13 2. `grammar_analyse` will take that grammar and build LR parsing
14 states and look-ahead tables.
15 3. `grammar_report` will present the details of the analysis
16 in a readable format and will report any conflicts.
17 4. `parser_generate` will write out C code files with various
18 tables and with the code fragments arranged in useful places.
19 5. `parser_run` is a library function intended to be linked together
20 with the generated parser tables to complete the implementation of
22 6. Finally `calc` is a test grammar for a simple calculator. The
23 `parsergen` program built from the C code in this file can extract
24 that grammar directly from this file and process it.
26 ###### File: parsergen.c
31 ## forward declarations
42 ###### File: libparser.c
49 ###### File: parsergen.mk
52 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
55 ## Reading the grammar
57 The grammar must be stored in a markdown literate code file as parsed by
58 "[mdcode][]". It may have up to four top level (i.e. unreferenced)
59 sections: `header`, `code`, `grammar` and `reduce`. The first two, if
60 present, will be literally copied into the generated `.c` and `.h`
61 files. The third is required and contains the grammar which is
62 tokenised with "[scanner][]". `reduce` can provide variable
63 declarations and common initialization code for all the reduction code
64 fragments in the grammar.
66 If the `--tag` option is given, then any top level header that doesn't
67 start with the tag is ignored, and the tag is stripped from the rest. So
68 `--tag Foo` means that the four sections expected are `Foo: header`,
69 `Foo: code`, `Foo: grammar` and `Foo: reduce`. The tag `calc` is used
70 to extract the sample calculator from this file.
73 [scanner]: scanner.html
79 ###### parser includes
83 The grammar contains production sets, precedence/associativity
84 declarations, general terminal declarations, and data type declarations.
85 These are all parsed with _ad hoc_ parsing as we don't have a parser
88 The precedence and associativity can be set for each production, and
89 can be inherited from symbols. The data type (either structure or a
90 reference to a structure) is potentially defined for each non-terminal
91 and describes what C structure is used to store information about each
95 enum assoc {Left, Right, Non};
96 char *assoc_names[] = {"Left","Right","Non"};
99 struct text struct_name;
102 unsigned short precedence;
106 unsigned short precedence;
115 The strings reported by `mdcode` and `scanner` are `struct text` which
116 have length rather than being nul terminated. To help with printing and
117 comparing we define `text_is` and `prtxt`, which should possibly go in
118 `mdcode`. `scanner` does provide `text_dump` which is useful for
119 strings which might contain control characters.
121 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
122 because that is what we need to detect tags.
// text_is(): compare a length-counted `struct text` against a NUL-terminated
// C string; true only when both the length and the bytes match.
// (Literate-source fragment: some original lines are elided here.)
125 static int text_is(struct text t, char *s)
127 return (strlen(s) == t.len &&
128 strncmp(s, t.txt, t.len) == 0);
// prtxt(): print a `struct text` using the precision form of %s, since the
// text is length-counted rather than NUL terminated.
130 static void prtxt(struct text t)
132 printf("%.*s", t.len, t.txt);
// strip_tag(): like strncmp() but also requires a ':' immediately after the
// tag; on a match, skip past the tag, the colon, and any following spaces.
135 static int strip_tag(struct text *t, char *tag)
137 int skip = strlen(tag) + 1;
138 if (skip >= t->len ||
139 strncmp(t->txt, tag, skip-1) != 0 ||
140 t->txt[skip-1] != ':')
142 while (skip < t->len && t->txt[skip] == ' ')
151 Productions are comprised primarily of symbols - terminal and
152 non-terminal. We do not make a syntactic distinction between the two,
153 though non-terminals must be identifiers. Non-terminal symbols are
154 those which appear in the head of a production, terminal symbols are
155 those which don't. There are also "virtual" symbols used for precedence
156 marking discussed later, and sometimes we won't know what type a symbol
159 To help with code safety it is possible to declare the terminal symbols.
160 If this is done, then any symbol used in a production that does not
161 appear in the head of any production and is not declared as a terminal
162 is treated as an error.
164 ###### forward declarations
165 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
166 char *symtypes = "UVTN";
169 ###### grammar fields
170 int terminals_declared;
172 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
173 table of known symbols and the resulting parser will report them as
174 `TK_reserved + N`. A small set of identifiers are reserved for the
175 different token types that `scanner` can report.
179 static struct { int num; char *name; } reserved_words[] = {
180 { TK_error, "ERROR" },
181 { TK_number, "NUMBER" },
182 { TK_ident, "IDENTIFIER" },
184 { TK_string, "STRING" },
185 { TK_multi_string, "MULTI_STRING" },
188 { TK_newline, "NEWLINE" },
194 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
195 recognised. The former is automatically expected at the end of the text
196 being parsed. The latter are always ignored by the parser.
198 All of these symbols are stored in a simple symbol table. We use the
199 `struct text` from `mdcode` to store the name and link them together
200 into a sorted list using an insertion sort.
202 We don't have separate `find` and `insert` functions as any symbol we
203 find needs to be remembered. We simply expect `find` to always return a
204 symbol, but its type might be `Unknown`.
213 ###### grammar fields
218 static struct symbol *sym_find(struct grammar *g, struct text s)
220 struct symbol **l = &g->syms;
225 (cmp = text_cmp((*l)->name, s)) < 0)
229 n = calloc(1, sizeof(*n));
238 static void symbols_init(struct grammar *g)
240 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
242 for (i = 0; i < entries; i++) {
245 t.txt = reserved_words[i].name;
246 t.len = strlen(t.txt);
249 s->num = reserved_words[i].num;
253 ### Data types and precedence.
255 Data type specification, precedence specification, and declaration of
256 terminals are all introduced by a dollar sign at the start of the line.
257 If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
258 precedence, if it is `TERM` then the line declares terminals without
259 precedence, otherwise it specifies a data type.
261 The data type name is simply stored and applied to the head of all
262 subsequent productions. It must be the name of a structure optionally
263 preceded by an asterisk which means a reference or "pointer". So
264 `$expression` maps to `struct expression` and `$*statement` maps to
265 `struct statement *`.
267 Any productions given before the first data type declaration will have
268 no data type associated with them and can carry no information. In
269 order to allow other non-terminals to have no type, the data type
270 `$void` can be given. This does *not* mean that `struct void` will be
271 used, but rather that no type will be associated with subsequent
274 The precedence line must contain a list of symbols, either terminal
275 symbols or virtual symbols. It can only contain symbols that have not
276 been seen yet, so precedence declaration must precede the productions
279 A "virtual" symbol is an identifier preceded by `$$`. Virtual symbols
280 carry precedence information but are not included in the grammar. A
281 production can declare that it inherits the precedence of a given
284 This use for `$$` precludes it from being used as a symbol in the
285 described language. Two other symbols: `${` and `}$` are also
288 Each new precedence line introduces a new precedence level and
289 declares how it associates. This level is stored in each symbol
290 listed and may be inherited by any production which uses the symbol. A
291 production inherits from the last symbol which has a precedence.
293 The symbols on the first precedence line have the lowest precedence.
294 Subsequent lines introduce symbols with higher precedence and so bind
297 Note that a declaration line (or "dollar line") can start with either of
298 two different marks: "$" or "$*". The `dollar_line()` function defined
299 here is told which was found in the `isref` argument.
301 ###### grammar fields
302 struct text current_type;
307 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
308 static const char *known[] = { "$$", "${", "}$" };
311 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
313 struct token t = token_next(ts);
319 if (t.num != TK_ident) {
320 err = "type or assoc expected after '$'";
323 if (text_is(t.txt, "LEFT"))
325 else if (text_is(t.txt, "RIGHT"))
327 else if (text_is(t.txt, "NON"))
329 else if (text_is(t.txt, "TERM")) {
331 g->terminals_declared = 1;
333 g->current_type = t.txt;
334 g->type_isref = isref;
335 if (text_is(t.txt, "void"))
336 g->current_type.txt = NULL;
338 if (t.num != TK_newline) {
339 err = "Extra tokens after type name";
346 err = "$* cannot be followed by a precedence";
350 // This is a precedence or TERM line, need some symbols.
354 while (t.num != TK_newline) {
355 enum symtype type = Terminal;
357 if (t.num == TK_virtual) {
360 if (t.num != TK_ident) {
361 err = "$$ must be followed by a word";
365 err = "Virtual symbols not permitted on $TERM line";
368 } else if (t.num != TK_ident &&
370 err = "Illegal token in precedence line";
373 s = sym_find(g, t.txt);
374 if (s->type != Unknown) {
375 err = "Symbols in precedence/TERM line must not already be known.";
380 s->precedence = g->prec_levels;
387 err = "No symbols given on precedence/TERM line";
391 while (t.num != TK_newline && t.num != TK_eof)
398 A production either starts with an identifier which is the head
399 non-terminal, or a vertical bar (`|`) in which case this production
400 uses the same head as the previous one. The identifier must be
401 followed by a `->` mark. All productions for a given non-terminal must
402 be grouped together with the `nonterminal ->` given only once.
404 After this a (possibly empty) sequence of identifiers and marks form
405 the body of the production. A virtual symbol may be given after the
406 body by preceding it with `$$`. If a virtual symbol is given, the
407 precedence of the production is that for the virtual symbol. If none
408 is given, the precedence is inherited from the last symbol in the
409 production which has a precedence specified.
411 After the optional precedence may come the `${` mark. This indicates
412 the start of a code fragment. If present, this must be on the same
413 line as the start of the production.
415 All of the text from the `${` through to the matching `}$` is
416 collected and forms the code-fragment for the production. It must all
417 be in one `code_node` of the literate code. The `}$` must be
418 at the end of a line.
420 Text in the code fragment will undergo substitutions where `$N` or
421 `$<N`, for some numeric `N` (or non-numeric indicator as described
422 later), will be replaced with a variable holding the parse information
423 for the particular symbol in the production. `$0` is the head of the
424 production, `$1` is the first symbol of the body, etc. The type of `$N`
425 for a terminal symbol is `struct token`. For a non-terminal, it is
426 whatever has been declared for that symbol. The `<` may be included and
427 means that the value (usually a reference) is being moved out, so it
428 will not automatically be freed. The effect of using '<' is that the
429 variable is cleared to all-zeros.
431 While building productions we will need to add to an array which needs to
// array_add(): append `new` to a dynamically grown array of pointers,
// reallocating in blocks of `step` entries. `varray` is really a pointer to
// the array pointer, passed as void* so one helper serves any pointer type.
// (Literate-source fragment: some original lines are elided here.)
435 static void array_add(void *varray, int *cnt, void *new)
437 void ***array = varray;
// round the count up to the next block boundary; a realloc is only needed
// when the count has exactly filled the current block
440 current = ((*cnt-1) | (step-1))+1;
441 if (*cnt == current) {
444 *array = realloc(*array, current * sizeof(void*));
446 (*array)[*cnt] = new;
450 Collecting the code fragment simply involves reading tokens until we
451 hit the end token `}$`, and noting the character position of the start and
455 static struct text collect_code(struct token_state *state,
460 code.txt = start.txt.txt + start.txt.len;
462 t = token_next(state);
463 while (t.node == start.node &&
464 t.num != TK_close && t.num != TK_error &&
466 if (t.num == TK_close && t.node == start.node)
467 code.len = t.txt.txt - code.txt;
473 Now we have all the bits we need to parse a full production.
478 ###### grammar fields
479 struct production **productions;
480 int production_count;
482 ###### production fields
484 struct symbol **body;
490 int first_production;
493 static char *parse_production(struct grammar *g,
495 struct token_state *state)
497 /* Head has already been parsed. */
500 struct production p, *pp;
502 memset(&p, 0, sizeof(p));
504 tk = token_next(state);
505 while (tk.num == TK_ident || tk.num == TK_mark) {
506 struct symbol *bs = sym_find(g, tk.txt);
507 if (bs->type == Unknown) {
508 if (!g->terminals_declared)
511 if (bs->type == Virtual) {
512 err = "Virtual symbol not permitted in production";
515 if (bs->precedence) {
516 p.precedence = bs->precedence;
519 array_add(&p.body, &p.body_size, bs);
520 tk = token_next(state);
522 if (tk.num == TK_virtual) {
524 tk = token_next(state);
525 if (tk.num != TK_ident) {
526 err = "word required after $$";
529 vs = sym_find(g, tk.txt);
530 if (vs->num == TK_newline)
532 else if (vs->num == TK_out)
534 else if (vs->precedence == 0) {
535 err = "symbol after $$ must have precedence";
538 p.precedence = vs->precedence;
541 tk = token_next(state);
543 if (tk.num == TK_open) {
544 p.code_line = tk.line;
545 p.code = collect_code(state, tk);
546 if (p.code.txt == NULL) {
547 err = "code fragment not closed properly";
550 tk = token_next(state);
553 if (tk.num != TK_newline && tk.num != TK_eof) {
554 err = "stray tokens at end of line";
557 pp = malloc(sizeof(*pp));
559 array_add(&g->productions, &g->production_count, pp);
562 while (tk.num != TK_newline && tk.num != TK_eof)
563 tk = token_next(state);
567 With the ability to parse productions and dollar-lines, we have nearly all
568 that we need to parse a grammar from a `code_node`.
570 The head of the first production will effectively be the `start` symbol
571 of the grammar. However it won't _actually_ be so. Processing the
572 grammar is greatly simplified if the real start symbol only has a single
573 production, and expects `$eof` as the final terminal. So when we find
574 the first explicit production we insert an extra implicit production as
575 production zero which looks like
577 ###### Example: production 0
580 where `START` is the first non-terminal given.
582 ###### create production zero
583 struct production *p = calloc(1,sizeof(*p));
584 struct text start = {"$start",6};
585 struct text eof = {"$eof",4};
586 struct text code = {"$0 = $<1;", 9};
587 p->head = sym_find(g, start);
588 p->head->type = Nonterminal;
589 p->head->struct_name = g->current_type;
590 p->head->isref = g->type_isref;
591 if (g->current_type.txt)
593 array_add(&p->body, &p->body_size, head);
594 array_add(&p->body, &p->body_size, sym_find(g, eof));
595 p->head->first_production = g->production_count;
596 array_add(&g->productions, &g->production_count, p);
598 Now we are ready to read in the grammar. We ignore comments
599 and strings so that the marks which introduce them can be
600 used as terminals in the grammar. We don't ignore numbers
601 even though we don't allow them as that causes the scanner
602 to produce errors that the parser is better positioned to handle.
603 We also ignore indentation, but we expect and use newlines.
605 To allow comments in the grammar, we explicitly check for "//" as the
606 first token on the line and those lines are skipped. "//" can still be
607 used as a terminal anywhere that a terminal is expected.
610 static struct grammar *grammar_read(struct code_node *code)
612 struct token_config conf = {
615 .words_marks = known,
616 .known_count = sizeof(known)/sizeof(known[0]),
618 .ignored = (1 << TK_line_comment)
619 | (1 << TK_block_comment)
622 | (1 << TK_multi_string)
627 struct token_state *state = token_open(code, &conf);
629 struct symbol *head = NULL;
633 g = calloc(1, sizeof(*g));
636 for (tk = token_next(state); tk.num != TK_eof;
637 tk = token_next(state)) {
638 if (tk.num == TK_newline)
640 if (tk.num == TK_ident) {
642 head = sym_find(g, tk.txt);
643 if (head->type == Nonterminal)
644 err = "This non-terminal has already be used.";
645 else if (head->type == Virtual)
646 err = "Virtual symbol not permitted in head of production";
648 head->type = Nonterminal;
649 head->struct_name = g->current_type;
650 head->isref = g->type_isref;
651 if (g->production_count == 0) {
652 ## create production zero
654 head->first_production = g->production_count;
655 tk = token_next(state);
656 if (tk.num == TK_mark &&
657 text_is(tk.txt, "->"))
658 err = parse_production(g, head, state);
660 err = "'->' missing in production";
662 } else if (tk.num == TK_mark
663 && text_is(tk.txt, "|")) {
664 // another production for same non-term
666 err = parse_production(g, head, state);
668 err = "First production must have a head";
669 } else if (tk.num == TK_mark
670 && text_is(tk.txt, "$")) {
671 err = dollar_line(state, g, 0);
672 } else if (tk.num == TK_mark
673 && text_is(tk.txt, "$*")) {
674 err = dollar_line(state, g, 1);
675 } else if (tk.num == TK_mark
676 && text_is(tk.txt, "//")) {
677 while (tk.num != TK_newline &&
679 tk = token_next(state);
681 err = "Unrecognised token at start of line.";
687 if (g->terminals_declared) {
690 for (s = g->syms; s; s = s->next) {
691 if (s->type != Unknown)
694 fprintf(stderr, "Token %.*s not declared\n",
695 s->name.len, s->name.txt);
704 fprintf(stderr, "Error at line %d: %s\n",
711 ## Analysing the grammar
713 The central task in analysing the grammar is to determine a set of
714 states to drive the parsing process. This will require finding various
715 sets of symbols and of "items". Some of these sets will need to attach
716 information to each element in the set, so they are more like maps.
718 Each "item" may have a 'look-ahead' or `LA` set associated with it.
719 Many of these will be the same as each other. To avoid memory wastage,
720 and to simplify some comparisons of sets, these sets will be stored in a
721 list of unique sets, each assigned a number.
723 Once we have the data structures in hand to manage these sets and lists,
724 we can start setting the 'nullable' flag, build the 'FIRST' sets, and
725 then create the item sets which define the various states.
729 Though we don't only store symbols in these sets, they are the main
730 things we store, so they are called symbol sets or "symsets".
732 A symset has a size, an array of shorts, and an optional array of data
733 values, which are also shorts. If the array of data is not present, we
734 store an impossible pointer, as `NULL` is used to indicate that no
735 memory has been allocated yet.
740 unsigned short *syms, *data;
742 #define NO_DATA ((unsigned short *)1)
743 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
744 const struct symset INIT_DATASET = { 0, NULL, NULL };
746 The arrays of shorts are allocated in blocks of 8 and are kept sorted by
747 using an insertion sort. We don't explicitly record the amount of
748 allocated space as it can be derived directly from the current `cnt`
749 using `((cnt - 1) | 7) + 1`.
// symset_add(): insert `key` (with optional `val`) keeping s->syms sorted.
// Storage grows in blocks of 8: capacity is ((cnt-1)|7)+1, so a realloc
// happens only when cnt has just filled the current block. The parallel
// data array is grown and shifted only when present (data != NO_DATA).
// (Literate-source fragment: some original lines are elided here.)
752 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
755 int current = ((s->cnt-1) | 7) + 1;
756 if (current == s->cnt) {
758 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
759 if (s->data != NO_DATA)
760 s->data = realloc(s->data, sizeof(*s->data) * current);
// insertion sort: shift larger keys (and their data values) up one slot
763 while (i > 0 && s->syms[i-1] > key) {
764 s->syms[i] = s->syms[i-1];
765 if (s->data != NO_DATA)
766 s->data[i] = s->data[i-1];
770 if (s->data != NO_DATA)
775 Finding a symbol (or item) in a `symset` uses a simple binary search.
776 We return the index where the value was found (so data can be accessed),
777 or `-1` to indicate failure.
// symset_find(): binary search for `key` in the sorted syms array; returns
// the index (so associated data can be accessed) or -1 when not present.
// (Literate-source fragment: some original lines are elided here.)
779 static int symset_find(struct symset *ss, unsigned short key)
786 while (lo + 1 < hi) {
787 int mid = (lo + hi) / 2;
788 if (ss->syms[mid] <= key)
793 if (ss->syms[lo] == key)
798 We will often want to form the union of two symsets. When we do, we
799 will often want to know if anything changed (as that might mean there is
800 more work to do). So `symset_union` reports whether anything was added
801 to the first set. We use a slow quadratic approach as these sets are
802 rarely large. If profiling shows this to be a problem it can be
// symset_union(): add to `a` every member of `b` not already present,
// carrying the data value across when `b` has one. The (elided) return
// value reports whether anything was added, which callers use as their
// "something changed, iterate again" flag.
805 static int symset_union(struct symset *a, struct symset *b)
809 for (i = 0; i < b->cnt; i++)
810 if (symset_find(a, b->syms[i]) < 0) {
811 unsigned short data = 0;
812 if (b->data != NO_DATA)
814 symset_add(a, b->syms[i], data);
820 And of course we must be able to free a symset.
// symset_free(): release the syms array and, when present, the data array.
// NO_DATA is a sentinel pointer, not allocated memory, so it is never freed.
822 static void symset_free(struct symset ss)
825 if (ss.data != NO_DATA)
831 Some symsets are simply stored somewhere appropriate in the `grammar`
832 data structure, others need a bit of indirection. This applies
833 particularly to "LA" sets. These will be paired with "items" in an
834 "itemset". As itemsets will be stored in a symset, the "LA" set needs
835 to be stored in the `data` field, so we need a mapping from a small
836 (short) number to an LA `symset`.
838 As mentioned earlier this involves creating a list of unique symsets.
840 For now, we just use a linear list sorted by insertion. A skiplist
841 would be more efficient and may be added later.
848 struct setlist *next;
851 ###### grammar fields
852 struct setlist *sets;
// ss_cmp(): total order on symsets - first by element count, then
// lexicographically by symbol value; keeps the unique-set list sorted.
// (Literate-source fragment: some original lines are elided here.)
857 static int ss_cmp(struct symset a, struct symset b)
860 int diff = a.cnt - b.cnt;
863 for (i = 0; i < a.cnt; i++) {
864 diff = (int)a.syms[i] - (int)b.syms[i];
871 static int save_set(struct grammar *g, struct symset ss)
873 struct setlist **sl = &g->sets;
877 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
884 s = malloc(sizeof(*s));
893 Finding a set by number is currently performed by a simple linear
894 search. If this turns out to hurt performance, we can store the sets in
895 a dynamic array like the productions.
897 static struct symset set_find(struct grammar *g, int num)
899 struct setlist *sl = g->sets;
900 while (sl && sl->num != num)
905 ### Setting `nullable`
907 We set `nullable` on the head symbol for any production for which all
908 the body symbols (if any) are nullable. As this is a recursive
909 definition, any change in the `nullable` setting means that we need to
910 re-evaluate where it needs to be set.
912 We simply loop around performing the same calculations until no more
// set_nullable(): iterate to a fixed point, marking a head symbol nullable
// when every body symbol of one of its productions is already nullable
// (an empty body trivially qualifies).
// (Literate-source fragment: some original lines are elided here.)
919 static void set_nullable(struct grammar *g)
922 while (check_again) {
925 for (p = 0; p < g->production_count; p++) {
926 struct production *pr = g->productions[p];
// head already known nullable - this production can teach us nothing new
929 if (pr->head->nullable)
931 for (s = 0; s < pr->body_size; s++)
932 if (! pr->body[s]->nullable)
// loop ran to completion: the whole body is nullable, so the head is too
934 if (s == pr->body_size) {
935 pr->head->nullable = 1;
942 ### Setting `line_like`
944 In order to be able to ignore newline tokens when not relevant, but
945 still include them in the parse when needed, we will need to know
946 which states can start a "line-like" section of code. We ignore
947 newlines when there is an indent since the most recent start of a
950 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
951 If a symbol cannot derive a NEWLINE, then it is only part of a line -
952 so is word-like. If it can derive a NEWLINE, then we consider it to
955 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
956 is the head of a production that contains a line_like symbol is also a
957 line-like symbol. We use a new field `line_like` to record this
958 attribute of symbols, and compute it in a repetitive manner similar to
// set_line_like(): propagate the line_like attribute ("can derive a
// NEWLINE") from TK_newline up through production heads, repeating until a
// fixed point is reached, in the same style as set_nullable().
// (Literate-source fragment: some original lines are elided here.)
965 static void set_line_like(struct grammar *g)
968 g->symtab[TK_newline]->line_like = 1;
969 while (check_again) {
972 for (p = 0; p < g->production_count; p++) {
973 struct production *pr = g->productions[p];
976 if (pr->head->line_like)
979 for (s = 0 ; s < pr->body_size; s++) {
980 if (pr->body[s]->line_like) {
981 pr->head->line_like = 1;
990 ### Building the `first` sets
992 When calculating what can follow a particular non-terminal, we will need
993 to know what the "first" terminal in any subsequent non-terminal might
994 be. So we calculate the `first` set for every non-terminal and store
995 them in an array. We don't bother recording the "first" set for
996 terminals as they are trivial.
998 As the "first" for one symbol might depend on the "first" of another, we
999 repeat the calculations until no changes happen, just like with
1000 `set_nullable`. This makes use of the fact that `symset_union` reports
1001 if any change happens.
1003 The core of this, which finds the "first" of part of a production body,
1004 will be reused for computing the "follow" sets, so we split that out
1005 into a separate function, `add_first`, which also reports if it got all
1006 the way to the end of the production without finding a non-nullable
1009 ###### grammar fields
1010 struct symset *first;
// add_first(): accumulate into `target` the possible "first" terminals of
// the body of `pr` starting at position `start`, stopping at the first
// non-nullable symbol. The (elided) to_end output reports reaching the end
// of the body without stopping; the follow-set computation relies on that.
// (Literate-source fragment: some original lines are elided here.)
1014 static int add_first(struct production *pr, int start,
1015 struct symset *target, struct grammar *g,
1020 for (s = start; s < pr->body_size; s++) {
1021 struct symbol *bs = pr->body[s];
1022 if (bs->type == Terminal) {
// a terminal is its own "first"; record it at most once
1023 if (symset_find(target, bs->num) < 0) {
1024 symset_add(target, bs->num, 0);
// non-terminal: merge in its FIRST set; symset_union reports any change
1028 } else if (symset_union(target, &g->first[bs->num]))
1034 *to_end = (s == pr->body_size);
// build_first(): allocate one FIRST symset per symbol, then repeatedly run
// add_first() over every production until no set changes (fixed point).
// (Literate-source fragment: some original lines are elided here.)
1038 static void build_first(struct grammar *g)
1040 int check_again = 1;
1042 g->first = calloc(g->num_syms, sizeof(g->first[0]));
1043 for (s = 0; s < g->num_syms; s++)
1044 g->first[s] = INIT_SYMSET;
1046 while (check_again) {
1049 for (p = 0; p < g->production_count; p++) {
1050 struct production *pr = g->productions[p];
1051 struct symset *head = &g->first[pr->head->num];
1053 if (add_first(pr, 0, head, g, NULL))
1059 ### Building the `follow` sets.
1061 There are two different situations when we will want to generate
1062 "follow" sets. If we are doing an SLR analysis, we want to generate a
1063 single "follow" set for each non-terminal in the grammar. That is what
1064 is happening here. If we are doing an LALR or LR analysis we will want
1065 to generate a separate "LA" set for each item. We do that later in
1068 There are two parts to generating a "follow" set. Firstly we look at
1069 every place that any non-terminal appears in the body of any production,
1070 and we find the set of possible "first" symbols after there. This is
1071 done using `add_first` above and only needs to be done once as the
1072 "first" sets are now stable and will not change.
1076 for (p = 0; p < g->production_count; p++) {
1077 struct production *pr = g->productions[p];
1080 for (b = 0; b < pr->body_size - 1; b++) {
1081 struct symbol *bs = pr->body[b];
1082 if (bs->type == Terminal)
1084 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1088 The second part is to add the "follow" set of the head of a production
1089 to the "follow" sets of the final symbol in the production, and any
1090 other symbol which is followed only by `nullable` symbols. As this
1091 depends on "follow" itself we need to repeatedly perform the process
1092 until no further changes happen.
1094 Rather than 2 nested loops, one that repeats the whole process until
1095 there is no change, and one that iterates over the set of productions, we
1096 combine these two functions into a single loop.
1100 for (again = 0, p = 0;
1101 p < g->production_count;
1102 p < g->production_count-1
1103 ? p++ : again ? (again = 0, p = 0)
1105 struct production *pr = g->productions[p];
1108 for (b = pr->body_size - 1; b >= 0; b--) {
1109 struct symbol *bs = pr->body[b];
1110 if (bs->type == Terminal)
1112 if (symset_union(&g->follow[bs->num],
1113 &g->follow[pr->head->num]))
1120 We now just need to create and initialise the `follow` list to get a
1123 ###### grammar fields
1124 struct symset *follow;
1127 static void build_follow(struct grammar *g)
1130 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1131 for (s = 0; s < g->num_syms; s++)
1132 g->follow[s] = INIT_SYMSET;
1136 ### Building itemsets and states
1138 There are three different levels of detail that can be chosen for
1139 building the itemsets and states for the LR grammar. They are:
1141 1. LR(0), LR(0.5), or SLR(1), where no look-ahead is included in the
1142 itemsets - look-ahead, if used at all, is simply the 'follow' sets
1144 2. LALR(1) where we build look-ahead sets with each item and merge
1145 the LA sets when we find two paths to the same "kernel" set of items.
1146 3. LR(1) where different look-ahead for any item in the set means
1147 a different item set must be created.
1149 ###### forward declarations
1150 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1152 We need to be able to look through existing states to see if a newly
1153 generated state already exists. For now we use a simple sorted linked
1156 An item is a pair of numbers: the production number and the position of
1157 "DOT", which is an index into the body of the production. As the
1158 numbers are not enormous we can combine them into a single "short" and
1159 store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1160 production number (so 2000 productions with maximum of 31 symbols in the
1163 Comparing the itemsets will be a little different to comparing symsets
1164 as we want to do the lookup after generating the "kernel" of an
1165 itemset, so we need to ignore the offset=zero items which are added during
1168 To facilitate this, we modify the "DOT" number so that "0" sorts to
1169 the end of the list in the symset, and then only compare items before
// Items pack (production, DOT) into one unsigned short: the low 11 bits
// hold the production number and the top 5 bits hold 31-dot, so that
// dot==0 items sort to the end of a symset (see kernel-comparison note).
1173 static inline unsigned short item_num(int production, int dot)
1175 return production | ((31-dot) << 11);
// item_prod(): recover the production number (low 11 bits).
1177 static inline int item_prod(unsigned short item)
1179 return item & 0x7ff;
// item_dot(): recover DOT by undoing the 31-dot encoding of the top 5 bits.
1181 static inline int item_dot(unsigned short item)
1183 return (31-(item >> 11)) & 0x1f;
1186 For LR(1) analysis we need to compare not just the itemset in a state
1187 but also the LA sets. As we assign each unique LA set a number, we
1188 can just compare the symset and the data values together.
1191 static int itemset_cmp(struct symset a, struct symset b,
1192 enum grammar_type type)
1198 i < a.cnt && i < b.cnt &&
1199 item_dot(a.syms[i]) > 0 &&
1200 item_dot(b.syms[i]) > 0;
1202 int diff = a.syms[i] - b.syms[i];
1206 diff = a.data[i] - b.data[i];
1211 if (i == a.cnt || item_dot(a.syms[i]) == 0)
1215 if (i == b.cnt || item_dot(b.syms[i]) == 0)
1221 if (type < LR1 || av == -1)
1224 a.data[i] - b.data[i];
1227 It will be helpful to know if an itemset has been "completed" or not,
1228 particularly for LALR where itemsets get merged, at which point they
1229 need to be considered for completion again. So a `completed` flag is
1232 For correct handling of `TK_newline` when parsing, we will need to
1233 know which states (itemsets) can occur at the start of a line, so we
1234 will record a `starts_line` flag too whenever DOT is at the start of a
1237 Finally, for handling `TK_out` we need to know whether productions in the
1238 current state started *before* the most recent indent. A state
1239 doesn't usually keep details of individual productions, so we need to
1240 add one extra detail. `min_prefix` is the smallest non-zero number of
1241 symbols *before* DOT in any production in an itemset. This will allow
1242 us to determine if the most recent indent is sufficiently recent
1243 to cancel it against a `TK_out`. If it was seen longer ago than the
1244 `min_prefix`, and if the current state cannot be reduced, then the
1245 indented section must have ended in the middle of a syntactic unit, so
1246 an error must be signaled.
1248 And now we can build the list of itemsets. The lookup routine returns
1249 both a success flag and a pointer to where in the list an insert should
1250 happen, so we don't need to search a second time.
1254 struct itemset *next;	/* singly linked, kept sorted by itemset_cmp */
1256 struct symset items;	/* the items; data[] holds LA-set numbers */
1257 struct symset go_to;	/* symbol -> successor state number */
1259 unsigned short precedence;	/* from the completed item, if any */
1265 ###### grammar fields
1266 struct itemset *items;
/* Search the sorted itemset list for `kernel`.  On return, *where
 * points at the link where a match was found or where an insert
 * should happen, so callers never need a second search. */
1270 static int itemset_find(struct grammar *g, struct itemset ***where,
1271 struct symset kernel, enum grammar_type type)
1273 struct itemset **ip;
1275 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1276 struct itemset *i = *ip;
1278 diff = itemset_cmp(i->items, kernel, type);
1291 Adding an itemset may require merging the LA sets if LALR analysis is
1292 happening. If any new LA set adds any symbols that weren't in the old
1293 LA set, we clear the `completed` flag so that the dependants of this
1294 itemset will be recalculated and their LA sets updated.
1296 `add_itemset` must consume the symsets it is passed, either by adding
1297 them to a data structure, or freeing them.
/* Add (or merge, for LALR) the itemset `ss` into the grammar.
 * Consumes `ss` either by storing it or by merging-and-freeing.
 * Returns the state number of the (possibly pre-existing) itemset. */
1299 static int add_itemset(struct grammar *g, struct symset ss,
1300 enum assoc assoc, unsigned short precedence,
1301 enum grammar_type type)
1303 struct itemset **where, *is;
1305 int found = itemset_find(g, &where, ss, type);
/* Not found: allocate a fresh itemset as the next state number. */
1307 is = calloc(1, sizeof(*is));
1308 is->state = g->states;
1312 is->precedence = precedence;
1314 is->go_to = INIT_DATASET;
/* Found (LALR): merge each new LA set into the stored one. */
1323 for (i = 0; i < ss.cnt; i++) {
1324 struct symset temp = INIT_SYMSET, s;
1325 if (ss.data[i] == is->items.data[i])
1327 s = set_find(g, is->items.data[i]);
1328 symset_union(&temp, &s);
1329 s = set_find(g, ss.data[i]);
/* Union grew: store the merged LA set back into the item. */
1330 if (symset_union(&temp, &s)) {
1331 is->items.data[i] = save_set(g, temp);
1342 To build all the itemsets, we first insert the initial itemset made from
1343 production zero, complete each itemset, and then generate new itemsets
1344 from old until no new ones can be made.
1346 Completing an itemset means finding all the items where "DOT" is
1347 followed by a nonterminal and adding "DOT=0" items for every production
1348 from that non-terminal - providing each item hasn't already been added.
1349 When we add such an item it might get inserted before the current
1350 one, so to make sure we process it we reset the loop counter to the
1353 If LA sets are needed, the LA set for each new item is found using
1354 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This
1355 may be supplemented by the LA set for the item which produced the new
1358 We also collect a set of all symbols which follow "DOT" (in `done`) as
1359 this is used in the next stage. If any of these symbols are flagged as
1360 `line_like`, then this state must be a `starts_line` state so now is a
1361 good time to record that.
1363 When itemsets are created we assign a precedence to the itemset from the
1364 complete item, if there is one. We ignore the possibility of there
1365 being two and don't (currently) handle precedence in such grammars.
1366 When completing a grammar we ignore any item where DOT is followed by a
1367 terminal with a precedence lower than that for the itemset. Unless the
1368 terminal has right associativity, we also ignore items where the
1369 terminal has the same precedence. The result is that unwanted items are
1370 still in the itemset, but the terminal doesn't get into the go to set,
1371 so the item is ineffective.
1373 ###### complete itemset
/* Close the itemset: for every item with DOT before a nonterminal,
 * add DOT=0 items for all of that nonterminal's productions.
 * Also records min_prefix, starts_line, and collects the symbols
 * following DOT into `done` for the derive step. */
1374 for (i = 0; i < is->items.cnt; i++) {
1375 int p = item_prod(is->items.syms[i]);
1376 int bs = item_dot(is->items.syms[i]);
1377 struct production *pr = g->productions[p];
1380 struct symset LA = INIT_SYMSET;
1381 unsigned short sn = 0;
/* LAnl/snnl: reduced LA set (newline only) used for line_like productions. */
1382 struct symset LAnl = INIT_SYMSET;
1383 unsigned short snnl = 0;
/* Track the smallest non-zero pre-DOT length in the itemset. */
1385 if (is->min_prefix == 0 ||
1386 (bs > 0 && bs < is->min_prefix))
1387 is->min_prefix = bs;
1388 if (bs == pr->body_size)
1391 if (s->precedence && is->precedence &&
1392 is->precedence > s->precedence)
1393 /* This terminal has a low precedence and
1394 * shouldn't be shifted
1397 if (s->precedence && is->precedence &&
1398 is->precedence == s->precedence && s->assoc != Right)
1399 /* This terminal has a matching precedence and is
1400 * not Right-associative, so we mustn't shift it.
1403 if (symset_find(&done, s->num) < 0) {
1404 symset_add(&done, s->num, 0);
1406 if (s->type != Nonterminal)
1409 is->starts_line = 1;
/* Build this item's LA set from FIRST of the tail after DOT. */
1414 add_first(pr, bs+1, &LA, g, &to_end);
1416 struct symset ss = set_find(g, is->items.data[i]);
1417 symset_union(&LA, &ss);
1419 sn = save_set(g, LA);
1420 LA = set_find(g, sn);
1421 if (symset_find(&LA, TK_newline))
1422 symset_add(&LAnl, TK_newline, 0);
1423 snnl = save_set(g, LAnl);
1424 LAnl = set_find(g, snnl);
1427 /* Add productions for this symbol */
1428 for (p2 = s->first_production;
1429 p2 < g->production_count &&
1430 g->productions[p2]->head == s;
1432 int itm = item_num(p2, 0);
1433 int pos = symset_find(&is->items, itm);
1435 if (g->productions[p2]->line_like)
1436 symset_add(&is->items, itm, snnl);
1438 symset_add(&is->items, itm, sn);
1439 /* Will have re-ordered, so start
1440 * from beginning again */
/* Item already present: for LALR, merge the LA sets instead. */
1442 } else if (type >= LALR) {
1443 struct symset ss = set_find(g, is->items.data[pos]);
1444 struct symset tmp = INIT_SYMSET;
1445 struct symset *la = &LA;
1447 if (g->productions[p2]->line_like)
1449 symset_union(&tmp, &ss);
1450 if (symset_union(&tmp, la)) {
1451 is->items.data[pos] = save_set(g, tmp);
1459 For each symbol we found (and placed in `done`) we collect all the items
1460 for which this symbol is next, and create a new itemset with "DOT"
1461 advanced over the symbol. This is then added to the collection of
1462 itemsets (or merged with a pre-existing itemset).
1464 ###### derive itemsets
1465 // Now we have a completed itemset, so we need to
1466 // compute all the 'next' itemsets and create them
1467 // if they don't exist.
/* For each symbol that followed DOT (collected in `done`), build the
 * successor itemset with DOT advanced over that symbol. */
1468 for (i = 0; i < done.cnt; i++) {
1470 unsigned short state;
1471 struct symbol *sym = g->symtab[done.syms[i]];
1472 enum assoc assoc = Non;
1473 unsigned short precedence = 0;
1474 struct symset newitemset = INIT_SYMSET;
1476 newitemset = INIT_DATASET;
1478 for (j = 0; j < is->items.cnt; j++) {
1479 int itm = is->items.syms[j];
1480 int p = item_prod(itm);
1481 int bp = item_dot(itm);
1482 struct production *pr = g->productions[p];
1483 unsigned short la = 0;
/* Skip items that are already complete or don't have `sym` next. */
1486 if (bp == pr->body_size)
1488 if (pr->body[bp] != sym)
1491 la = is->items.data[j];
1492 pos = symset_find(&newitemset, pr->head->num);
/* Advancing DOT to the end makes the new itemset reducible;
 * remember the strongest production precedence seen. */
1493 if (bp + 1 == pr->body_size &&
1494 pr->precedence > 0 &&
1495 pr->precedence > precedence) {
1496 // new itemset is reducible and has a precedence.
1497 precedence = pr->precedence;
1501 symset_add(&newitemset, item_num(p, bp+1), la);
1502 else if (type >= LALR) {
1503 // Need to merge la set.
1504 int la2 = newitemset.data[pos];
1506 struct symset ss = set_find(g, la2);
1507 struct symset LA = INIT_SYMSET;
1508 symset_union(&LA, &ss);
1509 ss = set_find(g, la);
1510 if (symset_union(&LA, &ss))
1511 newitemset.data[pos] = save_set(g, LA);
/* Register the successor state in this itemset's goto table. */
1517 state = add_itemset(g, newitemset, assoc, precedence, type);
1518 if (symset_find(&is->go_to, done.syms[i]) < 0)
1519 symset_add(&is->go_to, done.syms[i], state);
1522 All that is left is to create the initial itemset from production zero, and
1523 with `TK_eof` as the LA set.
/* Seed the itemset list with production 0 / DOT=0 and LA={EOF},
 * then repeatedly complete and derive itemsets until no itemset
 * needs (re)processing.  The loop restarts from the list head
 * whenever `again` was set (e.g. after an LALR merge). */
1526 static void build_itemsets(struct grammar *g, enum grammar_type type)
1528 struct symset first = INIT_SYMSET;
1531 unsigned short la = 0;
1533 // LA set just has eof
1534 struct symset eof = INIT_SYMSET;
1535 symset_add(&eof, TK_eof, 0);
1536 la = save_set(g, eof);
1537 first = INIT_DATASET;
1539 // production 0, offset 0 (with no data)
1540 symset_add(&first, item_num(0, 0), la);
1541 add_itemset(g, first, Non, 0, type);
1542 for (again = 0, is = g->items;
/* `?:` (GNU extension): advance, else wrap to head if more work remains. */
1544 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1546 struct symset done = INIT_SYMSET;
1557 ### Completing the analysis.
1559 The exact process of analysis depends on which level was requested. For
1560 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1561 `LR1` we need first, but not follow. For `SLR` we need both.
1563 We don't build the "action" tables that you might expect as the parser
1564 doesn't use them. They will be calculated to some extent if needed for
1567 Once we have built everything we allocate arrays for the two lists:
1568 symbols and itemsets. This allows more efficient access during
1569 reporting. The symbols are grouped as terminals and then non-terminals,
1570 and we record the changeover point in `first_nonterm`.
1572 ###### grammar fields
1573 struct symbol **symtab;
1574 struct itemset **statetab;
1577 ###### grammar_analyse
/* Run the requested level of analysis: number the symbols
 * (terminals first, recording first_nonterm), build the symbol
 * table, build the itemsets, then index them by state number. */
1579 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1583 int snum = TK_reserved;
/* First pass: assign numbers to terminals only. */
1584 for (s = g->syms; s; s = s->next)
1585 if (s->num < 0 && s->type == Terminal) {
1589 g->first_nonterm = snum;
/* Second pass: number everything else except Virtual symbols. */
1590 for (s = g->syms; s; s = s->next)
1591 if (s->num < 0 && s->type != Virtual) {
1595 for (s = g->syms; s; s = s->next)
1601 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1602 for (s = g->syms; s; s = s->next)
1603 g->symtab[s->num] = s;
1613 build_itemsets(g, type);
/* Index itemsets by state number for O(1) lookup when reporting. */
1615 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1616 for (is = g->items; is ; is = is->next)
1617 g->statetab[is->state] = is;
1620 ## Reporting on the Grammar
1622 The purpose of the report is to give the grammar developer insight into
1623 how the grammar parser will work. It is basically a structured dump of
1624 all the tables that have been generated, plus a description of any conflicts.
1626 ###### grammar_report
/* Dump the analysis results; returns the conflict count so the
 * caller can exit non-zero when the grammar has conflicts. */
1627 static int grammar_report(struct grammar *g, enum grammar_type type)
1633 return report_conflicts(g, type);
1636 Firstly we have the complete list of symbols, together with the
1637 "FIRST" set if that was generated. We add a mark to each symbol to
1638 show if it is considered to be "line-like" (`<`), or if it is nullable (`.`).
/* List every symbol with its flags ('.' nullable, '<' line-like),
 * number, type, precedence, and (when computed) its FIRST set. */
1642 static void report_symbols(struct grammar *g)
1646 printf("SYMBOLS + FIRST:\n");
1648 printf("SYMBOLS:\n");
1650 for (n = 0; n < g->num_syms; n++) {
1651 struct symbol *s = g->symtab[n];
1655 printf(" %c%c%3d%c: ",
1656 s->nullable ? '.':' ',
1657 s->line_like ? '<':' ',
1658 s->num, symtypes[s->type]);
1661 printf(" (%d%s)", s->precedence,
1662 assoc_names[s->assoc]);
/* FIRST sets only exist for nonterminals (and only if computed). */
1664 if (g->first && s->type == Nonterminal) {
1667 for (j = 0; j < g->first[n].cnt; j++) {
1670 prtxt(g->symtab[g->first[n].syms[j]]->name);
1677 Then we have the follow sets if they were computed.
/* Print each non-empty FOLLOW set (only computed for SLR). */
1679 static void report_follow(struct grammar *g)
1682 printf("FOLLOW:\n");
1683 for (n = 0; n < g->num_syms; n++)
1684 if (g->follow[n].cnt) {
1688 prtxt(g->symtab[n]->name);
1689 for (j = 0; j < g->follow[n].cnt; j++) {
1692 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1698 And finally the item sets. These include the GO TO tables and, for
1699 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1700 it up a bit. First the items, with production number and associativity.
/* Print one item: "head -> body" with ". " marking DOT, the
 * production precedence in () when reducible, the next terminal's
 * precedence in [], and any line-like annotation. */
1702 static void report_item(struct grammar *g, int itm)
1704 int p = item_prod(itm);
1705 int dot = item_dot(itm);
1706 struct production *pr = g->productions[p];
1710 prtxt(pr->head->name);
1712 for (i = 0; i < pr->body_size; i++) {
1713 printf(" %s", (dot == i ? ". ": ""));
1714 prtxt(pr->body[i]->name);
1716 if (dot == pr->body_size)
1719 if (pr->precedence && dot == pr->body_size)
1720 printf(" (%d%s)", pr->precedence,
1721 assoc_names[pr->assoc]);
1722 if (dot < pr->body_size &&
1723 pr->body[dot]->precedence) {
1724 struct symbol *s = pr->body[dot];
1725 printf(" [%d%s]", s->precedence,
1726 assoc_names[s->assoc]);
1728 if (pr->line_like == 1)
1729 printf(" $$NEWLINE");
1730 else if (pr->line_like)
1735 The LA sets which are (possibly) reported with each item:
/* Print the look-ahead set identified by `lanum`. */
1737 static void report_la(struct grammar *g, int lanum)
1739 struct symset la = set_find(g, lanum);
1743 printf(" LOOK AHEAD(%d)", lanum);
1744 for (i = 0; i < la.cnt; i++) {
1747 prtxt(g->symtab[la.syms[i]]->name);
1752 Then the go to sets:
/* Print each "symbol -> state" entry of a goto table. */
1754 static void report_goto(struct grammar *g, struct symset gt)
1759 for (i = 0; i < gt.cnt; i++) {
1761 prtxt(g->symtab[gt.syms[i]]->name);
1762 printf(" -> %d\n", gt.data[i]);
1766 Now we can report all the item sets complete with items, LA sets, and GO TO.
/* Print every state: header flags, each item (with LA set when
 * the itemset carries data), then the goto table. */
1768 static void report_itemsets(struct grammar *g)
1771 printf("ITEM SETS(%d)\n", g->states);
1772 for (s = 0; s < g->states; s++) {
1774 struct itemset *is = g->statetab[s];
1775 printf(" Itemset %d:%s min prefix=%d",
1776 s, is->starts_line?" (startsline)":"", is->min_prefix);
1778 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1780 for (j = 0; j < is->items.cnt; j++) {
1781 report_item(g, is->items.syms[j]);
/* data == NO_DATA means no LA sets were computed (LR0/LR05). */
1782 if (is->items.data != NO_DATA)
1783 report_la(g, is->items.data[j]);
1785 report_goto(g, is->go_to);
1789 ### Reporting conflicts
1791 Conflict detection varies a lot among different analysis levels. However
1792 LR0 and LR0.5 are quite similar - having no follow sets - and SLR, LALR and
1793 LR1 are also similar as they have FOLLOW or LA sets.
1797 ## conflict functions
/* Dispatch to the right conflict checker for the analysis level;
 * returns the number of conflicts found. */
1799 static int report_conflicts(struct grammar *g, enum grammar_type type)
1802 printf("Conflicts:\n");
1804 cnt = conflicts_lr0(g, type);
1806 cnt = conflicts_slr(g, type);
1808 printf(" - no conflicts\n");
1812 LR0 conflicts are any state which has both a reducible item and
1813 a shiftable item, or two reducible items.
1815 LR05 conflicts only occur if two possible reductions exist,
1816 as shifts always over-ride reductions.
1818 ###### conflict functions
/* Conflict check without look-ahead sets.  LR0: a state with both a
 * shiftable and a reducible item conflicts.  LR05: only two (or
 * more) reducible items conflict, since shift wins over reduce. */
1819 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1823 for (i = 0; i < g->states; i++) {
1824 struct itemset *is = g->statetab[i];
/* Track the last two reducible items and the last shiftable one. */
1825 int last_reduce = -1;
1826 int prev_reduce = -1;
1827 int last_shift = -1;
1831 for (j = 0; j < is->items.cnt; j++) {
1832 int itm = is->items.syms[j];
1833 int p = item_prod(itm);
1834 int bp = item_dot(itm);
1835 struct production *pr = g->productions[p];
1837 if (bp == pr->body_size) {
1838 prev_reduce = last_reduce;
1842 if (pr->body[bp]->type == Terminal)
1845 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1846 printf(" State %d has both SHIFT and REDUCE:\n", i);
1847 report_item(g, is->items.syms[last_shift]);
1848 report_item(g, is->items.syms[last_reduce]);
/* Two reducible items conflict at every level handled here. */
1851 if (prev_reduce >= 0) {
1852 printf(" State %d has 2 (or more) reducible items\n", i);
1853 report_item(g, is->items.syms[prev_reduce]);
1854 report_item(g, is->items.syms[last_reduce]);
1861 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1862 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1863 in the source of the look ahead set.
1865 We build two datasets to reflect the "action" table: one which maps
1866 terminals to items where that terminal could be shifted and another
1867 which maps terminals to items that could be reduced when the terminal
1868 is in look-ahead. We report when we get conflicts between the two.
1870 As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE
1871 terminal, we ignore it. NEWLINES are handled specially with their own
1872 rules for when to shift and when to reduce. Conflicts are expected,
1873 but handled internally.
/* Conflict check using look-ahead: build a terminal->item map of
 * possible shifts, then test each reducible item's LA set against
 * it (SHIFT/REDUCE) and against earlier reductions (REDUCE/REDUCE).
 * NEWLINE shift/reduce clashes are deliberately ignored - the
 * runtime has its own rules for NEWLINE. */
1875 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1880 for (i = 0; i < g->states; i++) {
1881 struct itemset *is = g->statetab[i];
1882 struct symset shifts = INIT_DATASET;
1883 struct symset reduce = INIT_DATASET;
1887 /* First collect the shifts */
1888 for (j = 0; j < is->items.cnt; j++) {
1889 unsigned short itm = is->items.syms[j];
1890 int p = item_prod(itm);
1891 int bp = item_dot(itm);
1892 struct production *pr = g->productions[p];
1895 if (bp >= pr->body_size ||
1896 pr->body[bp]->type != Terminal)
1901 if (s->precedence && is->precedence)
1902 /* Precedence resolves this, so no conflict */
1905 if (symset_find(&shifts, s->num) < 0)
1906 symset_add(&shifts, s->num, itm);
1908 /* Now look for reductions and conflicts */
1909 for (j = 0; j < is->items.cnt; j++) {
1910 unsigned short itm = is->items.syms[j];
1911 int p = item_prod(itm);
1912 int bp = item_dot(itm);
1913 struct production *pr = g->productions[p];
1915 if (bp < pr->body_size)
/* LA source differs: FOLLOW for SLR, per-item sets for LALR/LR1. */
1920 la = g->follow[pr->head->num];
1922 la = set_find(g, is->items.data[j]);
1924 for (k = 0; k < la.cnt; k++) {
1925 int pos = symset_find(&shifts, la.syms[k]);
1926 if (pos >= 0 && la.syms[k] != TK_newline) {
1927 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1929 prtxt(g->symtab[la.syms[k]]->name);
1931 report_item(g, shifts.data[pos]);
1932 report_item(g, itm);
1934 pos = symset_find(&reduce, la.syms[k]);
1936 symset_add(&reduce, la.syms[k], itm);
1939 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1940 prtxt(g->symtab[la.syms[k]]->name);
1942 report_item(g, itm);
1943 report_item(g, reduce.data[pos]);
1947 symset_free(shifts);
1948 symset_free(reduce);
1954 ## Generating the parser
1956 The exported part of the parser is the `parse_XX` function, where the name
1957 `XX` is based on the name of the parser files.
1959 This takes a `code_node`, a partially initialized `token_config`, and an
1960 optional `FILE` to send tracing to. The `token_config` gets the list of
1961 known words added and then is used with the `code_node` to initialize the
1964 `parse_XX` then calls the library function `parser_run` to actually complete
1965 the parse. This needs the `states` table and functions to call the various
1966 pieces of code provided in the grammar file, so they are generated first.
1968 ###### parser_generate
/* Emit the complete generated parser: tables, reduce/free code,
 * then the public parse_<name>() wrapper that wires tokens from
 * `code` into parser_run(). */
1970 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
1971 struct code_node *pre_reduce)
1977 gen_reduce(f, g, file, pre_reduce);
/* #line keeps compiler diagnostics pointing at the generator. */
1980 fprintf(f, "#line 0 \"gen_parser\"\n");
1981 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1984 fprintf(f, "\tstruct token_state *tokens;\n");
1985 fprintf(f, "\tconfig->words_marks = known;\n");
1986 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1987 fprintf(f, "\ttokens = token_open(code, config);\n");
1988 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1989 fprintf(f, "\ttoken_close(tokens);\n");
1990 fprintf(f, "\treturn rv;\n");
1991 fprintf(f, "}\n\n");
1994 ### Known words table
1996 The known words table is simply an array of terminal symbols.
1997 The table of nonterminals used for tracing is a similar array.
/* Emit known[]: the names of all terminals, which occupy the
 * contiguous range starting at TK_reserved in the symbol table. */
2001 static void gen_known(FILE *f, struct grammar *g)
2004 fprintf(f, "#line 0 \"gen_known\"\n");
2005 fprintf(f, "static const char *known[] = {\n");
2006 for (i = TK_reserved;
2007 i < g->num_syms && g->symtab[i]->type == Terminal;
2009 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2010 g->symtab[i]->name.txt);
2011 fprintf(f, "};\n\n");
/* Emit non_term[]: names of the nonterminals, used for tracing. */
2014 static void gen_non_term(FILE *f, struct grammar *g)
2017 fprintf(f, "#line 0 \"gen_non_term\"\n");
2018 fprintf(f, "static const char *non_term[] = {\n");
2019 for (i = TK_reserved;
2022 if (g->symtab[i]->type == Nonterminal)
2023 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2024 g->symtab[i]->name.txt);
2025 fprintf(f, "};\n\n");
2028 ### States and the goto tables.
2030 For each state we record the goto table and details of the reducible
2031 production if there is one.
2032 Some of the details of the reducible production are stored in the
2033 `do_reduce` function to come later. Here we store the production
2034 number, the body size (useful for stack management), and the resulting
2035 symbol (useful for knowing how to free data later).
2037 The go to table is stored in a simple array of `sym` and corresponding
2040 ###### exported types
2048 const struct lookup * go_to;
/* Emit one goto_<state>[] lookup array per state, each a list of
 * { symbol, next-state } pairs taken from the itemset's go_to. */
2059 static void gen_goto(FILE *f, struct grammar *g)
2062 fprintf(f, "#line 0 \"gen_goto\"\n");
2063 for (i = 0; i < g->states; i++) {
2065 fprintf(f, "static const struct lookup goto_%d[] = {\n",
2067 struct symset gt = g->statetab[i]->go_to;
2068 for (j = 0; j < gt.cnt; j++)
2069 fprintf(f, "\t{ %d, %d },\n",
2070 gt.syms[j], gt.data[j]);
/* Emit the states[] table.  For each state, find the reducible
 * production (choosing the longest when several complete items
 * exist) and record it alongside the goto table; -1 fields mark a
 * state with no reduction. */
2077 static void gen_states(FILE *f, struct grammar *g)
2080 fprintf(f, "#line 0 \"gen_states\"\n");
2081 fprintf(f, "static const struct state states[] = {\n");
2082 for (i = 0; i < g->states; i++) {
2083 struct itemset *is = g->statetab[i];
2084 int j, prod = -1, prod_len;
2086 for (j = 0; j < is->items.cnt; j++) {
2087 int itm = is->items.syms[j];
2088 int p = item_prod(itm);
2089 int bp = item_dot(itm);
2090 struct production *pr = g->productions[p];
2092 if (bp < pr->body_size)
2094 /* This is what we reduce - choose longest */
2095 if (prod < 0 || prod_len < pr->body_size) {
2097 prod_len = pr->body_size;
2102 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n",
2103 i, is->go_to.cnt, i, prod,
2104 g->productions[prod]->body_size,
2105 g->productions[prod]->head->num,
2107 g->productions[prod]->line_like,
/* No reducible item: emit sentinel -1 production fields. */
2110 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n",
2111 i, is->go_to.cnt, i,
2112 is->starts_line, is->min_prefix);
2114 fprintf(f, "};\n\n");
2117 ### The `do_reduce` function and the code
2119 When the parser engine decides to reduce a production, it calls
2120 `do_reduce` which runs the code that was included with the production,
2123 This code needs to be able to store data somewhere. Rather than
2124 requiring `do_reduce` to `malloc` that "somewhere", we pass in a large
2125 buffer and have `do_reduce` return the size to be saved.
2127 In order for the code to access "global" context, we pass in the
2128 "config" pointer that was passed to the parser function. If the `struct
2129 token_config` is embedded in some larger structure, the reducing code
2130 can access the larger structure using pointer manipulation. Performing
2131 that pointer manipulation, and any other common code or variables for
2132 all reduce actions, can be provided in code node called "reduce" which
2133 is passed around in `pre_reduce` which you might have already noticed.
2135 The code fragments require translation when written out. Any `$N` needs
2136 to be converted to a reference either to that buffer (if `$0`) or to the
2137 structure returned by a previous reduction. These pointers need to be
2138 cast to the appropriate type for each access. All this is handled in
2141 `gen_code` also allows symbol references to contain a '`<`' as in
2142 '`$<2`'. This is particularly useful for references (or pointers), but
2143 can be used with structures too. The `<` implies that the value is
2144 being moved out, so the object will not be automatically freed. It is
2145 equivalent to assigning `NULL` to the pointer or filling a structure
2148 Instead of a number `N`, the `$` or `$<` can be followed by some letters
2149 and possibly a number. A number by itself (other than zero) selects a
2150 symbol from the body of the production. A sequence of letters selects
2151 the shortest symbol in the body which contains those letters in the
2152 given order. If a number follows the letters, then a later occurrence
2153 of that symbol is chosen. So "`$AB2`" will refer to the structure
2154 attached to the second occurrence of the shortest symbol which contains
2155 an `A` followed by a `B`. If there is no unique shortest symbol, or if
2156 the number given is too large, then the symbol reference is not
2157 transformed, and will cause an error when the code is compiled.
/* Find char `c` in text `t` starting at offset `s`; like strchr
 * but for counted (non-NUL-terminated) text. */
2161 static int textchr(struct text t, char c, int s)
2164 for (i = s; i < t.len; i++)
/* Test whether the `slen` chars of `seq` appear in `name` in order
 * (as a subsequence, not necessarily contiguous). */
2170 static int subseq_match(char *seq, int slen, struct text name)
2174 st = textchr(name, *seq, st);
/* Resolve a "$N" / "$letters[number]" reference in reduction code:
 * letters select the shortest body symbol containing them as a
 * subsequence, an optional trailing number picks a later occurrence.
 * Advances *namep past what was consumed. */
2184 static int choose_sym(char **namep, int len, struct production *p)
2186 char *name = *namep;
/* Collect the leading run of letters (the subsequence pattern). */
2195 ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
/* Then an optional decimal occurrence/position number. */
2201 while (len > 0 && (c >= '0' && c <= '9')) {
2204 n = n * 10 + (c - '0');
/* Find the unique shortest matching body symbol, if any. */
2214 for (i = 0; i < p->body_size; i++) {
2215 if (!subseq_match(nam, namlen, p->body[i]->name))
2217 if (slen == 0 || p->body[i]->name.len < slen)
2219 if (s >= 0 && p->body[i] != p->body[s] &&
2220 p->body[i]->name.len == p->body[s]->name.len)
2221 /* not unique, so s cannot be used */
2228 for (i = 0; i < p->body_size; i++)
2229 if (p->body[i] == p->body[s]) {
/* Emit a production's reduction code, translating $N references:
 * $0 -> the result buffer, terminals -> struct token, others ->
 * the symbol's struct via body[].  Afterwards, clear any $<-moved
 * slots so the engine won't free what the code took ownership of. */
2240 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2243 char *used = calloc(1, p->body_size);
2246 fprintf(f, "\t\t\t");
2247 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2261 n = choose_sym(&c, p->code.txt + p->code.len - c, p);
/* n == 0: the production's own result ("ret"), cast to head type. */
2270 fprintf(f, "(*(struct %.*s*%s)ret)",
2271 p->head->struct_name.len,
2272 p->head->struct_name.txt,
2273 p->head->isref ? "*":"");
2274 else if (p->body[n-1]->type == Terminal)
2275 fprintf(f, "(*(struct token *)body[%d])",
/* No struct type declared: leave the reference untranslated. */
2277 else if (p->body[n-1]->struct_name.txt == NULL)
2278 fprintf(f, "$%d", n);
2280 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2281 p->body[n-1]->struct_name.len,
2282 p->body[n-1]->struct_name.txt,
2283 p->body[n-1]->isref ? "*":"", n-1);
2289 for (i = 0; i < p->body_size; i++) {
2290 if (p->body[i]->struct_name.txt &&
2292 // assume this has been copied out
2293 if (p->body[i]->isref)
2294 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2296 fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
/* Emit do_reduce(): shared "reduce" preamble from `code`, then a
 * switch over production numbers running each production's code
 * and reporting the size of the value left in `ret`. */
2304 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2305 struct code_node *code)
2308 fprintf(f, "#line 1 \"gen_reduce\"\n");
2309 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2311 fprintf(f, "\tint ret_size = 0;\n");
2313 code_node_print(f, code, file);
2315 fprintf(f, "#line 4 \"gen_reduce\"\n");
2316 fprintf(f, "\tswitch(prod) {\n");
2317 for (i = 0; i < g->production_count; i++) {
2318 struct production *p = g->productions[i];
2319 fprintf(f, "\tcase %d:\n", i);
/* Point diagnostics at the grammar file for the user's code. */
2322 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2326 if (p->head->struct_name.txt)
2327 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2328 p->head->struct_name.len,
2329 p->head->struct_name.txt,
2330 p->head->isref ? "*":"");
2332 fprintf(f, "\t\tbreak;\n");
2334 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2339 As each non-terminal can potentially cause a different type of data
2340 structure to be allocated and filled in, we need to be able to free it when
2343 It is particularly important to have fine control over freeing during error
2344 recovery where individual stack frames might need to be freed.
2346 For this, the grammar author is required to define a `free_XX` function for
2347 each structure that is used by a non-terminal. `do_free` will call whichever
2348 is appropriate given a symbol number, and will call `free` (as is
2349 appropriate for tokens) on any terminal symbol.
/* Emit do_free(): terminals (sym < first_nonterm) just free() the
 * token; nonterminals with a struct type call the user's
 * free_<struct>() - dereferencing first when the type is a ref. */
2353 static void gen_free(FILE *f, struct grammar *g)
2357 fprintf(f, "#line 0 \"gen_free\"\n");
2358 fprintf(f, "static void do_free(short sym, void *asn)\n");
2360 fprintf(f, "\tif (!asn) return;\n");
2361 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2362 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2363 fprintf(f, "\tswitch(sym) {\n");
2365 for (i = 0; i < g->num_syms; i++) {
2366 struct symbol *s = g->symtab[i];
/* Skip symbols with no user-declared struct: nothing to free. */
2368 s->type != Nonterminal ||
2369 s->struct_name.txt == NULL)
2372 fprintf(f, "\tcase %d:\n", s->num);
2374 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2376 s->struct_name.txt);
2377 fprintf(f, "\t\tfree(asn);\n");
2379 fprintf(f, "\t\tfree_%.*s(asn);\n",
2381 s->struct_name.txt);
2382 fprintf(f, "\t\tbreak;\n");
2384 fprintf(f, "\t}\n}\n\n");
2387 ## The main routine.
2389 There are three key parts to the "main" function of parsergen: processing
2390 the arguments, loading the grammar file, and dealing with the grammar.
2392 ### Argument processing.
2394 Command line options allow the selection of analysis mode, name of output
2395 file, and whether or not a report should be generated. By default we create
2396 a report only if no code output was requested.
2398 The `parse_XX` function name uses the basename of the output file which
2399 should not have a suffix (`.c`). `.c` is added to the given name for the
2400 code, and `.h` is added for the header (if header text is specified in the
/* Command-line options: one flag per analysis level, plus tag
 * filtering, report selection, and output file naming. */
2407 static const struct option long_options[] = {
2408 { "LR0", 0, NULL, '0' },
2409 { "LR05", 0, NULL, '5' },
2410 { "SLR", 0, NULL, 'S' },
2411 { "LALR", 0, NULL, 'L' },
2412 { "LR1", 0, NULL, '1' },
2413 { "tag", 1, NULL, 't' },
2414 { "report", 0, NULL, 'R' },
2415 { "output", 1, NULL, 'o' },
2416 { NULL, 0, NULL, 0 }
2420 ###### process arguments
2422 char *outfile = NULL;
2427 enum grammar_type type = LR05;
2428 while ((opt = getopt_long(argc, argv, options,
2429 long_options, NULL)) != -1) {
2444 outfile = optarg; break;
2446 tag = optarg; break;
2448 fprintf(stderr, "Usage: parsergen ...\n");
2453 infile = argv[optind++];
2455 fprintf(stderr, "No input file given\n");
2458 if (outfile && report == 1)
2461 if (name && strchr(name, '/'))
2462 name = strrchr(name, '/')+1;
2464 if (optind < argc) {
2465 fprintf(stderr, "Excess command line arguments\n");
2469 ### Loading the grammar file
2471 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2474 Once we have extracted the code (with `mdcode`) we expect to find three
2475 or four sections: header, code, grammar, reduce.
2476 Anything else that is not excluded by the `--tag` option is an error.
2478 "header", "code", and "reduce" are optional, though it is hard to build
2479 a working parser without the first two. "grammar" must be provided.
2483 #include <sys/mman.h>
/* Error callback for code_extract(): print the message to stderr. */
2488 static void pr_err(char *msg)
2491 fprintf(stderr, "%s\n", msg);
2495 struct section *table;
2499 fd = open(infile, O_RDONLY);
2501 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2502 infile, strerror(errno));
2505 len = lseek(fd, 0, 2);
2506 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2507 table = code_extract(file, file+len, pr_err);
2509 struct code_node *hdr = NULL;
2510 struct code_node *code = NULL;
2511 struct code_node *gram = NULL;
2512 struct code_node *pre_reduce = NULL;
2513 for (s = table; s; s = s->next) {
2514 struct text sec = s->section;
2515 if (tag && !strip_tag(&sec, tag))
2517 if (text_is(sec, "header"))
2519 else if (text_is(sec, "code"))
2521 else if (text_is(sec, "grammar"))
2523 else if (text_is(sec, "reduce"))
2524 pre_reduce = s->code;
2526 fprintf(stderr, "Unknown content section: %.*s\n",
2527 s->section.len, s->section.txt);
2532 ### Processing the input
2534 As we need to append an extension to a filename and then open it for
2535 writing, and we need to do this twice, it helps to have a separate function.
/* Open "<base><ext>" for writing; the temporary filename is built
 * in a heap buffer sized for both parts plus the NUL. */
2539 static FILE *open_ext(char *base, char *ext)
2541 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2543 strcat(strcpy(fn, base), ext);
2549 If we can read the grammar, then we analyse and optionally report on it. If
2550 the report finds conflicts we will exit with an error status.
2552 ###### process input
2553 struct grammar *g = NULL;
2555 fprintf(stderr, "No grammar section provided\n");
2558 g = grammar_read(gram);
2560 fprintf(stderr, "Failure to parse grammar\n");
2565 grammar_analyse(g, type);
2567 if (grammar_report(g, type))
2571 If a "headers" section is defined, we write it out and include a declaration
2572 for the `parse_XX` function so it can be used from separate code.
2574 if (rv == 0 && hdr && outfile) {
2575 FILE *f = open_ext(outfile, ".h");
2577 code_node_print(f, hdr, infile);
2578 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2582 fprintf(stderr, "Cannot create %s.h\n",
2588 And if all goes well and an output file was provided, we create the `.c`
2589 file with the code section (if any) and the parser tables and function.
2591 if (rv == 0 && outfile) {
2592 FILE *f = open_ext(outfile, ".c");
2595 code_node_print(f, code, infile);
2596 gen_parser(f, g, infile, name, pre_reduce);
2599 fprintf(stderr, "Cannot create %s.c\n",
2605 And that about wraps it up. We need to set the locale so that UTF-8 is
2606 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2608 ###### File: parsergen.mk
2609 parsergen : parsergen.o libscanner.o libmdcode.o
2610 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2617 int main(int argc, char *argv[])
/* Set the locale so UTF-8 is recognised (libmdcode needs libicuuc;
 * see the prose just above). */
2622 setlocale(LC_ALL,"");
2624 ## process arguments
2631 ## The SHIFT/REDUCE parser
2633 Having analysed the grammar and generated all the tables, we only need
2634 the shift/reduce engine to bring it all together.
2636 ### Goto table lookup
2638 The parser generator has nicely provided us with goto tables sorted by
2639 symbol number. We need a binary search function to find a symbol in the
2642 ###### parser functions
/* search(): binary-search the state's goto table (sorted by symbol
 * number) for `sym`.  Returns the target state, or a negative value
 * when there is no goto entry — callers test `search(...) >= 0`
 * (see in_lookahead below).  Some interior lines are elided. */
2644 static int search(const struct state *l, int sym)
2647 int hi = l->go_to_cnt;
2651 while (lo + 1 < hi) {
2652 int mid = (lo + hi) / 2;
2653 if (l->go_to[mid].sym <= sym)
2658 if (l->go_to[lo].sym == sym)
2659 return l->go_to[lo].state;
2664 ### The state stack.
2666 The core data structure for the parser is the stack. This tracks all
2667 the symbols that have been recognised or partially recognised.
2669 The stack usually won't grow very large - maybe a few tens of entries.
2670 So we dynamically grow an array as required but never bother to
2671 shrink it down again.
2673 We keep the stack as two separate allocations. One, `asn_stack` stores
2674 the "abstract syntax nodes" which are created by each reduction. When
2675 we call `do_reduce` we need to pass an array of the `asn`s of the body
2676 of the production, and by keeping a separate `asn` stack, we can just
2677 pass a pointer into this stack.
2679 The other allocation stores all other stack fields of which there are
2680 several. The `state` is the most important one and guides the parsing
2681 process. The `sym` is nearly unnecessary as it is implicit in the
2682 state. However when we want to free entries from the `asn_stack`, it
2683 helps to know what type they are so we can call the right freeing
2684 function. The symbol leads us to the right free function through
2687 The `indents` count tracks the line indents within the symbol or
2688 immediately following it. These are used to allow indent information to
2689 guide parsing and error recovery.
2691 `since_newline` tracks how many stack frames since the last
2692 start-of-line (whether indented or not). So if `since_newline` is
2693 zero, then this symbol is at the start of a line. Similarly
2694 `since_indent` counts the number of states since an indent, it is zero
2695 precisely when `indents` is not zero.
2697 `newline_permitted` keeps track of whether newlines should be ignored
2700 The stack is most properly seen as alternating states and symbols -
2701 states, like the 'DOT' in items, are between symbols. Each frame in
2702 our stack holds a state and the symbol that was before it. The
2703 bottom of stack holds the start state but no symbol, as nothing came
2704 before the beginning. As we need to store some value, `TK_eof` is used
2705 to mark the beginning of the file as well as the end.
2707 ###### parser functions
/* Per-frame bookkeeping (documented in the prose above):
 *  - newline_permitted: whether a NEWLINE token is significant here;
 *  - since_newline: stack frames since the last start-of-line
 *    (0 means this symbol is at the start of a line). */
2712 short newline_permitted;
2716 short since_newline;
2726 Two operations are needed on the stack - shift (which is like push) and pop.
2728 Shift applies not only to terminals but also to non-terminals. When we
2729 reduce a production we will pop off frames corresponding to the body
2730 symbols, then push on a frame for the head of the production. This last
2731 is exactly the same process as shifting in a terminal so we use the same
2732 function for both. In both cases we provide the symbol, the number of
2733 indents the symbol contains (which will be zero for a terminal symbol)
2734 and a flag indicating that the symbol was at (or was reduced from a
2735 symbol which was at) the start of a line. The state is deduced from the
2736 current top-of-stack state and the new symbol.
2738 To simplify other code we arrange for `shift` to fail if there is no `goto`
2739 state for the symbol. This is useful in basic parsing due to our design
2740 that we shift when we can, and reduce when we cannot. So the `shift`
2741 function reports if it could.
2743 `shift` is also used to push state zero onto the stack, so if the
2744 stack is empty, it always chooses zero as the next state.
2746 So `shift` finds the next state. If that succeeds it extends the
2747 allocations if needed and pushes all the information onto the stacks.
2749 Newlines are permitted after a `starts_line` state until an internal
2750 indent. If the new frame has neither a `starts_line` state nor an
2751 indent, newlines are permitted if the previous stack frame permitted
2754 ###### parser functions
/* shift(): push a frame for `sym` (terminal or nonterminal) onto the
 * parallel stacks.  Finds the goto state from the current top-of-stack
 * (state 0 when the stack is empty), grows both stacks as needed, and
 * computes the frame's newline/indent bookkeeping.  Per the prose
 * above it reports whether a goto state existed; the failure path and
 * several branches are on elided lines. */
2756 static int shift(struct parser *p,
2757 short sym, short indents, short start_of_line,
2759 const struct state states[])
2761 // Push an entry onto the stack
2762 struct frame next = {0};
/* Next state: goto from the current TOS on `sym`. */
2763 int newstate = p->tos
2764 ? search(&states[p->stack[p->tos-1].state],
/* Grow both parallel stacks together, 10 entries at a time.
 * NOTE(review): realloc results are not checked against NULL here. */
2769 if (p->tos >= p->stack_size) {
2770 p->stack_size += 10;
2771 p->stack = realloc(p->stack, p->stack_size
2772 * sizeof(p->stack[0]));
2773 p->asn_stack = realloc(p->asn_stack, p->stack_size
2774 * sizeof(p->asn_stack[0]));
2777 next.indents = indents;
2778 next.state = newstate;
/* Newlines permitted after a starts_line state; otherwise inherited
 * from the previous frame (branch conditions partly elided). */
2779 if (states[newstate].starts_line)
2780 next.newline_permitted = 1;
2782 next.newline_permitted = 0;
2784 next.newline_permitted =
2785 p->stack[p->tos-1].newline_permitted;
2787 next.newline_permitted = 0;
/* since_newline: counts up unless this symbol starts a line. */
2789 if (!start_of_line) {
2791 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2793 next.since_newline = 1;
/* since_indent: 0 while indents are held here, else counts up. */
2796 next.since_indent = 0;
2798 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2800 next.since_indent = 1;
2802 p->stack[p->tos] = next;
2803 p->asn_stack[p->tos] = asn;
2808 `pop` primarily moves the top of stack (`tos`) back down the required
2809 amount and frees any `asn` entries that need to be freed. It also
2810 collects a summary of the indents and line starts in the symbols that
2811 are being removed. It is called _after_ we reduce a production, just
2812 before we `shift` the nonterminal in.
2814 ###### parser functions
/* pop(): drop `num` frames, freeing each asn through do_free(), and
 * summarise what was removed: OR together "was at start of line" into
 * *start_of_line and accumulate the indents (callers do
 * `indents += pop(...)`, so the total is presumably returned). */
2816 static int pop(struct parser *p, int num,
2817 short *start_of_line,
2818 void(*do_free)(short sym, void *asn))
2825 for (i = 0; i < num; i++) {
2826 sol |= !p->stack[p->tos+i].since_newline;
2827 indents += p->stack[p->tos+i].indents;
2828 do_free(p->stack[p->tos+i].sym,
2829 p->asn_stack[p->tos+i]);
/* NOTE(review): parser_run's cleanup calls pop(..., NULL, ...), so an
 * elided NULL-check presumably guards this store — confirm. */
2832 *start_of_line = sol;
2836 ### Memory allocation
2838 The `scanner` returns tokens in a local variable - we want them in allocated
2839 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2840 a reduce is in a large buffer. Both of these require some allocation and
2841 copying, hence `memdup` and `tokcopy`.
2843 ###### parser includes
2846 ###### parser functions
/* memdup(): copy `len` bytes of `m` into fresh heap memory so values
 * built in local/scratch storage can live on the asn_stack. */
2848 void *memdup(void *m, int len)
2854 memcpy(ret, m, len);
/* tok_copy(): heap-allocate a copy of a by-value token from the
 * scanner (allocation result not checked on the visible line). */
2858 static struct token *tok_copy(struct token tk)
2860 struct token *new = malloc(sizeof(*new));
2865 ### The heart of the parser.
2867 Now we have the parser. For each token we might shift it, trigger a
2868 reduction, or start error handling. 2D tokens (IN, OUT, EOL) also need
2871 We return whatever `asn` was returned by reducing production zero.
2873 If we can neither shift nor reduce we have an error to handle. We pop
2874 single entries off the stack until we can shift the `TK_error` symbol,
2875 then drop input tokens until we find one we can shift into the new error
2876 state. We need to ensure that something is dropped or shifted after an
2877 error, or we could get into an infinite loop, only shifting in
2878 `TK_error`, then reducing. So we track if there has been a shift since
2879 the last error, and if not the next error always discards one token.
2881 When we find `TK_in` and `TK_out` tokens which report indents we need
2882 to handle them directly as the grammar cannot express what we want to
2885 `TK_in` tokens are easy: we simply update the indent count in the top stack frame to
2886 record how many indents there are following the previous token.
2888 `TK_out` tokens must be canceled against an indent count
2889 within the stack. If we can reduce some symbols that are all since
2890 the most recent indent, then we do that first. If the minimum prefix
2891 of the current state then extends back before the most recent indent,
2892 that indent can be cancelled. If the minimum prefix is shorter, then
2893 the indent had ended prematurely and we must start error handling, which
2894 is still a work-in-progress.
2896 `TK_newline` tokens are ignored unless the top stack frame records
2897 that they are permitted. In that case they will not be considered for
2898 shifting if it is possible to reduce some symbols that are all since
2899 the most recent start of line. This is how a newline forcibly
2900 terminates any line-like structure - we try to reduce down to at most
2901 one symbol for each line where newlines are allowed.
2902 A consequence of this is that a rule like
2904 ###### Example: newlines - broken
2908 IfStatement -> Newlines if ....
2910 cannot work, as the NEWLINE will never be shifted as the empty string
2911 will be reduced first. Optional sets of newlines need to be included
2912 in the thing that precedes them:
2914 ###### Example: newlines - works
2918 IfStatement -> If ....
2920 Here the NEWLINE will be shifted because nothing can be reduced until
2923 When during error handling we discard tokens read in, we want to keep
2924 discarding until we see one that is recognised. If we had a full set
2925 of LR(1) grammar states, this would mean looking in the look-ahead set,
2926 but we don't keep a full look-ahead set. We only record the subset
2927 that leads to SHIFT. We can, however, deduce the look-ahead set by
2928 looking at the SHIFT subsets for all states that we can get to by
2929 reducing zero or more times. So we need a little function which
2930 checks if a given token is in any of these look-ahead sets.
2932 ###### parser includes
/* in_lookahead(): would `tk` be shiftable from `state` after zero or
 * more default reductions?  Follows each state's reduce_sym goto while
 * a default reduction exists, testing the SHIFT set at every step.
 * Used during error recovery to decide which input tokens to discard
 * (see the prose above). */
2937 static int in_lookahead(struct token *tk, const struct state *states, int state)
2939 while (state >= 0) {
2940 if (search(&states[state], tk->num) >= 0)
2942 if (states[state].reduce_prod < 0)
2944 state = search(&states[state], states[state].reduce_sym);
/* parser_run(): the shift/reduce engine.  For each token: handle the
 * 2D tokens (IN/OUT/EOL) specially, else try SHIFT, else REDUCE, else
 * start error recovery.  Returns the asn produced by reducing
 * production zero.  NOTE(review): interior numbering gaps show elided
 * lines; comments below describe only the visible logic. */
2949 void *parser_run(struct token_state *tokens,
2950 const struct state states[],
2951 int (*do_reduce)(int, void**, struct token_config*, void*),
2952 void (*do_free)(short, void*),
2953 FILE *trace, const char *non_term[],
2954 struct token_config *config)
2956 struct parser p = { 0 };
2957 struct token *tk = NULL;
/* Set when anything has been shifted since the last error, so the
 * next error can be forced to discard at least one token (prevents
 * an infinite shift-TK_error/reduce loop — see prose above). */
2959 int shift_since_err = 1;
/* Bottom-of-stack frame: state 0, with TK_eof as placeholder symbol. */
2962 shift(&p, TK_eof, 0, 1, NULL, states);
2963 while (!accepted && p.tos > 0) {
2964 struct token *err_tk;
2965 struct frame *tos = &p.stack[p.tos-1];
2967 tk = tok_copy(token_next(tokens));
2968 parser_trace(trace, &p,
2969 tk, states, non_term, config->known_count);
/* TK_in: record the indent directly in the top frame; the grammar
 * itself never sees IN tokens. */
2971 if (tk->num == TK_in) {
2973 tos->since_newline = 0;
2974 tos->since_indent = 0;
2975 if (!states[tos->state].starts_line)
2976 tos->newline_permitted = 0;
2979 parser_trace_action(trace, "Record");
/* TK_out: cancel against a recorded indent, reducing first when a
 * whole production lies entirely since the indent. */
2982 if (tk->num == TK_out) {
2983 if (states[tos->state].reduce_size >= 0 &&
2984 states[tos->state].reduce_size <= tos->since_indent)
2986 if (states[tos->state].min_prefix >= tos->since_indent) {
2988 struct frame *in = tos - tos->since_indent;
2990 if (in->indents == 0) {
2991 /* Reassess since_indent and newline_permitted */
2993 in->since_indent = in[-1].since_indent + 1;
2994 in->newline_permitted = in[-1].newline_permitted;
2996 in->since_indent = 0;
2997 in->newline_permitted = 0;
2999 if (states[in->state].starts_line)
3000 in->newline_permitted = 1;
3003 in->since_indent = in[-1].since_indent + 1;
3004 if (states[in->state].starts_line)
3005 in->newline_permitted = 1;
3007 in->newline_permitted = in[-1].newline_permitted;
3012 parser_trace_action(trace, "Cancel");
3015 // fall through to error handling as both SHIFT and REDUCE
/* TK_newline: ignored unless permitted here; while a whole production
 * lies since start-of-line, reduction is preferred over shifting. */
3018 if (tk->num == TK_newline) {
3019 if (!tos->newline_permitted) {
3022 parser_trace_action(trace, "Discard");
3025 if (tos->since_newline > 1 &&
3026 states[tos->state].reduce_size >= 0 &&
3027 states[tos->state].reduce_size <= tos->since_newline)
/* Plain SHIFT: a newline token marks the new frame start-of-line. */
3030 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
3031 shift_since_err = 1;
3033 parser_trace_action(trace, "Shift");
/* A newline_only production may only be reduced at end-of-line. */
3037 if (states[tos->state].reduce_prod >= 0 &&
3038 states[tos->state].newline_only &&
3039 !(tk->num == TK_newline ||
3040 tk->num == TK_eof ||
3041 tk->num == TK_out ||
3042 (tos->indents == 0 && tos->since_newline == 0))) {
3043 /* Anything other than newline or out or eof
3044 * is an error unless we are already at start
3045 * of line, as this production must end at EOL.
/* REDUCE: run the action into a scratch buffer, pop the body frames,
 * copy the result to the heap, then shift the head symbol. */
3047 } else if (states[tos->state].reduce_prod >= 0) {
3050 const struct state *nextstate = &states[tos->state];
3051 int prod = nextstate->reduce_prod;
3052 int size = nextstate->reduce_size;
3054 static char buf[16*1024];
3055 short indents, start_of_line;
3057 body = p.asn_stack + (p.tos - size);
3059 bufsize = do_reduce(prod, body, config, buf);
3061 indents = pop(&p, size, &start_of_line,
3063 res = memdup(buf, bufsize);
3064 memset(buf, 0, bufsize);
/* shift() can only fail here for production 0 (i.e. accept). */
3065 if (!shift(&p, nextstate->reduce_sym,
3066 indents, start_of_line,
3068 if (prod != 0) abort();
3072 parser_trace_action(trace, "Reduce");
3075 /* Error. We walk up the stack until we
3076 * find a state which will accept TK_error.
3077 * We then shift in TK_error and see what state
3078 * that takes us to.
3079 * Then we discard input tokens until
3080 * we find one that is acceptable.
3082 parser_trace_action(trace, "ERROR");
3083 short indents = 0, start_of_line;
3085 err_tk = tok_copy(*tk);
3087 shift(&p, TK_error, 0, 0,
3088 err_tk, states) == 0)
3089 // discard this state
3090 indents += pop(&p, 1, &start_of_line, do_free);
3093 // no state accepted TK_error
/* If nothing was shifted since the last error, discard one token to
 * guarantee forward progress. */
3096 if (!shift_since_err) {
3097 /* must discard at least one token to avoid
3100 if (tk->num == TK_eof)
3103 tk = tok_copy(token_next(tokens));
3105 shift_since_err = 0;
3106 tos = &p.stack[p.tos-1];
/* Skip input until a token in the deduced look-ahead set, or EOF. */
3107 while (!in_lookahead(tk, states, tos->state) &&
3108 tk->num != TK_eof) {
3110 tk = tok_copy(token_next(tokens));
3111 shift_since_err = 1;
3112 if (tk->num == TK_in)
3114 if (tk->num == TK_out) {
3118 // FIXME update since_indent here
3121 tos->indents += indents;
/* Final cleanup: free anything still on the stack. */
3124 pop(&p, p.tos, NULL, do_free);
3130 ###### exported functions
/* Public entry point: drives the tables produced by parsergen over a
 * token stream (definition above, in the parser functions section). */
3131 void *parser_run(struct token_state *tokens,
3132 const struct state states[],
3133 int (*do_reduce)(int, void**, struct token_config*, void*),
3134 void (*do_free)(short, void*),
3135 FILE *trace, const char *non_term[],
3136 struct token_config *config);
3140 Being able to visualize the parser in action can be invaluable when
3141 debugging the parser code, or trying to understand how the parse of a
3142 particular grammar progresses. The stack contains all the important
3143 state, so just printing out the stack every time around the parse loop
3144 can make it possible to see exactly what is happening.
3146 This doesn't explicitly show each SHIFT and REDUCE action. However they
3147 are easily deduced from the change between consecutive lines, and the
3148 details of each state can be found by cross referencing the states list
3149 in the stack with the "report" that parsergen can generate.
3151 For terminal symbols, we just dump the token. For non-terminals we
3152 print the name of the symbol. The look ahead token is reported at the
3153 end inside square brackets.
3155 ###### parser functions
/* Printable names for the reserved (grammar-independent) symbols. */
3157 static char *reserved_words[] = {
3158 [TK_error] = "ERROR",
3161 [TK_newline] = "NEWLINE",
/* parser_trace_state(): print one frame's state as "(N...)": the state
 * number, 's' when it starts a line, and "n<since_newline>" when
 * newlines are permitted. */
3164 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
3166 fprintf(trace, "(%d", f->state);
3167 if (states[f->state].starts_line)
3168 fprintf(trace, "s");
3169 if (f->newline_permitted)
3170 fprintf(trace, "n%d", f->since_newline);
3171 fprintf(trace, ") ");
/* parser_trace(): dump the whole stack — reserved symbols by name,
 * known terminals as (truncated) token text, non-terminals by name —
 * then the look-ahead token in square brackets with its line:col. */
3174 void parser_trace(FILE *trace, struct parser *p,
3175 struct token *tk, const struct state states[],
3176 const char *non_term[], int knowns)
3181 for (i = 0; i < p->tos; i++) {
3182 struct frame *f = &p->stack[i];
3185 if (sym < TK_reserved &&
3186 reserved_words[sym] != NULL)
3187 fputs(reserved_words[sym], trace);
3188 else if (sym < TK_reserved + knowns) {
/* Known terminal: show up to 20 chars of the token's text. */
3189 struct token *t = p->asn_stack[i];
3190 text_dump(trace, t->txt, 20);
3192 fputs(non_term[sym - TK_reserved - knowns],
3195 fprintf(trace, ".%d", f->indents);
3196 if (f->since_newline == 0)
3200 parser_trace_state(trace, f, states);
3202 fprintf(trace, "[");
3203 if (tk->num < TK_reserved &&
3204 reserved_words[tk->num] != NULL)
3205 fputs(reserved_words[tk->num], trace);
3207 text_dump(trace, tk->txt, 20);
3208 fprintf(trace, ":%d:%d]", tk->line, tk->col);
/* parser_trace_action(): append the action name (Shift/Reduce/...) to
 * the current trace line — presumably guarded by an elided NULL check
 * on `trace`, as the other trace helpers must be; confirm. */
3211 void parser_trace_action(FILE *trace, char *action)
3214 fprintf(trace, " - %s\n", action);
3219 The obvious example for a parser is a calculator.
3221 As `scanner` provides number parsing function using `libgmp` it is not much
3222 work to perform arbitrary rational number calculations.
3224 This calculator takes one expression, or an equality test, per line.
3225 The results are printed and if any equality test fails, the program
3226 exits with an error.
3228 ###### File: parsergen.mk
3229 calc.c calc.h : parsergen parsergen.mdc
3230 ./parsergen --tag calc -o calc parsergen.mdc
3231 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3232 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3234 ./calc parsergen.mdc
/* Demo driver for the "calc" test grammar: extracts the example input
 * from this very file and runs the generated parse_calc() over it. */
3239 #include "parse_number.h"
3240 // what do we use for a demo-grammar? A calculator of course.
3252 #include <sys/mman.h>
3258 #include "scanner.h"
/* free_number(): release a number held in a grammar symbol (body elided). */
3263 static void free_number(struct number *n)
/* text_is(): compare a length-counted struct text with a C string. */
3269 static int text_is(struct text t, char *s)
3271 return (strlen(s) == t.len &&
3272 strncmp(s, t.txt, t.len) == 0);
3275 int main(int argc, char *argv[])
/* Map the whole input file (lseek whence 2 == SEEK_END gives length)
 * and extract its literate-code sections.
 * NOTE(review): no error checks on open/lseek/mmap are visible. */
3277 int fd = open(argv[1], O_RDONLY);
3278 int len = lseek(fd, 0, 2);
3279 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3280 struct section *table = code_extract(file, file+len, NULL);
/* Token configuration: ignore line comments; allow .,_+- in numbers. */
3282 struct token_config config = {
3283 .ignored = (1 << TK_line_comment)
3286 .number_chars = ".,_+-",
/* Parse every "example: input" section; any extra argv turns on
 * tracing to stderr. */
3290 for (s = table; s; s = s->next)
3291 if (text_is(s->section, "example: input"))
3292 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3294 struct section *t = table->next;
3295 code_free(table->code);
3307 Session -> Session Line
3310 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3311 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3312 gmp_printf(" or as a decimal: %Fg\n", fl);
3316 | Expression = Expression NEWLINE ${
3317 if (mpq_equal($1.val, $3.val))
3318 gmp_printf("Both equal %Qd\n", $1.val);
3320 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3325 | NEWLINE ${ printf("Blank line\n"); }$
3326 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3329 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3330 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3331 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3332 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3333 | Expression // Expression ${ {
3336 mpz_init(z0); mpz_init(z1); mpz_init(z2);
3337 mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
3338 mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
3339 mpz_tdiv_q(z0, z1, z2);
3340 mpq_set_z($0.val, z0);
3341 mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
3343 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3344 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3349 3.1415926535 - 355/113
3351 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3353 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1