1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
25 ###### File: parsergen.c
30 ## forward declarations
41 ###### File: libparser.c
48 ###### File: parsergen.mk
51 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
54 ## Reading the grammar
56 The grammar must be stored in a markdown literate code file as parsed
57 by "[mdcode][]". It must have three top level (i.e. unreferenced)
58 sections: `header`, `code`, and `grammar`. The first two will be
59 literally copied into the generated `.c` and `.h` files. The last
60 contains the grammar. This is tokenised with "[scanner][]".
62 If the `--tag` option is given, then any top level header that doesn't
63 start with the tag is ignored, and the tag is stripped from the rest. So
65 means that the three needed sections must be `Foo: header`, `Foo: code`,
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
109 The strings reported by `mdcode` and `scanner` are `struct text` which have
110 length rather than being null terminated. To help with printing and
111 comparing we define `text_is` and `prtxt`, which should possibly go in
112 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
113 which might contain control characters.
115 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
116 because that is what we need to detect tags.
119 static int text_is(struct text t, char *s)
121 return (strlen(s) == t.len &&
122 strncmp(s, t.txt, t.len) == 0);
124 static void prtxt(struct text t)
126 printf("%.*s", t.len, t.txt);
129 static int strip_tag(struct text *t, char *tag)
131 int skip = strlen(tag) + 1;
132 if (skip >= t->len ||
133 strncmp(t->txt, tag, skip-1) != 0 ||
134 t->txt[skip-1] != ':')
136 while (skip < t->len && t->txt[skip] == ' ')
145 Productions are comprised primarily of symbols - terminal and
146 non-terminal. We do not make a syntactic distinction between the two,
147 though non-terminals must be identifiers. Non-terminal symbols are
148 those which appear in the head of a production, terminal symbols are
149 those which don't. There are also "virtual" symbols used for precedence
150 marking discussed later, and sometimes we won't know what type a symbol
153 ###### forward declarations
154 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
155 char *symtypes = "UVTN";
159 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
160 table of known symbols and the resulting parser will report them as
161 `TK_reserved + N`. A small set of identifiers are reserved for the
162 different token types that `scanner` can report.
165 static char *reserved_words[] = {
166 [TK_error] = "ERROR",
167 [TK_number] = "NUMBER",
168 [TK_ident] = "IDENTIFIER",
170 [TK_string] = "STRING",
171 [TK_multi_string] = "MULTI_STRING",
174 [TK_newline] = "NEWLINE",
180 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
181 recognised. The former is automatically expected at the end of the text
182 being parsed. The latter are always ignored by the parser.
184 All of these symbols are stored in a simple symbol table. We use the
185 `struct text` from `mdcode` to store the name and link them together
186 into a sorted list using an insertion sort.
188 We don't have separate `find` and `insert` functions as any symbol we
189 find needs to be remembered. We simply expect `find` to always return a
190 symbol, but its type might be `Unknown`.
199 ###### grammar fields
204 static struct symbol *sym_find(struct grammar *g, struct text s)
206 struct symbol **l = &g->syms;
211 (cmp = text_cmp((*l)->name, s)) < 0)
215 n = calloc(1, sizeof(*n));
224 static void symbols_init(struct grammar *g)
226 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
228 for (i = 0; i < entries; i++) {
231 t.txt = reserved_words[i];
234 t.len = strlen(t.txt);
241 ### Data types and precedence.
243 Data type specification and precedence specification are both
244 introduced by a dollar sign at the start of the line. If the next
245 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
246 precedence, otherwise it specifies a data type.
248 The data type name is simply stored and applied to the head of all
249 subsequent productions. It must be the name of a structure optionally
250 preceded by an asterisk which means a reference or "pointer". So
251 `$expression` maps to `struct expression` and `$*statement` maps to
252 `struct statement *`.
254 Any productions given before the first data type declaration will have
255 no data type associated with them and can carry no information. In
256 order to allow other non-terminals to have no type, the data type
257 `$void` can be given. This does *not* mean that `struct void` will be
258 used, but rather that no type will be associated with future
261 The precedence line must contain a list of symbols - typically
262 terminal symbols, but not necessarily. It can only contain symbols
263 that have not been seen yet, so precedence declaration must precede
264 the productions that they affect.
266 A precedence line may also contain a "virtual" symbol which is an
267 identifier preceded by `$$`. Virtual terminals carry precedence
268 information but are not included in the grammar. A production can
269 declare that it inherits the precedence of a given virtual symbol.
271 This use for `$$` precludes it from being used as a symbol in the
272 described language. Two other symbols: `${` and `}$` are also
275 Each new precedence line introduces a new precedence level and
276 declares how it associates. This level is stored in each symbol
277 listed and may be inherited by any production which uses the symbol. A
278 production inherits from the last symbol which has a precedence.
280 ###### grammar fields
281 struct text current_type;
286 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
287 static const char *known[] = { "$$", "${", "}$" };
290 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
292 struct token t = token_next(ts);
297 if (t.num != TK_ident) {
298 err = "type or assoc expected after '$'";
301 if (text_is(t.txt, "LEFT"))
303 else if (text_is(t.txt, "RIGHT"))
305 else if (text_is(t.txt, "NON"))
308 g->current_type = t.txt;
309 g->type_isref = isref;
310 if (text_is(t.txt, "void"))
311 g->current_type.txt = NULL;
313 if (t.num != TK_newline) {
314 err = "Extra tokens after type name";
321 err = "$* cannot be followed by a precedence";
325 // This is a precedence line, need some symbols.
329 while (t.num != TK_newline) {
330 enum symtype type = Terminal;
332 if (t.num == TK_virtual) {
335 if (t.num != TK_ident) {
336 err = "$$ must be followed by a word";
339 } else if (t.num != TK_ident &&
341 err = "Illegal token in precedence line";
344 s = sym_find(g, t.txt);
345 if (s->type != Unknown) {
346 err = "Symbols in precedence line must not already be known.";
350 s->precedence = g->prec_levels;
355 err = "No symbols given on precedence line";
359 while (t.num != TK_newline && t.num != TK_eof)
366 A production either starts with an identifier which is the head
367 non-terminal, or a vertical bar (`|`) in which case this production
368 uses the same head as the previous one. The identifier must be
369 followed by a `->` mark. All productions for a given non-terminal must
370 be grouped together with the `nonterminal ->` given only once.
372 After this a (possibly empty) sequence of identifiers and marks form
373 the body of the production. A virtual symbol may be given after the
374 body by preceding it with `$$`. If a virtual symbol is given, the
375 precedence of the production is that for the virtual symbol. If none
376 is given, the precedence is inherited from the last symbol in the
377 production which has a precedence specified.
379 After the optional precedence may come the `${` mark. This indicates
380 the start of a code fragment. If present, this must be on the same
381 line as the start of the production.
383 All of the text from the `${` through to the matching `}$` is
384 collected and forms the code-fragment for the production. It must all
385 be in one `code_node` of the literate code. The `}$` must be
386 at the end of a line.
388 Text in the code fragment will undergo substitutions where `$N` or
389 `$<N`, for some numeric `N`, will be replaced with a variable holding
390 the parse information for the particular symbol in the production.
391 `$0` is the head of the production, `$1` is the first symbol of the
392 body, etc. The type of `$N` for a terminal symbol is `struct token`.
393 For a non-terminal, it is whatever has been declared for that symbol.
394 The `<` may be included for symbols declared as storing a reference
395 (not a structure) and means that the reference is being moved out, so
396 it will not automatically be freed.
398 While building productions we will need to add to an array which needs to
402 static void array_add(void *varray, int *cnt, void *new)
404 void ***array = varray;
407 current = ((*cnt-1) | (step-1))+1;
408 if (*cnt == current) {
411 *array = realloc(*array, current * sizeof(void*));
413 (*array)[*cnt] = new;
417 Collecting the code fragment simply involves reading tokens until we
418 hit the end token `}$`, and noting the character position of the start and
422 static struct text collect_code(struct token_state *state,
427 code.txt = start.txt.txt + start.txt.len;
429 t = token_next(state);
430 while (t.node == start.node &&
431 t.num != TK_close && t.num != TK_error &&
433 if (t.num == TK_close && t.node == start.node)
434 code.len = t.txt.txt - code.txt;
440 Now we have all the bits we need to parse a full production.
445 ###### grammar fields
446 struct production **productions;
447 int production_count;
449 ###### production fields
451 struct symbol **body;
456 int first_production;
459 static char *parse_production(struct grammar *g,
461 struct token_state *state)
463 /* Head has already been parsed. */
466 struct production p, *pp;
468 memset(&p, 0, sizeof(p));
470 tk = token_next(state);
471 while (tk.num == TK_ident || tk.num == TK_mark) {
472 struct symbol *bs = sym_find(g, tk.txt);
473 if (bs->type == Unknown)
475 if (bs->type == Virtual) {
476 err = "Virtual symbol not permitted in production";
479 if (bs->precedence) {
480 p.precedence = bs->precedence;
483 array_add(&p.body, &p.body_size, bs);
484 tk = token_next(state);
486 if (tk.num == TK_virtual) {
488 tk = token_next(state);
489 if (tk.num != TK_ident) {
490 err = "word required after $$";
493 vs = sym_find(g, tk.txt);
494 if (vs->type != Virtual) {
495 err = "symbol after $$ must be virtual";
498 p.precedence = vs->precedence;
500 tk = token_next(state);
502 if (tk.num == TK_open) {
503 p.code = collect_code(state, tk);
504 if (p.code.txt == NULL) {
505 err = "code fragment not closed properly";
508 tk = token_next(state);
510 if (tk.num != TK_newline && tk.num != TK_eof) {
511 err = "stray tokens at end of line";
514 pp = malloc(sizeof(*pp));
516 array_add(&g->productions, &g->production_count, pp);
519 while (tk.num != TK_newline && tk.num != TK_eof)
520 tk = token_next(state);
524 With the ability to parse production and dollar-lines, we have nearly all
525 that we need to parse a grammar from a `code_node`.
527 The head of the first production will effectively be the `start` symbol of
528 the grammar. However it won't _actually_ be so. Processing the grammar is
529 greatly simplified if the real start symbol only has a single production,
530 and expects `$eof` as the final terminal. So when we find the first
531 explicit production we insert an extra production as production zero which
534 ###### Example: production 0
537 where `START` is the first non-terminal given.
539 ###### create production zero
540 struct production *p = calloc(1,sizeof(*p));
541 struct text start = {"$start",6};
542 struct text eof = {"$eof",4};
543 p->head = sym_find(g, start);
544 p->head->type = Nonterminal;
545 array_add(&p->body, &p->body_size, head);
546 array_add(&p->body, &p->body_size, sym_find(g, eof));
547 p->head->first_production = g->production_count;
548 array_add(&g->productions, &g->production_count, p);
550 Now we are ready to read in the grammar.
553 static struct grammar *grammar_read(struct code_node *code)
555 struct token_config conf = {
558 .words_marks = known,
559 .known_count = sizeof(known)/sizeof(known[0]),
561 .ignored = (1 << TK_line_comment)
562 | (1 << TK_block_comment)
565 | (1 << TK_multi_string)
570 struct token_state *state = token_open(code, &conf);
572 struct symbol *head = NULL;
576 g = calloc(1, sizeof(*g));
579 for (tk = token_next(state); tk.num != TK_eof;
580 tk = token_next(state)) {
581 if (tk.num == TK_newline)
583 if (tk.num == TK_ident) {
585 head = sym_find(g, tk.txt);
586 if (head->type == Nonterminal)
587 err = "This non-terminal has already be used.";
588 else if (head->type == Virtual)
589 err = "Virtual symbol not permitted in head of production";
591 head->type = Nonterminal;
592 head->struct_name = g->current_type;
593 head->isref = g->type_isref;
594 if (g->production_count == 0) {
595 ## create production zero
597 head->first_production = g->production_count;
598 tk = token_next(state);
599 if (tk.num == TK_mark &&
600 text_is(tk.txt, "->"))
601 err = parse_production(g, head, state);
603 err = "'->' missing in production";
605 } else if (tk.num == TK_mark
606 && text_is(tk.txt, "|")) {
607 // another production for same non-term
609 err = parse_production(g, head, state);
611 err = "First production must have a head";
612 } else if (tk.num == TK_mark
613 && text_is(tk.txt, "$")) {
614 err = dollar_line(state, g, 0);
615 } else if (tk.num == TK_mark
616 && text_is(tk.txt, "$*")) {
617 err = dollar_line(state, g, 1);
619 err = "Unrecognised token at start of line.";
627 fprintf(stderr, "Error at line %d: %s\n",
634 ## Analysing the grammar
636 The central task in analysing the grammar is to determine a set of
637 states to drive the parsing process. This will require finding
638 various sets of symbols and of "items". Some of these sets will need
639 to attach information to each element in the set, so they are more
642 Each "item" may have a 'look-ahead' or `LA` set associated with
643 it. Many of these will be the same as each other. To avoid memory
644 wastage, and to simplify some comparisons of sets, these sets will be
645 stored in a list of unique sets, each assigned a number.
647 Once we have the data structures in hand to manage these sets and
648 lists, we can start setting the 'nullable' flag, build the 'FIRST'
649 sets, and then create the item sets which define the various states.
653 Though we don't only store symbols in these sets, they are the main
654 things we store, so they are called symbol sets or "symsets".
656 A symset has a size, an array of shorts, and an optional array of data
657 values, which are also shorts. If the array of data is not present,
658 we store an impossible pointer, as `NULL` is used to indicate that no
659 memory has been allocated yet.
664 unsigned short *syms, *data;
666 #define NO_DATA ((unsigned short *)1)
667 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
668 const struct symset INIT_DATASET = { 0, NULL, NULL };
670 The arrays of shorts are allocated in blocks of 8 and are kept sorted
671 by using an insertion sort. We don't explicitly record the amount of
672 allocated space as it can be derived directly from the current `cnt` using
673 `((cnt - 1) | 7) + 1`.
676 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
679 int current = ((s->cnt-1) | 7) + 1;
680 if (current == s->cnt) {
682 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
683 if (s->data != NO_DATA)
684 s->data = realloc(s->data, sizeof(*s->data) * current);
687 while (i > 0 && s->syms[i-1] > key) {
688 s->syms[i] = s->syms[i-1];
689 if (s->data != NO_DATA)
690 s->data[i] = s->data[i-1];
694 if (s->data != NO_DATA)
699 Finding a symbol (or item) in a `symset` uses a simple binary search.
700 We return the index where the value was found (so data can be accessed),
701 or `-1` to indicate failure.
703 static int symset_find(struct symset *ss, unsigned short key)
710 while (lo + 1 < hi) {
711 int mid = (lo + hi) / 2;
712 if (ss->syms[mid] <= key)
717 if (ss->syms[lo] == key)
722 We will often want to form the union of two symsets. When we do, we
724 will often want to know if anything changed (as that might mean there
724 is more work to do). So `symset_union` reports whether anything was
725 added to the first set. We use a slow quadratic approach as these
726 sets don't really get very big. If profiling shows this to be a problem it
727 can be optimised later.
729 static int symset_union(struct symset *a, struct symset *b)
733 for (i = 0; i < b->cnt; i++)
734 if (symset_find(a, b->syms[i]) < 0) {
735 unsigned short data = 0;
736 if (b->data != NO_DATA)
738 symset_add(a, b->syms[i], data);
744 And of course we must be able to free a symset.
746 static void symset_free(struct symset ss)
749 if (ss.data != NO_DATA)
755 Some symsets are simply stored somewhere appropriate in the `grammar`
756 data structure, others need a bit of indirection. This applies
757 particularly to "LA" sets. These will be paired with "items" in an
758 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
759 stored in the `data` field, so we need a mapping from a small (short)
760 number to an LA `symset`.
762 As mentioned earlier this involves creating a list of unique symsets.
764 For now, we just use a linear list sorted by insertion. A skiplist
765 would be more efficient and may be added later.
772 struct setlist *next;
775 ###### grammar fields
776 struct setlist *sets;
781 static int ss_cmp(struct symset a, struct symset b)
784 int diff = a.cnt - b.cnt;
787 for (i = 0; i < a.cnt; i++) {
788 diff = (int)a.syms[i] - (int)b.syms[i];
795 static int save_set(struct grammar *g, struct symset ss)
797 struct setlist **sl = &g->sets;
801 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
808 s = malloc(sizeof(*s));
817 Finding a set by number is currently performed by a simple linear search.
818 If this turns out to hurt performance, we can store the sets in a dynamic
819 array like the productions.
821 static struct symset set_find(struct grammar *g, int num)
823 struct setlist *sl = g->sets;
824 while (sl && sl->num != num)
830 ### Setting `nullable`
832 We set `nullable` on the head symbol for any production for which all
833 the body symbols (if any) are nullable. As this is a recursive
834 definition, any change in the `nullable` setting means that we need to
835 re-evaluate where it needs to be set.
837 We simply loop around performing the same calculations until no more
844 static void set_nullable(struct grammar *g)
847 while (check_again) {
850 for (p = 0; p < g->production_count; p++) {
851 struct production *pr = g->productions[p];
854 if (pr->head->nullable)
856 for (s = 0; s < pr->body_size; s++)
857 if (! pr->body[s]->nullable)
859 if (s == pr->body_size) {
860 pr->head->nullable = 1;
867 ### Setting `can_eol`
869 In order to be able to ignore newline tokens when not relevant, but
870 still include them in the parse when needed, we will need to know
871 which states can start a "line-like" section of code. We ignore
872 newlines when there is an indent since the most recent start of a
875 To know what is line-like, we first need to know which symbols can end
876 a line-like section, which is precisely those which can end with a
877 newline token. These symbols don't necessarily always end with a
878 newline, but they can. Hence they are not described as "lines" but
881 Clearly the `TK_newline` token can end with a newline. Any symbol
882 which is the head of a production that contains a line-ending symbol
883 followed only by nullable symbols is also a line-ending symbol. We
884 use a new field `can_eol` to record this attribute of symbols, and
885 compute it in a repetitive manner similar to `set_nullable`.
891 static void set_can_eol(struct grammar *g)
894 g->symtab[TK_newline]->can_eol = 1;
895 while (check_again) {
898 for (p = 0; p < g->production_count; p++) {
899 struct production *pr = g->productions[p];
902 if (pr->head->can_eol)
905 for (s = pr->body_size - 1; s >= 0; s--) {
906 if (pr->body[s]->can_eol) {
907 pr->head->can_eol = 1;
911 if (!pr->body[s]->nullable)
918 ### Building the `first` sets
920 When calculating what can follow a particular non-terminal, we will need to
921 know what the "first" terminal in any subsequent non-terminal might be. So
922 we calculate the `first` set for every non-terminal and store them in an
923 array. We don't bother recording the "first" set for terminals as they are
926 As the "first" for one symbol might depend on the "first" of another,
927 we repeat the calculations until no changes happen, just like with
928 `set_nullable`. This makes use of the fact that `symset_union`
929 reports if any change happens.
931 The core of this which finds the "first" of part of a production body
932 will be reused for computing the "follow" sets, so we split it out
933 into a separate function.
935 ###### grammar fields
936 struct symset *first;
940 static int add_first(struct production *pr, int start,
941 struct symset *target, struct grammar *g,
946 for (s = start; s < pr->body_size; s++) {
947 struct symbol *bs = pr->body[s];
948 if (bs->type == Terminal) {
949 if (symset_find(target, bs->num) < 0) {
950 symset_add(target, bs->num, 0);
954 } else if (symset_union(target, &g->first[bs->num]))
960 *to_end = (s == pr->body_size);
964 static void build_first(struct grammar *g)
968 g->first = calloc(g->num_syms, sizeof(g->first[0]));
969 for (s = 0; s < g->num_syms; s++)
970 g->first[s] = INIT_SYMSET;
972 while (check_again) {
975 for (p = 0; p < g->production_count; p++) {
976 struct production *pr = g->productions[p];
977 struct symset *head = &g->first[pr->head->num];
979 if (add_first(pr, 0, head, g, NULL))
985 ### Building the `follow` sets.
987 There are two different situations when we will want to generate "follow"
988 sets. If we are doing an SLR analysis, we want to generate a single
989 "follow" set for each non-terminal in the grammar. That is what is
990 happening here. If we are doing an LALR or LR analysis we will want
991 to generate a separate "LA" set for each item. We do that later
994 There are two parts to generating a "follow" set. Firstly we look at
995 every place that any non-terminal appears in the body of any
996 production, and we find the set of possible "first" symbols after
997 there. This is done using `add_first` above and only needs to be done
998 once as the "first" sets are now stable and will not change.
1002 for (p = 0; p < g->production_count; p++) {
1003 struct production *pr = g->productions[p];
1006 for (b = 0; b < pr->body_size - 1; b++) {
1007 struct symbol *bs = pr->body[b];
1008 if (bs->type == Terminal)
1010 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1014 The second part is to add the "follow" set of the head of a production
1015 to the "follow" sets of the final symbol in the production, and any
1016 other symbol which is followed only by `nullable` symbols. As this
1017 depends on "follow" itself we need to repeatedly perform the process
1018 until no further changes happen.
1022 for (again = 0, p = 0;
1023 p < g->production_count;
1024 p < g->production_count-1
1025 ? p++ : again ? (again = 0, p = 0)
1027 struct production *pr = g->productions[p];
1030 for (b = pr->body_size - 1; b >= 0; b--) {
1031 struct symbol *bs = pr->body[b];
1032 if (bs->type == Terminal)
1034 if (symset_union(&g->follow[bs->num],
1035 &g->follow[pr->head->num]))
1042 We now just need to create and initialise the `follow` list to get a
1045 ###### grammar fields
1046 struct symset *follow;
1049 static void build_follow(struct grammar *g)
1052 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1053 for (s = 0; s < g->num_syms; s++)
1054 g->follow[s] = INIT_SYMSET;
1058 ### Building itemsets and states
1060 There are three different levels of detail that can be chosen for
1061 building the itemsets and states for the LR grammar. They are:
1063 1. LR(0) or SLR(1), where no look-ahead is considered.
1064 2. LALR(1) where we build look-ahead sets with each item and merge
1065 the LA sets when we find two paths to the same "kernel" set of items.
1066 3. LR(1) where different look-ahead for any item in the set means
1067 a different state must be created.
1069 ###### forward declarations
1070 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1072 We need to be able to look through existing states to see if a newly
1073 generated state already exists. For now we use a simple sorted linked
1076 An item is a pair of numbers: the production number and the position of
1077 "DOT", which is an index into the body of the production.
1078 As the numbers are not enormous we can combine them into a single "short"
1079 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1080 production number (so 4000 productions with maximum of 15 symbols in the
1083 Comparing the itemsets will be a little different to comparing symsets
1084 as we want to do the lookup after generating the "kernel" of an
1085 itemset, so we need to ignore the offset=zero items which are added during
1088 To facilitate this, we modify the "DOT" number so that "0" sorts to
1089 the end of the list in the symset, and then only compare items before
1093 static inline unsigned short item_num(int production, int index)
1095 return production | ((31-index) << 11);
1097 static inline int item_prod(unsigned short item)
1099 return item & 0x7ff;
1101 static inline int item_index(unsigned short item)
1103 return (31-(item >> 11)) & 0x1f;
1106 For LR(1) analysis we need to compare not just the itemset in a state
1107 but also the LA sets. As we assign each unique LA set a number, we
1108 can just compare the symset and the data values together.
1111 static int itemset_cmp(struct symset a, struct symset b,
1112 enum grammar_type type)
1118 i < a.cnt && i < b.cnt &&
1119 item_index(a.syms[i]) > 0 &&
1120 item_index(b.syms[i]) > 0;
1122 int diff = a.syms[i] - b.syms[i];
1126 diff = a.data[i] - b.data[i];
1131 if (i == a.cnt || item_index(a.syms[i]) == 0)
1135 if (i == b.cnt || item_index(b.syms[i]) == 0)
1141 if (type < LR1 || av == -1)
1144 a.data[i] - b.data[i];
1147 And now we can build the list of itemsets. The lookup routine returns
1148 both a success flag and a pointer to where in the list an insert
1149 should happen, so we don't need to search a second time.
1153 struct itemset *next;
1155 struct symset items;
1156 struct symset go_to;
1161 ###### grammar fields
1162 struct itemset *items;
1166 static int itemset_find(struct grammar *g, struct itemset ***where,
1167 struct symset kernel, enum grammar_type type)
1169 struct itemset **ip;
1171 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1172 struct itemset *i = *ip;
1174 diff = itemset_cmp(i->items, kernel, type);
1187 Adding an itemset may require merging the LA sets if LALR analysis is
1188 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1189 clear the `completed` flag so that the dependants of this itemset will be
1190 recalculated and their LA sets updated.
1192 `add_itemset` must consume the symsets it is passed, either by adding
1193 them to a data structure, or freeing them.
1195 static int add_itemset(struct grammar *g, struct symset ss,
1196 enum grammar_type type, int starts_line)
1198 struct itemset **where, *is;
1200 int found = itemset_find(g, &where, ss, type);
1202 is = calloc(1, sizeof(*is));
1203 is->state = g->states;
1207 is->go_to = INIT_DATASET;
1208 is->starts_line = starts_line;
1217 for (i = 0; i < ss.cnt; i++) {
1218 struct symset temp = INIT_SYMSET, s;
1219 if (ss.data[i] == is->items.data[i])
1221 s = set_find(g, is->items.data[i]);
1222 symset_union(&temp, &s);
1223 s = set_find(g, ss.data[i]);
1224 if (symset_union(&temp, &s)) {
1225 is->items.data[i] = save_set(g, temp);
1236 To build all the itemsets, we first insert the initial itemset made
1237 from production zero, complete each itemset, and then generate new
1238 itemsets from old until no new ones can be made.
1240 Completing an itemset means finding all the items where "DOT" is followed by
1241 a nonterminal and adding "DOT=0" items for every production from that
1242 non-terminal - providing each item hasn't already been added.
1244 If LA sets are needed, the LA set for each new item is found using
1245 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1246 be supplemented by the LA set for the item which produces the new item.
1248 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1249 is used in the next stage.
1251 NOTE: precedence handling should happen here - I haven't written this yet
1254 ###### complete itemset
1255 for (i = 0; i < is->items.cnt; i++) {
1256 int p = item_prod(is->items.syms[i]);
1257 int bs = item_index(is->items.syms[i]);
1258 struct production *pr = g->productions[p];
1261 struct symset LA = INIT_SYMSET;
1262 unsigned short sn = 0;
1264 if (bs == pr->body_size)
1267 if (symset_find(&done, s->num) < 0)
1268 symset_add(&done, s->num, 0);
1269 if (s->type != Nonterminal)
1275 add_first(pr, bs+1, &LA, g, &to_end);
1277 struct symset ss = set_find(g, is->items.data[i]);
1278 symset_union(&LA, &ss);
1280 sn = save_set(g, LA);
1281 LA = set_find(g, sn);
1284 /* Add productions for this symbol */
1285 for (p2 = s->first_production;
1286 p2 < g->production_count &&
1287 g->productions[p2]->head == s;
1289 int itm = item_num(p2, 0);
1290 int pos = symset_find(&is->items, itm);
1292 symset_add(&is->items, itm, sn);
1293 /* Will have re-ordered, so start
1294 * from beginning again */
1296 } else if (type >= LALR) {
1297 struct symset ss = set_find(g, is->items.data[pos]);
1298 struct symset tmp = INIT_SYMSET;
1300 symset_union(&tmp, &ss);
1301 if (symset_union(&tmp, &LA)) {
1302 is->items.data[pos] = save_set(g, tmp);
1310 For each symbol we found (and placed in `done`) we collect all the items for
1311 which this symbol is next, and create a new itemset with "DOT" advanced over
1312 the symbol. This is then added to the collection of itemsets (or merged
1313 with a pre-existing itemset).
1315 ###### derive itemsets
1316 // Now we have a completed itemset, so we need to
1317 // compute all the 'next' itemsets and create them
1318 // if they don't exist.
1319 for (i = 0; i < done.cnt; i++) {
1321 unsigned short state;
1322 int starts_line = 0;
1323 struct symbol *sym = g->symtab[done.syms[i]];
1324 struct symset newitemset = INIT_SYMSET;
1326 newitemset = INIT_DATASET;
1329 (sym->nullable && is->starts_line))
1331 for (j = 0; j < is->items.cnt; j++) {
1332 int itm = is->items.syms[j];
1333 int p = item_prod(itm);
1334 int bp = item_index(itm);
1335 struct production *pr = g->productions[p];
1336 unsigned short la = 0;
1339 if (bp == pr->body_size)
1341 if (pr->body[bp] != sym)
1344 la = is->items.data[j];
1345 pos = symset_find(&newitemset, pr->head->num);
1347 symset_add(&newitemset, item_num(p, bp+1), la);
1348 else if (type >= LALR) {
1349 // Need to merge la set.
1350 int la2 = newitemset.data[pos];
1352 struct symset ss = set_find(g, la2);
1353 struct symset LA = INIT_SYMSET;
1354 symset_union(&LA, &ss);
1355 ss = set_find(g, la);
1356 if (symset_union(&LA, &ss))
1357 newitemset.data[pos] = save_set(g, LA);
1363 state = add_itemset(g, newitemset, type, starts_line);
1364 if (symset_find(&is->go_to, done.syms[i]) < 0)
1365 symset_add(&is->go_to, done.syms[i], state);
1368 All that is left is to create the initial itemset from production zero, and
1369 with `TK_eof` as the LA set.
1372 static void build_itemsets(struct grammar *g, enum grammar_type type)
1374 struct symset first = INIT_SYMSET;
1377 unsigned short la = 0;
1379 // LA set just has eof
1380 struct symset eof = INIT_SYMSET;
1381 symset_add(&eof, TK_eof, 0);
1382 la = save_set(g, eof);
1383 first = INIT_DATASET;
1385 // production 0, offset 0 (with no data)
1386 symset_add(&first, item_num(0, 0), la);
1387 add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
1388 for (again = 0, is = g->items;
1390 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1392 struct symset done = INIT_SYMSET;
1403 ### Completing the analysis.
1405 The exact process of analysis depends on which level was requested. For
1406 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1407 `LR1` we need first, but not follow. For `SLR` we need both.
1409 We don't build the "action" tables that you might expect as the parser
1410 doesn't use them. They will be calculated to some extent if needed for
1413 Once we have built everything we allocate arrays for the two lists:
1414 symbols and itemsets. This allows more efficient access during reporting.
1415 The symbols are grouped as terminals and non-terminals and we record the
1416 changeover point in `first_nonterm`.
1418 ###### grammar fields
1419 struct symbol **symtab;
1420 struct itemset **statetab;
1423 ###### grammar_analyse
1425 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1429 int snum = TK_reserved;
1430 for (s = g->syms; s; s = s->next)
1431 if (s->num < 0 && s->type == Terminal) {
1435 g->first_nonterm = snum;
1436 for (s = g->syms; s; s = s->next)
1442 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1443 for (s = g->syms; s; s = s->next)
1444 g->symtab[s->num] = s;
1454 build_itemsets(g, type);
1456 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1457 for (is = g->items; is ; is = is->next)
1458 g->statetab[is->state] = is;
1461 ## Reporting on the Grammar
1463 The purpose of the report is to give the grammar developer insight into
1464 how the grammar parser will work. It is basically a structured dump of
1465 all the tables that have been generated, plus a description of any conflicts.
1467 ###### grammar_report
1468 static int grammar_report(struct grammar *g, enum grammar_type type)
1474 return report_conflicts(g, type);
1477 Firstly we have the complete list of symbols, together with the
1478 "FIRST" set if that was generated. We add a mark to each symbol to
1479 show if it can end in a newline (`>`), or if it is nullable (`.`).
1483 static void report_symbols(struct grammar *g)
1487 printf("SYMBOLS + FIRST:\n");
1489 printf("SYMBOLS:\n");
1491 for (n = 0; n < g->num_syms; n++) {
1492 struct symbol *s = g->symtab[n];
1496 printf(" %c%c%3d%c: ",
1497 s->nullable ? '.':' ',
1498 s->can_eol ? '>':' ',
1499 s->num, symtypes[s->type]);
1502 printf(" (%d%s)", s->precedence,
1503 assoc_names[s->assoc]);
1505 if (g->first && s->type == Nonterminal) {
1508 for (j = 0; j < g->first[n].cnt; j++) {
1511 prtxt(g->symtab[g->first[n].syms[j]]->name);
1518 Then we have the follow sets if they were computed.
1520 static void report_follow(struct grammar *g)
1523 printf("FOLLOW:\n");
1524 for (n = 0; n < g->num_syms; n++)
1525 if (g->follow[n].cnt) {
1529 prtxt(g->symtab[n]->name);
1530 for (j = 0; j < g->follow[n].cnt; j++) {
1533 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1539 And finally the item sets. These include the GO TO tables and, for
1540 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1541 it up a bit. First the items, with production number and associativity.
1543 static void report_item(struct grammar *g, int itm)
1545 int p = item_prod(itm);
1546 int dot = item_index(itm);
1547 struct production *pr = g->productions[p];
1551 prtxt(pr->head->name);
1553 for (i = 0; i < pr->body_size; i++) {
1554 printf(" %s", (dot == i ? ". ": ""));
1555 prtxt(pr->body[i]->name);
1557 if (dot == pr->body_size)
1561 printf(" (%d%s)", pr->precedence,
1562 assoc_names[pr->assoc]);
1566 The LA sets which are (possibly) reported with each item:
1568 static void report_la(struct grammar *g, int lanum)
1570 struct symset la = set_find(g, lanum);
1574 printf(" LOOK AHEAD(%d)", lanum);
1575 for (i = 0; i < la.cnt; i++) {
1578 prtxt(g->symtab[la.syms[i]]->name);
1583 Then the go to sets:
1586 static void report_goto(struct grammar *g, struct symset gt)
1591 for (i = 0; i < gt.cnt; i++) {
1593 prtxt(g->symtab[gt.syms[i]]->name);
1594 printf(" -> %d\n", gt.data[i]);
1598 Now we can report all the item sets complete with items, LA sets, and GO TO.
1600 static void report_itemsets(struct grammar *g)
1603 printf("ITEM SETS(%d)\n", g->states);
1604 for (s = 0; s < g->states; s++) {
1606 struct itemset *is = g->statetab[s];
1607 printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
1608 for (j = 0; j < is->items.cnt; j++) {
1609 report_item(g, is->items.syms[j]);
1610 if (is->items.data != NO_DATA)
1611 report_la(g, is->items.data[j]);
1613 report_goto(g, is->go_to);
1617 ### Reporting conflicts
1619 Conflict detection varies a lot among different analysis levels. However
1620 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1621 LR1 are also similar as they have FOLLOW or LA sets.
1625 ###### conflict functions
1627 static int report_conflicts(struct grammar *g, enum grammar_type type)
1630 printf("Conflicts:\n");
1632 cnt = conflicts_lr0(g, type);
1634 cnt = conflicts_slr(g, type);
1636 printf(" - no conflicts\n");
1640 LR0 conflicts are any state which has both a reducible item and
1641 a shiftable item, or two reducible items.
1643 LR05 conflicts only occur if two possible reductions exist,
1644 as shifts always over-ride reductions.
1646 ###### conflict functions
1647 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1651 for (i = 0; i < g->states; i++) {
1652 struct itemset *is = g->statetab[i];
1653 int last_reduce = -1;
1654 int prev_reduce = -1;
1655 int last_shift = -1;
1659 for (j = 0; j < is->items.cnt; j++) {
1660 int itm = is->items.syms[j];
1661 int p = item_prod(itm);
1662 int bp = item_index(itm);
1663 struct production *pr = g->productions[p];
1665 if (bp == pr->body_size) {
1666 prev_reduce = last_reduce;
1670 if (pr->body[bp]->type == Terminal)
1673 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1674 printf(" State %d has both SHIFT and REDUCE:\n", i);
1675 report_item(g, is->items.syms[last_shift]);
1676 report_item(g, is->items.syms[last_reduce]);
1679 if (prev_reduce >= 0) {
1680 printf(" State %d has 2 (or more) reducible items\n", i);
1681 report_item(g, is->items.syms[prev_reduce]);
1682 report_item(g, is->items.syms[last_reduce]);
1689 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1690 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1691 in the source of the look ahead set.
1693 We build two datasets to reflect the "action" table: one which maps
1694 terminals to items where that terminal could be shifted and another
1695 which maps terminals to items that could be reduced when the terminal
1696 is in look-ahead. We report when we get conflicts between the two.
1698 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1703 for (i = 0; i < g->states; i++) {
1704 struct itemset *is = g->statetab[i];
1705 struct symset shifts = INIT_DATASET;
1706 struct symset reduce = INIT_DATASET;
1710 /* First collect the shifts */
1711 for (j = 0; j < is->items.cnt; j++) {
1712 unsigned short itm = is->items.syms[j];
1713 int p = item_prod(itm);
1714 int bp = item_index(itm);
1715 struct production *pr = g->productions[p];
1717 if (bp < pr->body_size &&
1718 pr->body[bp]->type == Terminal) {
1720 int sym = pr->body[bp]->num;
1721 if (symset_find(&shifts, sym) < 0)
1722 symset_add(&shifts, sym, itm);
1725 /* Now look for reduction and conflicts */
1726 for (j = 0; j < is->items.cnt; j++) {
1727 unsigned short itm = is->items.syms[j];
1728 int p = item_prod(itm);
1729 int bp = item_index(itm);
1730 struct production *pr = g->productions[p];
1732 if (bp < pr->body_size)
1737 la = g->follow[pr->head->num];
1739 la = set_find(g, is->items.data[j]);
1741 for (k = 0; k < la.cnt; k++) {
1742 int pos = symset_find(&shifts, la.syms[k]);
1744 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1745 prtxt(g->symtab[la.syms[k]]->name);
1747 report_item(g, shifts.data[pos]);
1748 report_item(g, itm);
1751 pos = symset_find(&reduce, la.syms[k]);
1753 symset_add(&reduce, la.syms[k], itm);
1756 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1757 prtxt(g->symtab[la.syms[k]]->name);
1759 report_item(g, itm);
1760 report_item(g, reduce.data[pos]);
1764 symset_free(shifts);
1765 symset_free(reduce);
1771 ## Generating the parser
1773 The exported part of the parser is the `parse_XX` function, where the name
1774 `XX` is based on the name of the parser files.
1776 This takes a `code_node`, a partially initialized `token_config`, and an
1777 optional `FILE` to send tracing to. The `token_config` gets the list of
1778 known words added and then is used with the `code_node` to initialize the
1781 `parse_XX` then calls the library function `parser_run` to actually complete
1782 the parse. This needs the `states` table and function to call the various
1783 pieces of code provided in the grammar file, so they are generated first.
1785 ###### parser_generate
1787 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
1793 gen_reduce(f, g, file);
1796 fprintf(f, "#line 0 \"gen_parser\"\n");
1797 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1800 fprintf(f, "\tstruct token_state *tokens;\n");
1801 fprintf(f, "\tconfig->words_marks = known;\n");
1802 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1803 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1804 fprintf(f, "\ttokens = token_open(code, config);\n");
1805 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1806 fprintf(f, "\ttoken_close(tokens);\n");
1807 fprintf(f, "\treturn rv;\n");
1808 fprintf(f, "}\n\n");
1811 ### Known words table
1813 The known words table is simply an array of terminal symbols.
1814 The table of nonterminals used for tracing is a similar array.
1818 static void gen_known(FILE *f, struct grammar *g)
1821 fprintf(f, "#line 0 \"gen_known\"\n");
1822 fprintf(f, "static const char *known[] = {\n");
1823 for (i = TK_reserved;
1824 i < g->num_syms && g->symtab[i]->type == Terminal;
1826 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1827 g->symtab[i]->name.txt);
1828 fprintf(f, "};\n\n");
1831 static void gen_non_term(FILE *f, struct grammar *g)
1834 fprintf(f, "#line 0 \"gen_non_term\"\n");
1835 fprintf(f, "static const char *non_term[] = {\n");
1836 for (i = TK_reserved;
1839 if (g->symtab[i]->type == Nonterminal)
1840 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1841 g->symtab[i]->name.txt);
1842 fprintf(f, "};\n\n");
1845 ### States and the goto tables.
1847 For each state we record the goto table, the reducible production if
1848 there is one, or a symbol to shift for error recovery.
1849 Some of the details of the reducible production are stored in the
1850 `do_reduce` function to come later. Here we store the production number,
1851 the body size (useful for stack management) and the resulting symbol (useful
1852 for knowing how to free data later).
1854 The go to table is stored in a simple array of `sym` and corresponding
1857 ###### exported types
1865 const struct lookup * go_to;
1876 static void gen_goto(FILE *f, struct grammar *g)
1879 fprintf(f, "#line 0 \"gen_goto\"\n");
1880 for (i = 0; i < g->states; i++) {
1882 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1884 struct symset gt = g->statetab[i]->go_to;
1885 for (j = 0; j < gt.cnt; j++)
1886 fprintf(f, "\t{ %d, %d },\n",
1887 gt.syms[j], gt.data[j]);
1894 static void gen_states(FILE *f, struct grammar *g)
1897 fprintf(f, "#line 0 \"gen_states\"\n");
1898 fprintf(f, "static const struct state states[] = {\n");
1899 for (i = 0; i < g->states; i++) {
1900 struct itemset *is = g->statetab[i];
1901 int j, prod = -1, prod_len;
1903 int shift_len = 0, shift_remain = 0;
1904 for (j = 0; j < is->items.cnt; j++) {
1905 int itm = is->items.syms[j];
1906 int p = item_prod(itm);
1907 int bp = item_index(itm);
1908 struct production *pr = g->productions[p];
1910 if (bp < pr->body_size) {
1911 if (shift_sym < 0 ||
1912 (shift_len == bp && shift_remain > pr->body_size - bp)) {
1913 shift_sym = pr->body[bp]->num;
1915 shift_remain = pr->body_size - bp;
1919 /* This is what we reduce */
1920 if (prod < 0 || prod_len < pr->body_size) {
1922 prod_len = pr->body_size;
1927 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
1928 i, is->go_to.cnt, i, prod,
1929 g->productions[prod]->body_size,
1930 g->productions[prod]->head->num,
1933 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
1934 i, is->go_to.cnt, i, shift_sym,
1937 fprintf(f, "};\n\n");
1940 ### The `do_reduce` function and the code
1942 When the parser engine decides to reduce a production, it calls `do_reduce`.
1943 This has two functions.
1945 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
1946 production being reduced. Secondly it runs the code that was included with
1947 the production if any.
1949 This code needs to be able to store data somewhere. Rather than requiring
1950 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
1951 `do_reduce` return the size to be saved.
1953 In order for the code to access "global" context, we pass in the
1954 "config" pointer that was passed to parser function. If the `struct
1955 token_config` is embedded in some larger structure, the reducing code
1956 can access the larger structure using pointer manipulation.
1958 The code fragment requires translation when written out. Any `$N` needs to
1959 be converted to a reference either to that buffer (if `$0`) or to the
1960 structure returned by a previous reduction. These pointers need to be cast
1961 to the appropriate type for each access. All this is handled in
1964 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
1965 This applies only to symbols with references (or pointers), not those with structures.
1966 The `<` implies that the reference is being moved out, so the object will not be
1967 automatically freed. This is equivalent to assigning `NULL` to the pointer.
1971 static void gen_code(struct production *p, FILE *f, struct grammar *g)
1974 char *used = calloc(1, p->body_size);
1977 fprintf(f, "\t\t\t");
1978 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
1992 if (*c < '0' || *c > '9') {
1999 while (c[1] >= '0' && c[1] <= '9') {
2001 n = n * 10 + *c - '0';
2004 fprintf(f, "(*(struct %.*s*%s)ret)",
2005 p->head->struct_name.len,
2006 p->head->struct_name.txt,
2007 p->head->isref ? "*":"");
2008 else if (n > p->body_size)
2009 fprintf(f, "$%d", n);
2010 else if (p->body[n-1]->type == Terminal)
2011 fprintf(f, "(*(struct token *)body[%d])",
2013 else if (p->body[n-1]->struct_name.txt == NULL)
2014 fprintf(f, "$%d", n);
2016 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2017 p->body[n-1]->struct_name.len,
2018 p->body[n-1]->struct_name.txt,
2019 p->body[n-1]->isref ? "*":"", n-1);
2024 for (i = 0; i < p->body_size; i++) {
2025 if (p->body[i]->struct_name.txt &&
2026 p->body[i]->isref &&
2028 // assume this has been copied out
2029 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2036 static void gen_reduce(FILE *f, struct grammar *g, char *file)
2039 fprintf(f, "#line 0 \"gen_reduce\"\n");
2040 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2042 fprintf(f, "\tint ret_size = 0;\n");
2044 fprintf(f, "\tswitch(prod) {\n");
2045 for (i = 0; i < g->production_count; i++) {
2046 struct production *p = g->productions[i];
2047 fprintf(f, "\tcase %d:\n", i);
2052 if (p->head->struct_name.txt)
2053 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2054 p->head->struct_name.len,
2055 p->head->struct_name.txt,
2056 p->head->isref ? "*":"");
2058 fprintf(f, "\t\tbreak;\n");
2060 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2065 As each non-terminal can potentially cause a different type of data
2066 structure to be allocated and filled in, we need to be able to free it when
2069 It is particularly important to have fine control over freeing during error
2070 recovery where individual stack frames might need to be freed.
2072 For this, the grammar author is required to define a `free_XX` function for
2073 each structure that is used by a non-terminal. `do_free` will call whichever
2074 is appropriate given a symbol number, and will call `free` (as is
2075 appropriate for tokens) on any terminal symbol.
2079 static void gen_free(FILE *f, struct grammar *g)
2083 fprintf(f, "#line 0 \"gen_free\"\n");
2084 fprintf(f, "static void do_free(short sym, void *asn)\n");
2086 fprintf(f, "\tif (!asn) return;\n");
2087 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2088 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2089 fprintf(f, "\tswitch(sym) {\n");
2091 for (i = 0; i < g->num_syms; i++) {
2092 struct symbol *s = g->symtab[i];
2094 s->type != Nonterminal ||
2095 s->struct_name.txt == NULL)
2098 fprintf(f, "\tcase %d:\n", s->num);
2100 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2102 s->struct_name.txt);
2103 fprintf(f, "\t\tfree(asn);\n");
2105 fprintf(f, "\t\tfree_%.*s(asn);\n",
2107 s->struct_name.txt);
2108 fprintf(f, "\t\tbreak;\n");
2110 fprintf(f, "\t}\n}\n\n");
2113 ## The main routine.
2115 There are three key parts to the "main" function of parsergen: processing
2116 the arguments, loading the grammar file, and dealing with the grammar.
2118 ### Argument processing.
2120 Command line options allow the selection of analysis mode, name of output
2121 file, and whether or not a report should be generated. By default we create
2122 a report only if no code output was requested.
2124 The `parse_XX` function name uses the basename of the output file which
2125 should not have a suffix (`.c`). `.c` is added to the given name for the
2126 code, and `.h` is added for the header (if header text is specified in the
2133 static const struct option long_options[] = {
2134 { "LR0", 0, NULL, '0' },
2135 { "LR05", 0, NULL, '5' },
2136 { "SLR", 0, NULL, 'S' },
2137 { "LALR", 0, NULL, 'L' },
2138 { "LR1", 0, NULL, '1' },
2139 { "tag", 1, NULL, 't' },
2140 { "report", 0, NULL, 'R' },
2141 { "output", 1, NULL, 'o' },
2142 { NULL, 0, NULL, 0 }
2144 const char *options = "05SL1t:Ro:";
2146 ###### process arguments
2148 char *outfile = NULL;
2153 enum grammar_type type = LR05;
2154 while ((opt = getopt_long(argc, argv, options,
2155 long_options, NULL)) != -1) {
2170 outfile = optarg; break;
2172 tag = optarg; break;
2174 fprintf(stderr, "Usage: parsergen ...\n");
2179 infile = argv[optind++];
2181 fprintf(stderr, "No input file given\n");
2184 if (outfile && report == 1)
2187 if (name && strchr(name, '/'))
2188 name = strrchr(name, '/')+1;
2190 if (optind < argc) {
2191 fprintf(stderr, "Excess command line arguments\n");
2195 ### Loading the grammar file
2197 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2200 Once we have extracted the code (with `mdcode`) we expect to find three
2201 sections: header, code, and grammar. Anything else that is not
2202 excluded by the `--tag` option is an error.
2204 "header" and "code" are optional, though it is hard to build a working
2205 parser with neither. "grammar" must be provided.
2209 #include <sys/mman.h>
2214 static void pr_err(char *msg)
2217 fprintf(stderr, "%s\n", msg);
2221 struct section *table;
2225 fd = open(infile, O_RDONLY);
2227 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2228 infile, strerror(errno));
2231 len = lseek(fd, 0, 2);
2232 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2233 table = code_extract(file, file+len, pr_err);
2235 struct code_node *hdr = NULL;
2236 struct code_node *code = NULL;
2237 struct code_node *gram = NULL;
2238 for (s = table; s; s = s->next) {
2239 struct text sec = s->section;
2240 if (tag && !strip_tag(&sec, tag))
2242 if (text_is(sec, "header"))
2244 else if (text_is(sec, "code"))
2246 else if (text_is(sec, "grammar"))
2249 fprintf(stderr, "Unknown content section: %.*s\n",
2250 s->section.len, s->section.txt);
2255 ### Processing the input
2257 As we need to append an extension to a filename and then open it for
2258 writing, and we need to do this twice, it helps to have a separate function.
2262 static FILE *open_ext(char *base, char *ext)
2264 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2266 strcat(strcpy(fn, base), ext);
2272 If we can read the grammar, then we analyse and optionally report on it. If
2273 the report finds conflicts we will exit with an error status.
2275 ###### process input
2276 struct grammar *g = NULL;
2278 fprintf(stderr, "No grammar section provided\n");
2281 g = grammar_read(gram);
2283 fprintf(stderr, "Failure to parse grammar\n");
2288 grammar_analyse(g, type);
2290 if (grammar_report(g, type))
2294 If a headers section is defined, we write it out and include a declaration
2295 for the `parse_XX` function so it can be used from separate code.
2297 if (rv == 0 && hdr && outfile) {
2298 FILE *f = open_ext(outfile, ".h");
2300 code_node_print(f, hdr, infile);
2301 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2305 fprintf(stderr, "Cannot create %s.h\n",
2311 And if all goes well and an output file was provided, we create the `.c`
2312 file with the code section (if any) and the parser tables and function.
2314 if (rv == 0 && outfile) {
2315 FILE *f = open_ext(outfile, ".c");
2318 code_node_print(f, code, infile);
2319 gen_parser(f, g, infile, name);
2322 fprintf(stderr, "Cannot create %s.c\n",
2328 And that about wraps it up. We need to set the locale so that UTF-8 is
2329 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2331 ###### File: parsergen.mk
2332 parsergen : parsergen.o libscanner.o libmdcode.o
2333 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2340 int main(int argc, char *argv[])
2345 setlocale(LC_ALL,"");
2347 ## process arguments
2354 ## The SHIFT/REDUCE parser
2356 Having analysed the grammar and generated all the tables, we only need the
2357 shift/reduce engine to bring it all together.
2359 ### Goto table lookup
2361 The parser generator has nicely provided us with goto tables sorted by
2362 symbol number. We need a binary search function to find a symbol in the
2365 ###### parser functions
2367 static int search(const struct state *l, int sym)
2370 int hi = l->go_to_cnt;
2374 while (lo + 1 < hi) {
2375 int mid = (lo + hi) / 2;
2376 if (l->go_to[mid].sym <= sym)
2381 if (l->go_to[lo].sym == sym)
2382 return l->go_to[lo].state;
2387 ### The state stack.
2389 The core data structure for the parser is the stack. This tracks all the
2390 symbols that have been recognised or partially recognised.
2392 The stack usually won't grow very large - maybe a few tens of entries. So
2393 we dynamically resize the array as required but never bother to shrink it
2396 We keep the stack as two separate allocations. One, `asn_stack` stores the
2397 "abstract syntax nodes" which are created by each reduction. When we call
2398 `do_reduce` we need to pass an array of the `asn`s of the body of the
2399 production, and by keeping a separate `asn` stack, we can just pass a
2400 pointer into this stack.
2402 The other allocation stores all other stack fields of which there are six.
2403 The `state` is the most important one and guides the parsing process. The
2404 `sym` is nearly unnecessary. However when we want to free entries from the
2405 `asn_stack`, it helps to know what type they are so we can call the right
2406 freeing function. The symbol leads us to the right free function through
2409 The `indents` count and the `starts_indented` flag track the line
2410 indents in the symbol. These are used to allow indent information to
2411 guide parsing and error recovery.
2413 `newline_permitted` keeps track of whether newlines should be ignored
2414 or not, and `starts_line` records if this state started on a newline.
2416 As well as the stack of frames we have a `next` frame which is
2417 assembled from the incoming token and other information prior to
2418 pushing it onto the stack.
2420 ###### parser functions
2426 short starts_indented;
2428 short newline_permitted;
2437 Two operations are needed on the stack - shift (which is like push) and pop.
2439 Shift applies not only to terminals but also to non-terminals. When we
2440 reduce a production we will pop off entries corresponding to the body
2441 symbols, then push on an item for the head of the production. This last is
2442 exactly the same process as shifting in a terminal so we use the same
2445 To simplify other code we arrange for `shift` to fail if there is no `goto`
2446 state for the symbol. This is useful in basic parsing due to our design
2447 that we shift when we can, and reduce when we cannot. So the `shift`
2448 function reports if it could.
2450 So `shift` finds the next state. If that succeeds it extends the allocations
2451 if needed and pushes all the information onto the stacks.
2453 ###### parser functions
2455 static int shift(struct parser *p,
2457 const struct state states[])
2459 // Push an entry onto the stack
2460 int newstate = search(&states[p->next.state], p->next.sym);
2463 if (p->tos >= p->stack_size) {
2464 p->stack_size += 10;
2465 p->stack = realloc(p->stack, p->stack_size
2466 * sizeof(p->stack[0]));
2467 p->asn_stack = realloc(p->asn_stack, p->stack_size
2468 * sizeof(p->asn_stack[0]));
2470 p->stack[p->tos] = p->next;
2471 p->asn_stack[p->tos] = asn;
2473 p->next.state = newstate;
2474 p->next.indents = 0;
2475 p->next.starts_indented = 0;
2476 // if new state doesn't start a line, we inherit newline_permitted status
2477 if (states[newstate].starts_line)
2478 p->next.newline_permitted = 1;
2482 `pop` simply moves the top of stack (`tos`) back down the required amount
2483 and frees any `asn` entries that need to be freed. It is called _after_ we
2484 reduce a production, just before we `shift` the nonterminal in.
2486 ###### parser functions
2488 static void pop(struct parser *p, int num,
2489 void(*do_free)(short sym, void *asn))
2493 for (i = 0; i < num; i++) {
2494 p->next.indents += p->stack[p->tos+i].indents;
2495 do_free(p->stack[p->tos+i].sym,
2496 p->asn_stack[p->tos+i]);
2500 p->next.state = p->stack[p->tos].state;
2501 p->next.starts_indented = p->stack[p->tos].starts_indented;
2502 p->next.newline_permitted = p->stack[p->tos].newline_permitted;
2503 if (p->next.indents > p->next.starts_indented)
2504 p->next.newline_permitted = 0;
2508 ### Memory allocation
2510 The `scanner` returns tokens in a local variable - we want them in allocated
2511 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2512 a reduce is in a large buffer. Both of these require some allocation and
2513 copying, hence `memdup` and `tokcopy`.
2515 ###### parser includes
2518 ###### parser functions
2520 void *memdup(void *m, int len)
2526 memcpy(ret, m, len);
2530 static struct token *tok_copy(struct token tk)
2532 struct token *new = malloc(sizeof(*new));
2537 ### The heart of the parser.
2539 Now we have the parser. If we can shift, we do. If not and we can reduce
2540 we do. If the production we reduced was production zero, then we have
2541 accepted the input and can finish.
2543 We return whatever `asn` was returned by reducing production zero.
2545 If we can neither shift nor reduce we have an error to handle. We pop
2546 single entries off the stack until we can shift the `TK_error` symbol, then
2547 drop input tokens until we find one we can shift into the new error state.
2549 When we find `TK_in` and `TK_out` tokens which report indents we need
2550 to handle them directly as the grammar cannot express what we want to
2553 `TK_in` tokens are easy: we simply update the `next` stack frame to
2554 record how many indents there are and that the next token started with
2557 `TK_out` tokens must either be counted off against any pending indent,
2558 or must force reductions until there is a pending indent which isn't
2559 at the start of a production.
2561 `TK_newline` tokens are ignored precisely if there has been an indent
2562 since the last state which could have been at the start of a line.
2564 ###### parser includes
/* Drive the LR parser: pull tokens from 'tokens', run them through the
 * table in 'states', calling do_reduce() to build a value for each
 * reduction and do_free() to discard abandoned values.  If 'trace' is
 * non-NULL, the stack is dumped each time around the loop.  On normal
 * completion the value at the bottom of the value stack is returned.
 * NOTE(review): this extract has gaps; several control-flow lines
 * (loop headers, braces, breaks) are not visible here.
 */
2567 void *parser_run(struct token_state *tokens,
2568 const struct state states[],
2569 int (*do_reduce)(int, void**, struct token_config*, void*),
2570 void (*do_free)(short, void*),
2571 FILE *trace, const char *non_term[],
2572 struct token_config *config)
2574 struct parser p = { 0 };
2575 struct token *tk = NULL;
/* The initial state may itself start a line, which decides whether
 * NEWLINE tokens are honoured before anything is shifted. */
2579 p.next.newline_permitted = states[0].starts_line;
2581 struct token *err_tk;
2583 tk = tok_copy(token_next(tokens));
2584 p.next.sym = tk->num;
2586 parser_trace(trace, &p, tk, states, non_term, config->known_count);
/* An IN token: the next frame pushed will start indented. */
2588 if (p.next.sym == TK_in) {
2589 p.next.starts_indented = 1;
/* An OUT token: cancel one indent recorded against the top frame,
 * then reassess whether newlines are permitted again. */
2595 if (p.next.sym == TK_out) {
2596 if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
2597 (p.stack[p.tos-1].indents == 1 &&
2598 states[p.next.state].reduce_size > 1)) {
2599 p.stack[p.tos-1].indents -= 1;
2600 if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
2601 // no internal indent any more, reassess 'newline_permitted'
2602 if (states[p.stack[p.tos-1].state].starts_line)
2603 p.stack[p.tos-1].newline_permitted = 1;
2605 p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
2611 // fall through and force a REDUCE (as 'shift'
/* A NEWLINE is simply ignored unless the top frame permits it. */
2614 if (p.next.sym == TK_newline) {
2615 if (!p.tos || ! p.stack[p.tos-1].newline_permitted) {
2621 if (shift(&p, tk, states)) {
/* REDUCE: a state with reduce_prod >= 0 forces a reduction.
 * do_reduce() builds the new value into 'buf'; the matched frames
 * are popped and a copy of the value is shifted in their place. */
2625 if (states[p.next.state].reduce_prod >= 0) {
2627 int prod = states[p.next.state].reduce_prod;
2628 int size = states[p.next.state].reduce_size;
/* NOTE(review): a single static buffer makes this non-reentrant
 * and bounds reduce values to 16K — confirm acceptable upstream. */
2630 static char buf[16*1024];
2631 p.next.sym = states[p.next.state].reduce_sym;
2633 body = p.asn_stack +
2634 (p.tos - states[p.next.state].reduce_size);
2636 bufsize = do_reduce(prod, body, config, buf);
2638 pop(&p, size, do_free);
2639 shift(&p, memdup(buf, bufsize), states);
/* Neither shift nor reduce applied.  An unexpected OUT means an
 * indentation problem: synthesise the expected symbol so that the
 * parse can make progress. */
2644 if (tk->num == TK_out) {
2645 // Indent problem - synthesise tokens to get us
2647 fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
2648 p.next.sym = states[p.next.state].shift_sym;
2649 shift(&p, tok_copy(*tk), states);
2650 // FIXME need to report this error somehow
2653 /* Error. We walk up the stack until we
2654 * find a state which will accept TK_error.
2655 * We then shift in TK_error and see what state
2656 * that takes us to.
2657 * Then we discard input tokens until
2658 * we find one that is acceptable.
2661 err_tk = tok_copy(*tk);
2662 p.next.sym = TK_error;
2663 while (shift(&p, err_tk, states) == 0
2665 // discard this state
2666 pop(&p, 1, do_free);
2669 // no state accepted TK_error
/* Skip input tokens until one is acceptable in the current state,
 * keeping the indent bookkeeping balanced while we discard. */
2672 while (search(&states[p.next.state], tk->num) < 0 &&
2673 tk->num != TK_eof) {
2675 tk = tok_copy(token_next(tokens));
2676 if (tk->num == TK_in)
2677 p.next.indents += 1;
2678 if (tk->num == TK_out) {
2679 if (p.next.indents == 0)
2681 p.next.indents -= 1;
/* EOF with an empty stack: nothing left to recover — presumably the
 * loop is exited here (exit path not visible in this extract). */
2684 if (p.tos == 0 && tk->num == TK_eof)
/* The surviving value at the bottom of the value stack is the result. */
2689 ret = p.asn_stack[0];
2691 pop(&p, p.tos, do_free);
2697 ###### exported functions
/* Public entry point: run the generated parser over a token stream.
 * See parser_run() in libparser.c for the behaviour of each argument. */
2698 void *parser_run(struct token_state *tokens,
2699 const struct state states[],
2700 int (*do_reduce)(int, void**, struct token_config*, void*),
2701 void (*do_free)(short, void*),
2702 FILE *trace, const char *non_term[],
2703 struct token_config *config);
2707 Being able to visualize the parser in action can be invaluable when
2708 debugging the parser code, or trying to understand how the parse of a
2709 particular grammar progresses. The stack contains all the important
2710 state, so just printing out the stack every time around the parse loop
2711 can make it possible to see exactly what is happening.
2713 This doesn't explicitly show each SHIFT and REDUCE action. However they
2714 are easily deduced from the change between consecutive lines, and the
2715 details of each state can be found by cross referencing the states list
2716 in the stack with the "report" that parsergen can generate.
2718 For terminal symbols, we just dump the token. For non-terminals we
2719 print the name of the symbol. The look ahead token is reported at the
2720 end inside square brackets.
2722 ###### parser functions
/* Printable names for reserved token numbers, used when tracing.
 * (Further entries of this table are not visible in this extract.) */
2724 static char *reserved_words[] = {
2725 [TK_error] = "ERROR",
2728 [TK_newline] = "NEWLINE",
/* Print one stack frame as "(state" plus indent info, then flag
 * characters: 's' if the state can start a line, 'n' if newlines are
 * currently permitted in this frame. */
2731 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2733 fprintf(trace, "(%d", f->state);
2735 fprintf(trace, "%c%d", f->starts_indented?':':'.',
2737 if (states[f->state].starts_line)
2738 fprintf(trace, "s");
2739 if (f->newline_permitted)
2740 fprintf(trace, "n");
2741 fprintf(trace, ") ");
/* Dump the whole parse stack, one entry per frame: reserved tokens by
 * name, known terminals as the token text (truncated to 20 chars),
 * non-terminals by name from 'non_term'.  Finish with the current
 * look-ahead token in [square brackets]. */
2744 void parser_trace(FILE *trace, struct parser *p,
2745 struct token *tk, const struct state states[],
2746 const char *non_term[], int knowns)
2749 for (i = 0; i < p->tos; i++) {
2750 int sym = p->stack[i].sym;
2751 parser_trace_state(trace, &p->stack[i], states);
2752 if (sym < TK_reserved &&
2753 reserved_words[sym] != NULL)
2754 fputs(reserved_words[sym], trace);
2755 else if (sym < TK_reserved + knowns) {
2756 struct token *t = p->asn_stack[i];
2757 text_dump(trace, t->txt, 20);
/* sym >= TK_reserved + knowns: a non-terminal, print its name. */
2759 fputs(non_term[sym - TK_reserved - knowns],
2763 parser_trace_state(trace, &p->next, states);
2764 fprintf(trace, " [");
2765 if (tk->num < TK_reserved &&
2766 reserved_words[tk->num] != NULL)
2767 fputs(reserved_words[tk->num], trace);
2769 text_dump(trace, tk->txt, 20);
2770 fputs("]\n", trace);
2775 The obvious example for a parser is a calculator.
2777 As `scanner` provides a number parsing function using `libgmp`, it is not
2778 much work to perform arbitrary rational number calculations.
2780 This calculator takes one expression, or an equality test per line. The
2781 results are printed and if any equality test fails, the program exits with
2784 ###### File: parsergen.mk
2785 calc.c calc.h : parsergen parsergen.mdc
2786 ./parsergen --tag calc -o calc parsergen.mdc
2787 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
2788 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
2793 // what do we use for a demo-grammar? A calculator of course.
2805 #include <sys/mman.h>
2810 #include "scanner.h"
/* Release a parsed number value (body not visible in this extract). */
2816 static void free_number(struct number *n)
/* Demo driver: mmap the file named in argv[1], extract the literate
 * code sections, and run the calculator parser over the "calc" code.
 * NOTE(review): no error checking is visible for open()/mmap()/
 * code_extract() — a missing file would crash; confirm this is a
 * deliberate simplification for a test program. */
2822 int main(int argc, char *argv[])
2824 int fd = open(argv[1], O_RDONLY);
/* lseek to offset 0 from whence==2 (SEEK_END) yields the file size. */
2825 int len = lseek(fd, 0, 2);
2826 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2827 struct section *s = code_extract(file, file+len, NULL);
/* Comments are ignored by the tokenizer; '.' ',' '_' '+' '-' may
 * appear inside numbers (scanner number syntax). */
2828 struct token_config config = {
2829 .ignored = (1 << TK_line_comment)
2830 | (1 << TK_block_comment)
2833 .number_chars = ".,_+-",
/* Any second command-line argument turns on parse tracing to stderr. */
2837 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
2839 struct section *t = s->next;
2849 Session -> Session Line
2852 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
2853 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
2854 gmp_printf(" or as a decimal: %Fg\n", fl);
2858 | Expression = Expression NEWLINE ${
2859 if (mpq_equal($1.val, $3.val))
2860 gmp_printf("Both equal %Qd\n", $1.val);
2862 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
2867 | NEWLINE ${ printf("Blank line\n"); }$
2868 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
2871 Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
2872 | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
2873 | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
2875 Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
2876 | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
2877 | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
2879 Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
2880 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$