1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
25 ###### File: parsergen.c
30 ## forward declarations
41 ###### File: libparser.c
48 ###### File: parsergen.mk
51 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
54 ## Reading the grammar
56 The grammar must be stored in a markdown literate code file as parsed
57 by "[mdcode][]". It must have three top level (i.e. unreferenced)
58 sections: `header`, `code`, and `grammar`. The first two will be
59 literally copied into the generated `.c` and `.h`. files. The last
60 contains the grammar. This is tokenised with "[scanner][]".
62 If the `--tag` option is given, then any top level header that doesn't
63 start with the tag is ignored, and the tag is stripped from the rest. So
65 means that the three needed sections must be `Foo: header`, `Foo: code`,
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
/* Associativity of a precedence level. */
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
/* Fragment: scattered struct fields from the literate source — the data-type
 * name applied to non-terminal heads, plus the precedence level stored on
 * symbols and on productions (surrounding struct declarations not visible). */
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
109 The strings reported by `mdcode` and `scanner` are `struct text` which have
110 length rather than being null terminated. To help with printing and
111 comparing we define `text_is` and `prtxt`, which should possibly go in
112 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
113 which might contain control characters.
115 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
116 because that is what we need to detect tags.
/* text_is(): true when the length-counted text `t` equals the NUL-terminated
 * string `s` (both length and bytes must match).
 * NOTE(review): literate-code fragment — brace lines are not visible here. */
119 static int text_is(struct text t, char *s)
121 return (strlen(s) == t.len &&
122 strncmp(s, t.txt, t.len) == 0);
/* prtxt(): print a struct text (not NUL-terminated) with a bounded %.*s. */
124 static void prtxt(struct text t)
126 printf("%.*s", t.len, t.txt);
/* strip_tag(): like strncmp but requiring "<tag>:" at the start of *t;
 * on match, the tag, colon and following spaces are skipped.
 * Fragment: the failure return and the final adjustment of *t are on lines
 * not visible in this extraction. */
129 static int strip_tag(struct text *t, char *tag)
131 int skip = strlen(tag) + 1;
/* Reject when too short, prefix differs, or char after the tag is not ':'. */
132 if (skip >= t->len ||
133 strncmp(t->txt, tag, skip-1) != 0 ||
134 t->txt[skip-1] != ':')
/* Also swallow any spaces that follow the colon. */
136 while (skip < t->len && t->txt[skip] == ' ')
145 Productions are comprised primarily of symbols - terminal and
146 non-terminal. We do not make a syntactic distinction between the two,
147 though non-terminals must be identifiers. Non-terminal symbols are
148 those which appear in the head of a production, terminal symbols are
149 those which don't. There are also "virtual" symbols used for precedence
150 marking discussed later, and sometimes we won't know what type a symbol
153 ###### forward declarations
/* Symbol classification; `symtypes` gives a one-letter code per type for
 * reports ('U','V','T','N', indexed by enum symtype). */
154 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
155 char *symtypes = "UVTN";
159 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
160 table of known symbols and the resulting parser will report them as
161 `TK_reserved + N`. A small set of identifiers are reserved for the
162 different token types that `scanner` can report.
/* Names for the scanner token types pre-registered as reserved symbols,
 * indexed by the scanner's TK_* numbers.  Fragment: some entries are on
 * lines not visible in this extraction. */
165 static char *reserved_words[] = {
166 [TK_error] = "ERROR",
167 [TK_number] = "NUMBER",
168 [TK_ident] = "IDENTIFIER",
170 [TK_string] = "STRING",
171 [TK_multi_string] = "MULTI_STRING",
174 [TK_newline] = "NEWLINE",
180 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
181 recognised. The former is automatically expected at the end of the text
182 being parsed. The latter are always ignored by the parser.
184 All of these symbols are stored in a simple symbol table. We use the
185 `struct text` from `mdcode` to store the name and link them together
186 into a sorted list using an insertion sort.
188 We don't have separate `find` and `insert` functions as any symbol we
189 find needs to be remembered. We simply expect `find` to always return a
190 symbol, but its type might be `Unknown`.
199 ###### grammar fields
/* text_cmp(): ordering for length-counted texts — compare the common prefix,
 * then fall back to the length difference.  Fragment: the computation of
 * `len` and the prefix-diff early return are on lines not visible here. */
204 static int text_cmp(struct text a, struct text b)
209 int cmp = strncmp(a.txt, b.txt, len);
213 return a.len - b.len;
/* sym_find(): look up `s` in the grammar's sorted symbol list, inserting a
 * new calloc-zeroed (hence Unknown-typed) entry when absent — find and
 * insert are deliberately one operation, as the prose above explains.
 * Fragment: the walk body and the list splice are on lines not visible. */
216 static struct symbol *sym_find(struct grammar *g, struct text s)
218 struct symbol **l = &g->syms;
223 (cmp = text_cmp((*l)->name, s)) < 0)
227 n = calloc(1, sizeof(*n));
/* symbols_init(): pre-register every reserved token name so scanner token
 * numbers line up with symbol numbers.  Fragment view. */
236 static void symbols_init(struct grammar *g)
238 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
240 for (i = 0; i < entries; i++) {
243 t.txt = reserved_words[i];
246 t.len = strlen(t.txt);
253 ### Data types and precedence.
255 Data type specification and precedence specification are both
256 introduced by a dollar sign at the start of the line. If the next
257 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
258 precedence, otherwise it specifies a data type.
260 The data type name is simply stored and applied to the head of all
261 subsequent productions. It must be the name of a structure optionally
262 preceded by an asterisk which means a reference or "pointer". So
263 `$expression` maps to `struct expression` and `$*statement` maps to
264 `struct statement *`.
266 Any productions given before the first data type declaration will have
267 no data type associated with them and can carry no information. In
268 order to allow other non-terminals to have no type, the data type
269 `$void` can be given. This does *not* mean that `struct void` will be
270 used, but rather that no type will be associated with future
273 The precedence line must contain a list of symbols - typically
274 terminal symbols, but not necessarily. It can only contain symbols
275 that have not been seen yet, so precedence declaration must precede
276 the productions that they affect.
278 A precedence line may also contain a "virtual" symbol which is an
279 identifier preceded by `$$`. Virtual terminals carry precedence
280 information but are not included in the grammar. A production can
281 declare that it inherits the precedence of a given virtual symbol.
283 This use for `$$` precludes it from being used as a symbol in the
284 described language. Two other symbols: `${` and `}$` are also
287 Each new precedence line introduces a new precedence level and
288 declares how it associates. This level is stored in each symbol
289 listed and may be inherited by any production which uses the symbol. A
290 production inherits from the last symbol which has a precedence.
292 ###### grammar fields
/* current_type: data-type name applied to heads of subsequent productions. */
293 struct text current_type;
/* Extra token numbers for the three special marks, and their spellings;
 * these are registered with the scanner as "known" words. */
298 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
299 static const char *known[] = { "$$", "${", "}$" };
/* dollar_line(): parse a line introduced by '$' (or '$*' when isref != 0):
 * either a data-type declaration applied to later production heads, or a
 * LEFT/RIGHT/NON precedence line listing fresh symbols.  Returns an error
 * string (via `err`) on malformed input.  Fragment: much control flow is on
 * lines not visible in this extraction. */
302 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
304 struct token t = token_next(ts);
309 if (t.num != TK_ident) {
310 err = "type or assoc expected after '$'";
/* LEFT/RIGHT/NON start a precedence line; anything else names a type. */
313 if (text_is(t.txt, "LEFT"))
315 else if (text_is(t.txt, "RIGHT"))
317 else if (text_is(t.txt, "NON"))
/* Type declaration path: remember the type (and whether it is a reference). */
320 g->current_type = t.txt;
321 g->type_isref = isref;
/* "$void" means "no type": recorded as a NULL type name. */
322 if (text_is(t.txt, "void"))
323 g->current_type.txt = NULL;
325 if (t.num != TK_newline) {
326 err = "Extra tokens after type name";
333 err = "$* cannot be followed by a precedence";
337 // This is a precedence line, need some symbols.
341 while (t.num != TK_newline) {
342 enum symtype type = Terminal;
/* "$$name" declares a virtual symbol carrying this precedence level. */
344 if (t.num == TK_virtual) {
347 if (t.num != TK_ident) {
348 err = "$$ must be followed by a word";
351 } else if (t.num != TK_ident &&
353 err = "Illegal token in precedence line";
/* Precedence symbols must be new — their type is assigned here. */
356 s = sym_find(g, t.txt);
357 if (s->type != Unknown) {
358 err = "Symbols in precedence line must not already be known.";
/* Every listed symbol receives the newly introduced precedence level. */
362 s->precedence = g->prec_levels;
367 err = "No symbols given on precedence line";
/* Presumably the error path: skip to end of line — TODO confirm. */
371 while (t.num != TK_newline && t.num != TK_eof)
378 A production either starts with an identifier which is the head
379 non-terminal, or a vertical bar (`|`) in which case this production
380 uses the same head as the previous one. The identifier must be
381 followed by a `->` mark. All productions for a given non-terminal must
382 be grouped together with the `nonterminal ->` given only once.
384 After this a (possibly empty) sequence of identifiers and marks form
385 the body of the production. A virtual symbol may be given after the
386 body by preceding it with `$$`. If a virtual symbol is given, the
387 precedence of the production is that for the virtual symbol. If none
388 is given, the precedence is inherited from the last symbol in the
389 production which has a precedence specified.
391 After the optional precedence may come the `${` mark. This indicates
392 the start of a code fragment. If present, this must be on the same
393 line as the start of the production.
395 All of the text from the `${` through to the matching `}$` is
396 collected and forms the code-fragment for the production. It must all
397 be in one `code_node` of the literate code. The `}$` must be
398 at the end of a line.
400 Text in the code fragment will undergo substitutions where `$N` or
401 `$<N`, for some numeric `N`, will be replaced with a variable holding
402 the parse information for the particular symbol in the production.
403 `$0` is the head of the production, `$1` is the first symbol of the
404 body, etc. The type of `$N` for a terminal symbol is `struct token`.
405 For a non-terminal, it is whatever has been declared for that symbol.
406 The `<` may be included for symbols declared as storing a reference
407 (not a structure) and means that the reference is being moved out, so
408 it will not automatically be freed.
410 While building productions we will need to add to an array which needs to
/* array_add(): append `new` to a dynamic pointer array, growing in blocks;
 * capacity is recomputed from *cnt rather than stored separately.
 * Fragment: `step` and the count increment are on lines not visible.
 * NOTE(review): realloc result is assigned directly — original pointer is
 * lost if realloc fails; acceptable only under an abort-on-OOM policy. */
414 static void array_add(void *varray, int *cnt, void *new)
416 void ***array = varray;
/* Round *cnt up to the next multiple of `step` to get current capacity. */
419 current = ((*cnt-1) | (step-1))+1;
420 if (*cnt == current) {
423 *array = realloc(*array, current * sizeof(void*));
425 (*array)[*cnt] = new;
429 Collecting the code fragment simply involves reading tokens until we
430 hit the end token `}$`, and noting the character position of the start and
/* collect_code(): capture raw text between the opening "${" token (`start`)
 * and the matching "}$", which must lie in the same code_node.  Per the
 * caller, a result with NULL .txt signals "code fragment not closed
 * properly".  Fragment view. */
434 static struct text collect_code(struct token_state *state,
/* The fragment starts immediately after the "${" token's own text. */
439 code.txt = start.txt.txt + start.txt.len;
441 t = token_next(state);
442 while (t.node == start.node &&
443 t.num != TK_close && t.num != TK_error &&
/* Only a "}$" in the same node terminates the fragment successfully. */
445 if (t.num == TK_close && t.node == start.node)
446 code.len = t.txt.txt - code.txt;
452 Now we have all the bits we need to parse a full production.
457 ###### grammar fields
458 struct production **productions;
459 int production_count;
461 ###### production fields
463 struct symbol **body;
468 int first_production;
/* parse_production(): parse one production body (the head and "->" were
 * consumed by the caller): a sequence of idents/marks, an optional "$$virt"
 * precedence override, and an optional "${ ... }$" code fragment, ending at
 * newline/EOF.  Returns an error string via `err` on malformed input.
 * Fragment: several control-flow lines are not visible in this extraction. */
471 static char *parse_production(struct grammar *g,
473 struct token_state *state)
475 /* Head has already been parsed. */
478 struct production p, *pp;
480 memset(&p, 0, sizeof(p));
482 tk = token_next(state);
/* Body: each ident/mark becomes a body symbol; unknown symbols are
 * presumably classified here (classification line not visible). */
483 while (tk.num == TK_ident || tk.num == TK_mark) {
484 struct symbol *bs = sym_find(g, tk.txt);
485 if (bs->type == Unknown)
487 if (bs->type == Virtual) {
488 err = "Virtual symbol not permitted in production";
/* Production inherits precedence from the last body symbol having one. */
491 if (bs->precedence) {
492 p.precedence = bs->precedence;
495 array_add(&p.body, &p.body_size, bs);
496 tk = token_next(state);
/* Optional "$$name": the production takes the virtual symbol's precedence,
 * overriding anything inherited from body symbols. */
498 if (tk.num == TK_virtual) {
500 tk = token_next(state);
501 if (tk.num != TK_ident) {
502 err = "word required after $$";
505 vs = sym_find(g, tk.txt);
506 if (vs->type != Virtual) {
507 err = "symbol after $$ must be virtual";
510 p.precedence = vs->precedence;
512 tk = token_next(state);
/* Optional "${": collect the code fragment through the matching "}$". */
514 if (tk.num == TK_open) {
515 p.code = collect_code(state, tk);
516 if (p.code.txt == NULL) {
517 err = "code fragment not closed properly";
520 tk = token_next(state);
522 if (tk.num != TK_newline && tk.num != TK_eof) {
523 err = "stray tokens at end of line";
/* Success: copy the stack-built production to the heap and register it. */
526 pp = malloc(sizeof(*pp));
528 array_add(&g->productions, &g->production_count, pp);
/* Presumably the error path: skip to end of line — TODO confirm. */
531 while (tk.num != TK_newline && tk.num != TK_eof)
532 tk = token_next(state);
536 With the ability to parse production and dollar-lines, we have nearly all
537 that we need to parse a grammar from a `code_node`.
539 The head of the first production will effectively be the `start` symbol of
540 the grammar. However it won't _actually_ be so. Processing the grammar is
541 greatly simplified if the real start symbol only has a single production,
542 and expects `$eof` as the final terminal. So when we find the first
543 explicit production we insert an extra production as production zero which
546 ###### Example: production 0
549 where `START` is the first non-terminal given.
551 ###### create production zero
/* Create production zero: "$start -> HEAD $eof".  This gives the grammar a
 * synthetic start symbol with exactly one production ending in $eof, which
 * (per the prose above) simplifies later processing. */
552 struct production *p = calloc(1,sizeof(*p));
553 struct text start = {"$start",6};
554 struct text eof = {"$eof",4};
555 p->head = sym_find(g, start);
556 p->head->type = Nonterminal;
/* Body is the user's first head symbol followed by the $eof terminal. */
557 array_add(&p->body, &p->body_size, head);
558 array_add(&p->body, &p->body_size, sym_find(g, eof));
559 p->head->first_production = g->production_count;
560 array_add(&g->productions, &g->production_count, p);
562 Now we are ready to read in the grammar.
/* grammar_read(): tokenise the grammar code_node and dispatch each line:
 * "ident ->" starts a production group, "|" continues one, "$"/"$*"
 * introduce dollar-lines, anything else is an error reported with its line
 * number.  Fragment: many lines of this function are not visible here. */
565 static struct grammar *grammar_read(struct code_node *code)
567 struct token_config conf = {
570 .words_marks = known,
571 .known_count = sizeof(known)/sizeof(known[0]),
/* Comments and string tokens are irrelevant to the grammar itself. */
573 .ignored = (1 << TK_line_comment)
574 | (1 << TK_block_comment)
577 | (1 << TK_multi_string)
582 struct token_state *state = token_open(code, &conf);
/* `head` tracks the current production head so "|" lines can reuse it. */
584 struct symbol *head = NULL;
588 g = calloc(1, sizeof(*g));
591 for (tk = token_next(state); tk.num != TK_eof;
592 tk = token_next(state)) {
593 if (tk.num == TK_newline)
/* New head: an identifier at start of line. */
595 if (tk.num == TK_ident) {
597 head = sym_find(g, tk.txt);
/* All productions for one non-terminal must be grouped together. */
598 if (head->type == Nonterminal)
/* NOTE(review): error string reads "has already be used." — should
 * presumably be "been used"; string left untouched here. */
599 err = "This non-terminal has already be used.";
600 else if (head->type == Virtual)
601 err = "Virtual symbol not permitted in head of production";
603 head->type = Nonterminal;
/* The head inherits the most recent $type declaration. */
604 head->struct_name = g->current_type;
605 head->isref = g->type_isref;
/* The very first explicit production triggers insertion of the
 * synthetic "$start -> HEAD $eof" production zero. */
606 if (g->production_count == 0) {
607 ## create production zero
609 head->first_production = g->production_count;
610 tk = token_next(state);
611 if (tk.num == TK_mark &&
612 text_is(tk.txt, "->"))
613 err = parse_production(g, head, state);
615 err = "'->' missing in production";
617 } else if (tk.num == TK_mark
618 && text_is(tk.txt, "|")) {
619 // another production for same non-term
621 err = parse_production(g, head, state);
623 err = "First production must have a head";
624 } else if (tk.num == TK_mark
625 && text_is(tk.txt, "$")) {
626 err = dollar_line(state, g, 0);
627 } else if (tk.num == TK_mark
628 && text_is(tk.txt, "$*")) {
629 err = dollar_line(state, g, 1);
631 err = "Unrecognised token at start of line.";
639 fprintf(stderr, "Error at line %d: %s\n",
646 ## Analysing the grammar
648 The central task in analysing the grammar is to determine a set of
649 states to drive the parsing process. This will require finding
650 various sets of symbols and of "items". Some of these sets will need
651 to attach information to each element in the set, so they are more
654 Each "item" may have a 'look-ahead' or `LA` set associated with
655 it. Many of these will be the same as each other. To avoid memory
656 wastage, and to simplify some comparisons of sets, these sets will be
657 stored in a list of unique sets, each assigned a number.
659 Once we have the data structures in hand to manage these sets and
660 lists, we can start setting the 'nullable' flag, build the 'FIRST'
661 sets, and then create the item sets which define the various states.
665 Though we don't only store symbols in these sets, they are the main
666 things we store, so they are called symbol sets or "symsets".
668 A symset has a size, an array of shorts, and an optional array of data
669 values, which are also shorts. If the array of data is not present,
670 we store an impossible pointer, as `NULL` is used to indicate that no
671 memory has been allocated yet.
/* A symset holds sorted shorts in `syms`; `data` is either a parallel value
 * array, or the impossible address NO_DATA when no per-symbol data is kept
 * (NULL means "not yet allocated", as the prose above explains). */
676 unsigned short *syms, *data;
678 #define NO_DATA ((unsigned short *)1)
679 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
680 const struct symset INIT_DATASET = { 0, NULL, NULL };
682 The arrays of shorts are allocated in blocks of 8 and are kept sorted
683 by using an insertion sort. We don't explicitly record the amount of
684 allocated space as it can be derived directly from the current `cnt` using
685 `((cnt - 1) | 7) + 1`.
/* symset_add(): insert key (with value, when the set carries data) keeping
 * `syms` sorted; storage grows in blocks of 8, capacity recomputed from cnt.
 * Fragment: the insertion-point store and count increment are not visible.
 * NOTE(review): realloc results assigned directly — original pointers lost
 * on failure; acceptable only under an abort-on-OOM policy. */
688 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
/* Capacity is cnt rounded up to the next multiple of 8. */
691 int current = ((s->cnt-1) | 7) + 1;
692 if (current == s->cnt) {
694 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
695 if (s->data != NO_DATA)
696 s->data = realloc(s->data, sizeof(*s->data) * current);
/* Insertion sort: shift larger keys (and their data) up one slot. */
699 while (i > 0 && s->syms[i-1] > key) {
700 s->syms[i] = s->syms[i-1];
701 if (s->data != NO_DATA)
702 s->data[i] = s->data[i-1];
706 if (s->data != NO_DATA)
711 Finding a symbol (or item) in a `symset` uses a simple binary search.
712 We return the index where the value was found (so data can be accessed),
713 or `-1` to indicate failure.
/* symset_find(): binary search for `key`; returns its index (so data can be
 * accessed) or -1.  Fragment: initialisation of lo/hi, the empty-set guard
 * and the return statements are on lines not visible here. */
715 static int symset_find(struct symset *ss, unsigned short key)
722 while (lo + 1 < hi) {
723 int mid = (lo + hi) / 2;
724 if (ss->syms[mid] <= key)
729 if (ss->syms[lo] == key)
734 We will often want to form the union of two symsets. When we do, we
735 will often want to know if anything changed (as they might mean there
736 is more work to do). So `symset_union` reports whether anything was
737 added to the first set. We use a slow quadratic approach as these
738 sets don't really get very big. If profiling shows this to be a problem it
739 can be optimised later.
/* symset_union(): add to `a` every member of `b` not already present
 * (copying b's data value when b carries data); returns whether `a`
 * changed, which drives the fixed-point loops elsewhere.  Deliberately
 * quadratic — see the prose above.  Fragment view. */
741 static int symset_union(struct symset *a, struct symset *b)
745 for (i = 0; i < b->cnt; i++)
746 if (symset_find(a, b->syms[i]) < 0) {
747 unsigned short data = 0;
748 if (b->data != NO_DATA)
750 symset_add(a, b->syms[i], data);
756 And of course we must be able to free a symset.
/* symset_free(): release the syms array and, when present (i.e. not the
 * NO_DATA sentinel), the data array.  Fragment view. */
758 static void symset_free(struct symset ss)
761 if (ss.data != NO_DATA)
767 Some symsets are simply stored somewhere appropriate in the `grammar`
768 data structure, others need a bit of indirection. This applies
769 particularly to "LA" sets. These will be paired with "items" in an
770 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
771 stored in the `data` field, so we need a mapping from a small (short)
772 number to an LA `symset`.
774 As mentioned earlier this involves creating a list of unique symsets.
776 For now, we just use a linear list sorted by insertion. A skiplist
777 would be more efficient and may be added later.
784 struct setlist *next;
787 ###### grammar fields
788 struct setlist *sets;
/* ss_cmp(): total order on symsets — shorter sets first, then element-wise
 * comparison of keys.  Fragment: the early returns on non-zero diff are on
 * lines not visible here. */
793 static int ss_cmp(struct symset a, struct symset b)
796 int diff = a.cnt - b.cnt;
799 for (i = 0; i < a.cnt; i++) {
800 diff = (int)a.syms[i] - (int)b.syms[i];
/* save_set(): intern `ss` in the grammar's sorted list of unique symsets,
 * returning its assigned number; a new node is allocated only when no equal
 * set exists.  Fragment: the splice and numbering lines are not visible. */
807 static int save_set(struct grammar *g, struct symset ss)
809 struct setlist **sl = &g->sets;
813 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
820 s = malloc(sizeof(*s));
829 Finding a set by number is currently performed by a simple linear search.
830 If this turns out to hurt performance, we can store the sets in a dynamic
831 array like the productions.
/* set_find(): map a saved-set number back to its symset by linear walk of
 * the setlist (see the performance note above).  Fragment view. */
833 static struct symset set_find(struct grammar *g, int num)
835 struct setlist *sl = g->sets;
836 while (sl && sl->num != num)
842 ### Setting `nullable`
844 We set `nullable` on the head symbol for any production for which all
845 the body symbols (if any) are nullable. As this is a recursive
846 definition, any change in the `nullable` setting means that we need to
847 re-evaluate where it needs to be set.
849 We simply loop around performing the same calculations until no more
/* set_nullable(): fixed-point computation — mark a head nullable when every
 * body symbol of some production is nullable; loop until no change.
 * Fragment: the check_again updates and loop `continue`s are not visible. */
856 static void set_nullable(struct grammar *g)
859 while (check_again) {
862 for (p = 0; p < g->production_count; p++) {
863 struct production *pr = g->productions[p];
/* Already known nullable: nothing further to learn from this production. */
866 if (pr->head->nullable)
/* Scan stops early at the first non-nullable body symbol. */
868 for (s = 0; s < pr->body_size; s++)
869 if (! pr->body[s]->nullable)
/* Reached the end: whole body (possibly empty) is nullable. */
871 if (s == pr->body_size) {
872 pr->head->nullable = 1;
879 ### Setting `can_eol`
881 In order to be able to ignore newline tokens when not relevant, but
882 still include them in the parse when needed, we will need to know
883 which states can start a "line-like" section of code. We ignore
884 newlines when there is an indent since the most recent start of a
887 To know what is line-like, we first need to know which symbols can end
888 a line-like section, which is precisely those which can end with a
889 newline token. These symbols don't necessarily always end with a
890 newline, but they can. Hence they are not described as "lines" but
893 Clearly the `TK_newline` token can end with a newline. Any symbol
894 which is the head of a production that contains a line-ending symbol
895 followed only by nullable symbols is also a line-ending symbol. We
896 use a new field `can_eol` to record this attribute of symbols, and
897 compute it in a repetitive manner similar to `set_nullable`.
/* set_can_eol(): fixed-point computation of the "can end a line" property.
 * Seeded with TK_newline; a head can_eol when some production body contains
 * a can_eol symbol followed only by nullable symbols (scanned from the
 * tail).  Fragment: check_again updates and loop breaks are not visible. */
903 static void set_can_eol(struct grammar *g)
906 g->symtab[TK_newline]->can_eol = 1;
907 while (check_again) {
910 for (p = 0; p < g->production_count; p++) {
911 struct production *pr = g->productions[p];
914 if (pr->head->can_eol)
/* Walk backwards: trailing nullable symbols may be skipped. */
917 for (s = pr->body_size - 1; s >= 0; s--) {
918 if (pr->body[s]->can_eol) {
919 pr->head->can_eol = 1;
/* A non-nullable, non-can_eol symbol ends the backward scan. */
923 if (!pr->body[s]->nullable)
930 ### Building the `first` sets
932 When calculating what can follow a particular non-terminal, we will need to
933 know what the "first" terminal in any subsequent non-terminal might be. So
934 we calculate the `first` set for every non-terminal and store them in an
935 array. We don't bother recording the "first" set for terminals as they are
938 As the "first" for one symbol might depend on the "first" of another,
939 we repeat the calculations until no changes happen, just like with
940 `set_nullable`. This makes use of the fact that `symset_union`
941 reports if any change happens.
943 The core of this which finds the "first" of part of a production body
944 will be reused for computing the "follow" sets, so we split it out
945 into a separate function.
947 ###### grammar fields
948 struct symset *first;
/* add_first(): union into `target` the FIRST set of the production body
 * starting at `start`; terminals contribute themselves, non-terminals their
 * FIRST sets, and the scan presumably stops at the first non-nullable
 * symbol (break lines not visible).  When `to_end` is non-NULL it reports
 * whether the scan reached the end of the body (i.e. all scanned symbols
 * were nullable).  Returns whether `target` changed. */
952 static int add_first(struct production *pr, int start,
953 struct symset *target, struct grammar *g,
958 for (s = start; s < pr->body_size; s++) {
959 struct symbol *bs = pr->body[s];
960 if (bs->type == Terminal) {
961 if (symset_find(target, bs->num) < 0) {
962 symset_add(target, bs->num, 0);
966 } else if (symset_union(target, &g->first[bs->num]))
972 *to_end = (s == pr->body_size);
/* build_first(): allocate one FIRST symset per symbol, then iterate
 * add_first over all productions until a full pass makes no change
 * (symset_union's change-report drives the loop).  Fragment view. */
976 static void build_first(struct grammar *g)
980 g->first = calloc(g->num_syms, sizeof(g->first[0]));
981 for (s = 0; s < g->num_syms; s++)
982 g->first[s] = INIT_SYMSET;
984 while (check_again) {
987 for (p = 0; p < g->production_count; p++) {
988 struct production *pr = g->productions[p];
989 struct symset *head = &g->first[pr->head->num];
991 if (add_first(pr, 0, head, g, NULL))
997 ### Building the `follow` sets.
999 There are two different situations when we will want to generate "follow"
1000 sets. If we are doing an SLR analysis, we want to generate a single
1001 "follow" set for each non-terminal in the grammar. That is what is
1002 happening here. If we are doing an LALR or LR analysis we will want
1003 to generate a separate "LA" set for each item. We do that later
1004 in state generation.
1006 There are two parts to generating a "follow" set. Firstly we look at
1007 every place that any non-terminal appears in the body of any
1008 production, and we find the set of possible "first" symbols after
1009 there. This is done using `add_first` above and only needs to be done
1010 once as the "first" sets are now stable and will not change.
/* FOLLOW pass 1: for each non-terminal occurrence in any body (except the
 * last position), add the FIRST of everything after it.  One pass suffices
 * since FIRST sets are already stable.  Fragment view. */
1014 for (p = 0; p < g->production_count; p++) {
1015 struct production *pr = g->productions[p];
1018 for (b = 0; b < pr->body_size - 1; b++) {
1019 struct symbol *bs = pr->body[b];
/* Terminals don't need FOLLOW sets — skip them. */
1020 if (bs->type == Terminal)
1022 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1026 The second part is to add the "follow" set of the head of a production
1027 to the "follow" sets of the final symbol in the production, and any
1028 other symbol which is followed only by `nullable` symbols. As this
1029 depends on "follow" itself we need to repeatedly perform the process
1030 until no further changes happen.
/* FOLLOW pass 2: propagate the head's FOLLOW to the last body symbol and to
 * any earlier symbol followed only by nullables; the unusual for-header
 * restarts from p=0 whenever a full sweep changed something, giving the
 * required fixed point.  Fragment: some lines are not visible here. */
1034 for (again = 0, p = 0;
1035 p < g->production_count;
1036 p < g->production_count-1
1037 ? p++ : again ? (again = 0, p = 0)
1039 struct production *pr = g->productions[p];
/* Walk the body backwards; presumably stops once a non-nullable symbol
 * has been handled (break line not visible). */
1042 for (b = pr->body_size - 1; b >= 0; b--) {
1043 struct symbol *bs = pr->body[b];
1044 if (bs->type == Terminal)
1046 if (symset_union(&g->follow[bs->num],
1047 &g->follow[pr->head->num]))
1054 We now just need to create and initialise the `follow` list to get a
1057 ###### grammar fields
1058 struct symset *follow;
/* build_follow(): allocate and zero-init one FOLLOW symset per symbol, then
 * run the two passes included above.  Fragment view. */
1061 static void build_follow(struct grammar *g)
1064 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1065 for (s = 0; s < g->num_syms; s++)
1066 g->follow[s] = INIT_SYMSET;
1070 ### Building itemsets and states
1072 There are three different levels of detail that can be chosen for
1073 building the itemsets and states for the LR grammar. They are:
1075 1. LR(0) or SLR(1), where no look-ahead is considered.
1076 2. LALR(1) where we build look-ahead sets with each item and merge
1077 the LA sets when we find two paths to the same "kernel" set of items.
1078 3. LR(1) where different look-ahead for any item in the set means
1079 a different state must be created.
1081 ###### forward declarations
/* Analysis level: LR0/LR05/SLR carry no per-item LA sets; LALR merges LA
 * sets when kernels match; LR1 keeps distinct states for distinct LA. */
1082 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1084 We need to be able to look through existing states to see if a newly
1085 generated state already exists. For now we use a simple sorted linked
1088 An item is a pair of numbers: the production number and the position of
1089 "DOT", which is an index into the body of the production.
1090 As the numbers are not enormous we can combine them into a single "short"
1091 and store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1092 production number (so 2000 productions with maximum of 31 symbols in the
1095 Comparing the itemsets will be a little different to comparing symsets
1096 as we want to do the lookup after generating the "kernel" of an
1097 itemset, so we need to ignore the offset=zero items which are added during
1100 To facilitate this, we modify the "DOT" number so that "0" sorts to
1101 the end of the list in the symset, and then only compare items before
/* Pack (production, DOT index) into one short: low 11 bits hold the
 * production number, the high 5 bits hold (31-index) so that index 0 sorts
 * to the end of a symset — exactly the property the kernel comparison above
 * relies on.  Fragment: brace lines not visible. */
1105 static inline unsigned short item_num(int production, int index)
1107 return production | ((31-index) << 11);
1109 static inline int item_prod(unsigned short item)
1111 return item & 0x7ff;
1113 static inline int item_index(unsigned short item)
1115 return (31-(item >> 11)) & 0x1f;
1118 For LR(1) analysis we need to compare not just the itemset in a state
1119 but also the LA sets. As we assign each unique LA set a number, we
1120 can just compare the symset and the data values together.
/* itemset_cmp(): order two itemsets by their kernels — items with DOT==0
 * sort last (see item_num) and are ignored; under LR1 the interned LA-set
 * numbers in `data` must match as well.  Fragment: the loop header and
 * several return statements are on lines not visible here. */
1123 static int itemset_cmp(struct symset a, struct symset b,
1124 enum grammar_type type)
/* Walk kernel items (DOT > 0) common to both sets. */
1130 i < a.cnt && i < b.cnt &&
1131 item_index(a.syms[i]) > 0 &&
1132 item_index(b.syms[i]) > 0;
1134 int diff = a.syms[i] - b.syms[i];
/* LR1 only: compare the LA-set numbers too. */
1138 diff = a.data[i] - b.data[i];
/* Determine which set's kernel ended first. */
1143 if (i == a.cnt || item_index(a.syms[i]) == 0)
1147 if (i == b.cnt || item_index(b.syms[i]) == 0)
1153 if (type < LR1 || av == -1)
1156 a.data[i] - b.data[i];
1159 And now we can build the list of itemsets. The lookup routine returns
1160 both a success flag and a pointer to where in the list an insert
1161 should happen, so we don't need to search a second time.
1165 struct itemset *next;
1167 struct symset items;
1168 struct symset go_to;
1173 ###### grammar fields
1174 struct itemset *items;
/* itemset_find(): search the sorted linked list of itemsets for one whose
 * kernel matches; returns a found flag and, via `where`, the insertion
 * point so the caller need not search again.  Fragment: the comparisons on
 * `diff` and the returns are on lines not visible here. */
1178 static int itemset_find(struct grammar *g, struct itemset ***where,
1179 struct symset kernel, enum grammar_type type)
1181 struct itemset **ip;
1183 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1184 struct itemset *i = *ip;
1186 diff = itemset_cmp(i->items, kernel, type);
1199 Adding an itemset may require merging the LA sets if LALR analysis is
1200 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1201 clear the `completed` flag so that the dependants of this itemset will be
1202 recalculated and their LA sets updated.
1204 `add_itemset` must consume the symsets it is passed, either by adding
1205 them to a data structure, or freeing them.
/* add_itemset(): intern a kernel itemset, consuming `ss`.  A new state is
 * created when no match exists; otherwise (LALR case below) the LA sets of
 * the existing state are merged with the new ones, and — per the prose
 * above — any growth clears the state's `completed` flag so dependants are
 * recomputed.  Fragment: several lines are not visible here. */
1207 static int add_itemset(struct grammar *g, struct symset ss,
1208 enum grammar_type type, int starts_line)
1210 struct itemset **where, *is;
1212 int found = itemset_find(g, &where, ss, type);
/* Not found: allocate a fresh itemset/state numbered g->states. */
1214 is = calloc(1, sizeof(*is));
1215 is->state = g->states;
1219 is->go_to = INIT_DATASET;
1220 is->starts_line = starts_line;
/* Found (LALR): merge each item's LA set with the existing one. */
1229 for (i = 0; i < ss.cnt; i++) {
1230 struct symset temp = INIT_SYMSET, s;
/* Identical interned LA-set numbers need no merge. */
1231 if (ss.data[i] == is->items.data[i])
1233 s = set_find(g, is->items.data[i]);
1234 symset_union(&temp, &s);
1235 s = set_find(g, ss.data[i]);
/* Any addition means the merged set must be re-interned. */
1236 if (symset_union(&temp, &s)) {
1237 is->items.data[i] = save_set(g, temp);
1248 To build all the itemsets, we first insert the initial itemset made
1249 from production zero, complete each itemset, and then generate new
1250 itemsets from old until no new ones can be made.
1252 Completing an itemset means finding all the items where "DOT" is followed by
1253 a nonterminal and adding "DOT=0" items for every production from that
1254 non-terminal - providing each item hasn't already been added.
1256 If LA sets are needed, the LA set for each new item is found using
1257 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1258 be supplemented by the LA set for the item which produces the new item.
1260 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1261 is used in the next stage.
1263 NOTE: precedence handling should happen here - I haven't written this yet
1266 ###### complete itemset
/* Complete the itemset: for every item whose DOT precedes a non-terminal,
 * add DOT=0 items for each of that non-terminal's productions; symbols
 * after DOT are collected in `done` for the derivation step.  When LA sets
 * are in use, the new items' LA is FIRST of the tail plus (if the tail is
 * nullable) the parent item's LA.  Fragment: several lines not visible. */
1267 for (i = 0; i < is->items.cnt; i++) {
1268 int p = item_prod(is->items.syms[i]);
1269 int bs = item_index(is->items.syms[i]);
1270 struct production *pr = g->productions[p];
1273 struct symset LA = INIT_SYMSET;
1274 unsigned short sn = 0;
/* DOT at end of body: nothing follows, skip. */
1276 if (bs == pr->body_size)
/* Record the symbol after DOT for the goto/derive stage. */
1279 if (symset_find(&done, s->num) < 0)
1280 symset_add(&done, s->num, 0);
/* Only non-terminals generate new DOT=0 items. */
1281 if (s->type != Nonterminal)
/* LA for new items: FIRST of the rest of the body... */
1287 add_first(pr, bs+1, &LA, g, &to_end);
/* ...plus the parent's LA when the rest is all nullable. */
1289 struct symset ss = set_find(g, is->items.data[i]);
1290 symset_union(&LA, &ss);
/* Intern the LA set; re-fetch so LA aliases the interned copy. */
1292 sn = save_set(g, LA);
1293 LA = set_find(g, sn);
1296 /* Add productions for this symbol */
1297 for (p2 = s->first_production;
1298 p2 < g->production_count &&
1299 g->productions[p2]->head == s;
1301 int itm = item_num(p2, 0);
1302 int pos = symset_find(&is->items, itm);
1304 symset_add(&is->items, itm, sn);
1305 /* Will have re-ordered, so start
1306 * from beginning again */
/* Item already present: merge LA sets (LALR and above). */
1308 } else if (type >= LALR) {
1309 struct symset ss = set_find(g, is->items.data[pos]);
1310 struct symset tmp = INIT_SYMSET;
1312 symset_union(&tmp, &ss);
1313 if (symset_union(&tmp, &LA)) {
1314 is->items.data[pos] = save_set(g, tmp);
1322 For each symbol we found (and placed in `done`) we collect all the items for
1323 which this symbol is next, and create a new itemset with "DOT" advanced over
1324 the symbol. This is then added to the collection of itemsets (or merged
1325 with a pre-existing itemset).
1327 ###### derive itemsets
1328 // Now we have a completed itemset, so we need to
1329 // compute all the 'next' itemsets and create them
1330 // if they don't exist.
1331 for (i = 0; i < done.cnt; i++) {
1333 unsigned short state;
1334 int starts_line = 0;
1335 struct symbol *sym = g->symtab[done.syms[i]];
1336 struct symset newitemset = INIT_SYMSET;
1338 newitemset = INIT_DATASET;
1341 (sym->nullable && is->starts_line))
1343 for (j = 0; j < is->items.cnt; j++) {
1344 int itm = is->items.syms[j];
1345 int p = item_prod(itm);
1346 int bp = item_index(itm);
1347 struct production *pr = g->productions[p];
1348 unsigned short la = 0;
1351 if (bp == pr->body_size)
1353 if (pr->body[bp] != sym)
1356 la = is->items.data[j];
1357 pos = symset_find(&newitemset, pr->head->num);
1359 symset_add(&newitemset, item_num(p, bp+1), la);
1360 else if (type >= LALR) {
1361 // Need to merge la set.
1362 int la2 = newitemset.data[pos];
1364 struct symset ss = set_find(g, la2);
1365 struct symset LA = INIT_SYMSET;
1366 symset_union(&LA, &ss);
1367 ss = set_find(g, la);
1368 if (symset_union(&LA, &ss))
1369 newitemset.data[pos] = save_set(g, LA);
1375 state = add_itemset(g, newitemset, type, starts_line);
1376 if (symset_find(&is->go_to, done.syms[i]) < 0)
1377 symset_add(&is->go_to, done.syms[i], state);
1380 All that is left is to create the initial itemset from production zero, and
1381 with `TK_eof` as the LA set.
1384 static void build_itemsets(struct grammar *g, enum grammar_type type)
1386 struct symset first = INIT_SYMSET;
1389 unsigned short la = 0;
1391 // LA set just has eof
1392 struct symset eof = INIT_SYMSET;
1393 symset_add(&eof, TK_eof, 0);
1394 la = save_set(g, eof);
1395 first = INIT_DATASET;
1397 // production 0, offset 0 (with no data)
1398 symset_add(&first, item_num(0, 0), la);
1399 add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
1400 for (again = 0, is = g->items;
1402 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1404 struct symset done = INIT_SYMSET;
1415 ### Completing the analysis.
1417 The exact process of analysis depends on which level was requested. For
1418 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1419 `LR1` we need first, but not follow. For `SLR` we need both.
1421 We don't build the "action" tables that you might expect as the parser
1422 doesn't use them. They will be calculated to some extent if needed for
1425 Once we have built everything we allocate arrays for the two lists:
1426 symbols and itemsets. This allows more efficient access during reporting.
1427 The symbols are grouped as terminals and non-terminals and we record the
1428 changeover point in `first_nonterm`.
1430 ###### grammar fields
1431 struct symbol **symtab;
1432 struct itemset **statetab;
1435 ###### grammar_analyse
1437 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1441 int snum = TK_reserved;
1442 for (s = g->syms; s; s = s->next)
1443 if (s->num < 0 && s->type == Terminal) {
1447 g->first_nonterm = snum;
1448 for (s = g->syms; s; s = s->next)
1454 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1455 for (s = g->syms; s; s = s->next)
1456 g->symtab[s->num] = s;
1466 build_itemsets(g, type);
1468 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1469 for (is = g->items; is ; is = is->next)
1470 g->statetab[is->state] = is;
1473 ## Reporting on the Grammar
1475 The purpose of the report is to give the grammar developer insight into
1476 how the grammar parser will work. It is basically a structured dump of
1477 all the tables that have been generated, plus a description of any conflicts.
1479 ###### grammar_report
1480 static int grammar_report(struct grammar *g, enum grammar_type type)
1486 return report_conflicts(g, type);
1489 Firstly we have the complete list of symbols, together with the
1490 "FIRST" set if that was generated. We add a mark to each symbol to
1491 show if it can end in a newline (`>`), or if it is nullable (`.`).
1495 static void report_symbols(struct grammar *g)
1499 printf("SYMBOLS + FIRST:\n");
1501 printf("SYMBOLS:\n");
1503 for (n = 0; n < g->num_syms; n++) {
1504 struct symbol *s = g->symtab[n];
1508 printf(" %c%c%3d%c: ",
1509 s->nullable ? '.':' ',
1510 s->can_eol ? '>':' ',
1511 s->num, symtypes[s->type]);
1514 printf(" (%d%s)", s->precedence,
1515 assoc_names[s->assoc]);
1517 if (g->first && s->type == Nonterminal) {
1520 for (j = 0; j < g->first[n].cnt; j++) {
1523 prtxt(g->symtab[g->first[n].syms[j]]->name);
1530 Then we have the follow sets if they were computed.
1532 static void report_follow(struct grammar *g)
1535 printf("FOLLOW:\n");
1536 for (n = 0; n < g->num_syms; n++)
1537 if (g->follow[n].cnt) {
1541 prtxt(g->symtab[n]->name);
1542 for (j = 0; j < g->follow[n].cnt; j++) {
1545 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1551 And finally the item sets. These include the GO TO tables and, for
1552 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1553 it up a bit. First the items, with production number and associativity.
1555 static void report_item(struct grammar *g, int itm)
1557 int p = item_prod(itm);
1558 int dot = item_index(itm);
1559 struct production *pr = g->productions[p];
1563 prtxt(pr->head->name);
1565 for (i = 0; i < pr->body_size; i++) {
1566 printf(" %s", (dot == i ? ". ": ""));
1567 prtxt(pr->body[i]->name);
1569 if (dot == pr->body_size)
1573 printf(" (%d%s)", pr->precedence,
1574 assoc_names[pr->assoc]);
1578 The LA sets which are (possibly) reported with each item:
1580 static void report_la(struct grammar *g, int lanum)
1582 struct symset la = set_find(g, lanum);
1586 printf(" LOOK AHEAD(%d)", lanum);
1587 for (i = 0; i < la.cnt; i++) {
1590 prtxt(g->symtab[la.syms[i]]->name);
1595 Then the go to sets:
1598 static void report_goto(struct grammar *g, struct symset gt)
1603 for (i = 0; i < gt.cnt; i++) {
1605 prtxt(g->symtab[gt.syms[i]]->name);
1606 printf(" -> %d\n", gt.data[i]);
1610 Now we can report all the item sets complete with items, LA sets, and GO TO.
1612 static void report_itemsets(struct grammar *g)
1615 printf("ITEM SETS(%d)\n", g->states);
1616 for (s = 0; s < g->states; s++) {
1618 struct itemset *is = g->statetab[s];
1619 printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
1620 for (j = 0; j < is->items.cnt; j++) {
1621 report_item(g, is->items.syms[j]);
1622 if (is->items.data != NO_DATA)
1623 report_la(g, is->items.data[j]);
1625 report_goto(g, is->go_to);
1629 ### Reporting conflicts
1631 Conflict detection varies a lot among different analysis levels. However
1632 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1633 LR1 are also similar as they have FOLLOW or LA sets.
1637 ## conflict functions
1639 static int report_conflicts(struct grammar *g, enum grammar_type type)
1642 printf("Conflicts:\n");
1644 cnt = conflicts_lr0(g, type);
1646 cnt = conflicts_slr(g, type);
1648 printf(" - no conflicts\n");
1652 LR0 conflicts are any state which has both a reducible item and
1653 a shiftable item, or two reducible items.
1655 LR05 conflicts only occur if two possible reductions exist,
1656 as shifts always over-ride reductions.
1658 ###### conflict functions
1659 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1663 for (i = 0; i < g->states; i++) {
1664 struct itemset *is = g->statetab[i];
1665 int last_reduce = -1;
1666 int prev_reduce = -1;
1667 int last_shift = -1;
1671 for (j = 0; j < is->items.cnt; j++) {
1672 int itm = is->items.syms[j];
1673 int p = item_prod(itm);
1674 int bp = item_index(itm);
1675 struct production *pr = g->productions[p];
1677 if (bp == pr->body_size) {
1678 prev_reduce = last_reduce;
1682 if (pr->body[bp]->type == Terminal)
1685 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1686 printf(" State %d has both SHIFT and REDUCE:\n", i);
1687 report_item(g, is->items.syms[last_shift]);
1688 report_item(g, is->items.syms[last_reduce]);
1691 if (prev_reduce >= 0) {
1692 printf(" State %d has 2 (or more) reducible items\n", i);
1693 report_item(g, is->items.syms[prev_reduce]);
1694 report_item(g, is->items.syms[last_reduce]);
1701 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1702 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1703 in the source of the look ahead set.
1705 We build two datasets to reflect the "action" table: one which maps
1706 terminals to items where that terminal could be shifted and another
1707 which maps terminals to items that could be reduced when the terminal
1708 is in look-ahead. We report when we get conflicts between the two.
1710 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1715 for (i = 0; i < g->states; i++) {
1716 struct itemset *is = g->statetab[i];
1717 struct symset shifts = INIT_DATASET;
1718 struct symset reduce = INIT_DATASET;
1722 /* First collect the shifts */
1723 for (j = 0; j < is->items.cnt; j++) {
1724 unsigned short itm = is->items.syms[j];
1725 int p = item_prod(itm);
1726 int bp = item_index(itm);
1727 struct production *pr = g->productions[p];
1729 if (bp < pr->body_size &&
1730 pr->body[bp]->type == Terminal) {
1732 int sym = pr->body[bp]->num;
1733 if (symset_find(&shifts, sym) < 0)
1734 symset_add(&shifts, sym, itm);
1737 /* Now look for reduction and conflicts */
1738 for (j = 0; j < is->items.cnt; j++) {
1739 unsigned short itm = is->items.syms[j];
1740 int p = item_prod(itm);
1741 int bp = item_index(itm);
1742 struct production *pr = g->productions[p];
1744 if (bp < pr->body_size)
1749 la = g->follow[pr->head->num];
1751 la = set_find(g, is->items.data[j]);
1753 for (k = 0; k < la.cnt; k++) {
1754 int pos = symset_find(&shifts, la.syms[k]);
1756 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1757 prtxt(g->symtab[la.syms[k]]->name);
1759 report_item(g, shifts.data[pos]);
1760 report_item(g, itm);
1763 pos = symset_find(&reduce, la.syms[k]);
1765 symset_add(&reduce, la.syms[k], itm);
1768 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1769 prtxt(g->symtab[la.syms[k]]->name);
1771 report_item(g, itm);
1772 report_item(g, reduce.data[pos]);
1776 symset_free(shifts);
1777 symset_free(reduce);
1783 ## Generating the parser
1785 The exported part of the parser is the `parse_XX` function, where the name
1786 `XX` is based on the name of the parser files.
1788 This takes a `code_node`, a partially initialized `token_config`, and an
1789 optional `FILE` to send tracing to. The `token_config` gets the list of
1790 known words added and then is used with the `code_node` to initialize the
1793 `parse_XX` then calls the library function `parser_run` to actually complete
1794 the parse. This needs the `states` table and function to call the various
1795 pieces of code provided in the grammar file, so they are generated first.
1797 ###### parser_generate
1799 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
1805 gen_reduce(f, g, file);
1808 fprintf(f, "#line 0 \"gen_parser\"\n");
1809 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1812 fprintf(f, "\tstruct token_state *tokens;\n");
1813 fprintf(f, "\tconfig->words_marks = known;\n");
1814 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1815 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1816 fprintf(f, "\ttokens = token_open(code, config);\n");
1817 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config->known_count);\n");
1818 fprintf(f, "\ttoken_close(tokens);\n");
1819 fprintf(f, "\treturn rv;\n");
1820 fprintf(f, "}\n\n");
1823 ### Known words table
1825 The known words table is simply an array of terminal symbols.
1826 The table of nonterminals used for tracing is a similar array.
1830 static void gen_known(FILE *f, struct grammar *g)
1833 fprintf(f, "#line 0 \"gen_known\"\n");
1834 fprintf(f, "static const char *known[] = {\n");
1835 for (i = TK_reserved;
1836 i < g->num_syms && g->symtab[i]->type == Terminal;
1838 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1839 g->symtab[i]->name.txt);
1840 fprintf(f, "};\n\n");
1843 static void gen_non_term(FILE *f, struct grammar *g)
1846 fprintf(f, "#line 0 \"gen_non_term\"\n");
1847 fprintf(f, "static const char *non_term[] = {\n");
1848 for (i = TK_reserved;
1851 if (g->symtab[i]->type == Nonterminal)
1852 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1853 g->symtab[i]->name.txt);
1854 fprintf(f, "};\n\n");
1857 ### States and the goto tables.
1859 For each state we record the goto table, the reducible production if
1860 there is one, or a symbol to shift for error recovery.
1861 Some of the details of the reducible production are stored in the
1862 `do_reduce` function to come later. Here we store the production number,
1863 the body size (useful for stack management) and the resulting symbol (useful
1864 for knowing how to free data later).
1866 The go to table is stored in a simple array of `sym` and corresponding
1869 ###### exported types
1877 const struct lookup * go_to;
1888 static void gen_goto(FILE *f, struct grammar *g)
1891 fprintf(f, "#line 0 \"gen_goto\"\n");
1892 for (i = 0; i < g->states; i++) {
1894 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1896 struct symset gt = g->statetab[i]->go_to;
1897 for (j = 0; j < gt.cnt; j++)
1898 fprintf(f, "\t{ %d, %d },\n",
1899 gt.syms[j], gt.data[j]);
1906 static void gen_states(FILE *f, struct grammar *g)
1909 fprintf(f, "#line 0 \"gen_states\"\n");
1910 fprintf(f, "static const struct state states[] = {\n");
1911 for (i = 0; i < g->states; i++) {
1912 struct itemset *is = g->statetab[i];
1913 int j, prod = -1, prod_len;
1915 int shift_len = 0, shift_remain = 0;
1916 for (j = 0; j < is->items.cnt; j++) {
1917 int itm = is->items.syms[j];
1918 int p = item_prod(itm);
1919 int bp = item_index(itm);
1920 struct production *pr = g->productions[p];
1922 if (bp < pr->body_size) {
1923 if (shift_sym < 0 ||
1924 (shift_len == bp && shift_remain > pr->body_size - bp)) {
1925 shift_sym = pr->body[bp]->num;
1927 shift_remain = pr->body_size - bp;
1931 /* This is what we reduce */
1932 if (prod < 0 || prod_len < pr->body_size) {
1934 prod_len = pr->body_size;
1939 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
1940 i, is->go_to.cnt, i, prod,
1941 g->productions[prod]->body_size,
1942 g->productions[prod]->head->num,
1945 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
1946 i, is->go_to.cnt, i, shift_sym,
1949 fprintf(f, "};\n\n");
1952 ### The `do_reduce` function and the code
1954 When the parser engine decides to reduce a production, it calls `do_reduce`.
1955 This has two functions.
1957 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
1958 production being reduced. Secondly it runs the code that was included with
1959 the production if any.
1961 This code needs to be able to store data somewhere. Rather than requiring
1962 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
1963 `do_reduce` return the size to be saved.
1965 The code fragment requires translation when written out. Any `$N` needs to
1966 be converted to a reference either to that buffer (if `$0`) or to the
1967 structure returned by a previous reduction. These pointers need to be cast
1968 to the appropriate type for each access. All this is handled in
1971 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
1972 This applies only to symbols with references (or pointers), not those with structures.
1973 The `<` implies that the reference is being moved out, so the object will not be
1974 automatically freed. This is equivalent to assigning `NULL` to the pointer.
1978 static void gen_code(struct production *p, FILE *f, struct grammar *g)
1981 char *used = calloc(1, p->body_size);
1984 fprintf(f, "\t\t\t");
1985 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
1999 if (*c < '0' || *c > '9') {
2006 while (c[1] >= '0' && c[1] <= '9') {
2008 n = n * 10 + *c - '0';
2011 fprintf(f, "(*(struct %.*s*%s)ret)",
2012 p->head->struct_name.len,
2013 p->head->struct_name.txt,
2014 p->head->isref ? "*":"");
2015 else if (n > p->body_size)
2016 fprintf(f, "$%d", n);
2017 else if (p->body[n-1]->type == Terminal)
2018 fprintf(f, "(*(struct token *)body[%d])",
2020 else if (p->body[n-1]->struct_name.txt == NULL)
2021 fprintf(f, "$%d", n);
2023 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2024 p->body[n-1]->struct_name.len,
2025 p->body[n-1]->struct_name.txt,
2026 p->body[n-1]->isref ? "*":"", n-1);
2031 for (i = 0; i < p->body_size; i++) {
2032 if (p->body[i]->struct_name.txt &&
2033 p->body[i]->isref &&
2035 // assume this has been copied out
2036 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2043 static void gen_reduce(FILE *f, struct grammar *g, char *file)
2046 fprintf(f, "#line 0 \"gen_reduce\"\n");
2047 fprintf(f, "static int do_reduce(int prod, void **body, void *ret)\n");
2049 fprintf(f, "\tint ret_size = 0;\n");
2051 fprintf(f, "\tswitch(prod) {\n");
2052 for (i = 0; i < g->production_count; i++) {
2053 struct production *p = g->productions[i];
2054 fprintf(f, "\tcase %d:\n", i);
2059 if (p->head->struct_name.txt)
2060 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2061 p->head->struct_name.len,
2062 p->head->struct_name.txt,
2063 p->head->isref ? "*":"");
2065 fprintf(f, "\t\tbreak;\n");
2067 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2072 As each non-terminal can potentially cause a different type of data
2073 structure to be allocated and filled in, we need to be able to free it when
2076 It is particularly important to have fine control over freeing during error
2077 recovery where individual stack frames might need to be freed.
2079 For this, the grammar author is required to define a `free_XX` function for
2080 each structure that is used by a non-terminal. `do_free` will call whichever
2081 is appropriate given a symbol number, and will call `free` (as is
2082 appropriate for tokens) on any terminal symbol.
2086 static void gen_free(FILE *f, struct grammar *g)
2090 fprintf(f, "#line 0 \"gen_free\"\n");
2091 fprintf(f, "static void do_free(short sym, void *asn)\n");
2093 fprintf(f, "\tif (!asn) return;\n");
2094 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2095 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2096 fprintf(f, "\tswitch(sym) {\n");
2098 for (i = 0; i < g->num_syms; i++) {
2099 struct symbol *s = g->symtab[i];
2101 s->type != Nonterminal ||
2102 s->struct_name.txt == NULL)
2105 fprintf(f, "\tcase %d:\n", s->num);
2107 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2109 s->struct_name.txt);
2110 fprintf(f, "\t\tfree(asn);\n");
2112 fprintf(f, "\t\tfree_%.*s(asn);\n",
2114 s->struct_name.txt);
2115 fprintf(f, "\t\tbreak;\n");
2117 fprintf(f, "\t}\n}\n\n");
2120 ## The main routine.
2122 There are three key parts to the "main" function of parsergen: processing
2123 the arguments, loading the grammar file, and dealing with the grammar.
2125 ### Argument processing.
2127 Command line options allow the selection of analysis mode, name of output
2128 file, and whether or not a report should be generated. By default we create
2129 a report only if no code output was requested.
2131 The `parse_XX` function name uses the basename of the output file which
2132 should not have a suffix (`.c`). `.c` is added to the given name for the
2133 code, and `.h` is added for the header (if header text is specified in the
2140 static const struct option long_options[] = {
2141 { "LR0", 0, NULL, '0' },
2142 { "LR05", 0, NULL, '5' },
2143 { "SLR", 0, NULL, 'S' },
2144 { "LALR", 0, NULL, 'L' },
2145 { "LR1", 0, NULL, '1' },
2146 { "tag", 1, NULL, 't' },
2147 { "report", 0, NULL, 'R' },
2148 { "output", 1, NULL, 'o' },
2149 { NULL, 0, NULL, 0 }
2151 const char *options = "05SL1t:Ro:";
2153 ###### process arguments
2155 char *outfile = NULL;
2160 enum grammar_type type = LR05;
2161 while ((opt = getopt_long(argc, argv, options,
2162 long_options, NULL)) != -1) {
2177 outfile = optarg; break;
2179 tag = optarg; break;
2181 fprintf(stderr, "Usage: parsergen ...\n");
2186 infile = argv[optind++];
2188 fprintf(stderr, "No input file given\n");
2191 if (outfile && report == 1)
2194 if (name && strchr(name, '/'))
2195 name = strrchr(name, '/')+1;
2197 if (optind < argc) {
2198 fprintf(stderr, "Excess command line arguments\n");
2202 ### Loading the grammar file
2204 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2207 Once we have extracted the code (with `mdcode`) we expect to find three
2208 sections: header, code, and grammar. Anything else that is not
2209 excluded by the `--tag` option is an error.
2211 "header" and "code" are optional, though it is hard to build a working
2212 parser with neither. "grammar" must be provided.
2216 #include <sys/mman.h>
2221 static void pr_err(char *msg)
2224 fprintf(stderr, "%s\n", msg);
2228 struct section *table;
2232 fd = open(infile, O_RDONLY);
2234 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2235 infile, strerror(errno));
2238 len = lseek(fd, 0, 2);
2239 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2240 table = code_extract(file, file+len, pr_err);
2242 struct code_node *hdr = NULL;
2243 struct code_node *code = NULL;
2244 struct code_node *gram = NULL;
2245 for (s = table; s; s = s->next) {
2246 struct text sec = s->section;
2247 if (tag && !strip_tag(&sec, tag))
2249 if (text_is(sec, "header"))
2251 else if (text_is(sec, "code"))
2253 else if (text_is(sec, "grammar"))
2256 fprintf(stderr, "Unknown content section: %.*s\n",
2257 s->section.len, s->section.txt);
2262 ### Processing the input
2264 As we need to append an extension to a filename and then open it for
2265 writing, and we need to do this twice, it helps to have a separate function.
2269 static FILE *open_ext(char *base, char *ext)
2271 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2273 strcat(strcpy(fn, base), ext);
2279 If we can read the grammar, then we analyse and optionally report on it. If
2280 the report finds conflicts we will exit with an error status.
2282 ###### process input
2283 struct grammar *g = NULL;
2285 fprintf(stderr, "No grammar section provided\n");
2288 g = grammar_read(gram);
2290 fprintf(stderr, "Failure to parse grammar\n");
2295 grammar_analyse(g, type);
2297 if (grammar_report(g, type))
2301 If a headers section is defined, we write it out and include a declaration
2302 for the `parse_XX` function so it can be used from separate code.
2304 if (rv == 0 && hdr && outfile) {
2305 FILE *f = open_ext(outfile, ".h");
2307 code_node_print(f, hdr, infile);
2308 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2312 fprintf(stderr, "Cannot create %s.h\n",
2318 And if all goes well and an output file was provided, we create the `.c`
2319 file with the code section (if any) and the parser tables and function.
2321 if (rv == 0 && outfile) {
2322 FILE *f = open_ext(outfile, ".c");
2325 code_node_print(f, code, infile);
2326 gen_parser(f, g, infile, name);
2329 fprintf(stderr, "Cannot create %s.c\n",
2335 And that about wraps it up. We need to set the locale so that UTF-8 is
2336 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2338 ###### File: parsergen.mk
2339 parsergen : parsergen.o libscanner.o libmdcode.o
2340 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2347 int main(int argc, char *argv[])
2352 setlocale(LC_ALL,"");
2354 ## process arguments
2361 ## The SHIFT/REDUCE parser
2363 Having analysed the grammar and generated all the tables, we only need the
2364 shift/reduce engine to bring it all together.
2366 ### Goto table lookup
2368 The parser generator has nicely provided us with goto tables sorted by
2369 symbol number. We need a binary search function to find a symbol in the
2372 ###### parser functions
2374 static int search(const struct state *l, int sym)
2377 int hi = l->go_to_cnt;
2381 while (lo + 1 < hi) {
2382 int mid = (lo + hi) / 2;
2383 if (l->go_to[mid].sym <= sym)
2388 if (l->go_to[lo].sym == sym)
2389 return l->go_to[lo].state;
2394 ### The state stack.
2396 The core data structure for the parser is the stack. This tracks all the
2397 symbols that have been recognised or partially recognised.
2399 The stack usually won't grow very large - maybe a few tens of entries. So
2400 we dynamically resize an array as required but never bother to shrink it
2403 We keep the stack as two separate allocations. One, `asn_stack` stores the
2404 "abstract syntax nodes" which are created by each reduction. When we call
2405 `do_reduce` we need to pass an array of the `asn`s of the body of the
2406 production, and by keeping a separate `asn` stack, we can just pass a
2407 pointer into this stack.
2409 The other allocation stores all other stack fields of which there are six.
2410 The `state` is the most important one and guides the parsing process. The
2411 `sym` is nearly unnecessary. However when we want to free entries from the
2412 `asn_stack`, it helps to know what type they are so we can call the right
2413 freeing function. The symbol leads us to the right free function through
2416 The `indents` count and the `starts_indented` flag track the line
2417 indents in the symbol. These are used to allow indent information to
2418 guide parsing and error recovery.
2420 `newline_permitted` keeps track of whether newlines should be ignored
2421 or not, and `starts_line` records if this state started on a newline.
2423 As well as the stack of frames we have a `next` frame which is
2424 assembled from the incoming token and other information prior to
2425 pushing it onto the stack.
2427 ###### parser functions
2433 short starts_indented;
2435 short newline_permitted;
2444 Two operations are needed on the stack - shift (which is like push) and pop.
2446 Shift applies not only to terminals but also to non-terminals. When we
2447 reduce a production we will pop off entries corresponding to the body
2448 symbols, then push on an item for the head of the production. This last is
2449 exactly the same process as shifting in a terminal so we use the same
2452 To simplify other code we arrange for `shift` to fail if there is no `goto`
2453 state for the symbol. This is useful in basic parsing due to our design
2454 that we shift when we can, and reduce when we cannot. So the `shift`
2455 function reports if it could.
2457 So `shift` finds the next state. If that succeeds it extends the allocations
2458 if needed and pushes all the information onto the stacks.
2460 ###### parser functions
2462 static int shift(struct parser *p,
2464 const struct state states[])
2466 // Push an entry onto the stack
2467 int newstate = search(&states[p->next.state], p->next.sym);
2470 if (p->tos >= p->stack_size) {
2471 p->stack_size += 10;
2472 p->stack = realloc(p->stack, p->stack_size
2473 * sizeof(p->stack[0]));
2474 p->asn_stack = realloc(p->asn_stack, p->stack_size
2475 * sizeof(p->asn_stack[0]));
2477 p->stack[p->tos] = p->next;
2478 p->asn_stack[p->tos] = asn;
2480 p->next.state = newstate;
2481 p->next.indents = 0;
2482 p->next.starts_indented = 0;
2483 // if new state doesn't start a line, we inherit newline_permitted status
2484 if (states[newstate].starts_line)
2485 p->next.newline_permitted = 1;
2489 `pop` simply moves the top of stack (`tos`) back down the required amount
2490 and frees any `asn` entries that need to be freed. It is called _after_ we
2491 reduce a production, just before we `shift` the nonterminal in.
2493 ###### parser functions
2495 static void pop(struct parser *p, int num,
2496 void(*do_free)(short sym, void *asn))
2500 for (i = 0; i < num; i++) {
2501 p->next.indents += p->stack[p->tos+i].indents;
2502 do_free(p->stack[p->tos+i].sym,
2503 p->asn_stack[p->tos+i]);
2507 p->next.state = p->stack[p->tos].state;
2508 p->next.starts_indented = p->stack[p->tos].starts_indented;
2509 p->next.newline_permitted = p->stack[p->tos].newline_permitted;
2510 if (p->next.indents > p->next.starts_indented)
2511 p->next.newline_permitted = 0;
2515 ### Memory allocation
2517 The `scanner` returns tokens in a local variable - we want them in allocated
2518 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2519 a reduce is in a large buffer. Both of these require some allocation and
2520 copying, hence `memdup` and `tokcopy`.
2522 ###### parser includes
2525 ###### parser functions
2527 void *memdup(void *m, int len)
2533 memcpy(ret, m, len);
2537 static struct token *tok_copy(struct token tk)
2539 struct token *new = malloc(sizeof(*new));
2544 ### The heart of the parser.
2546 Now we have the parser. If we can shift, we do. If not and we can reduce
2547 we do. If the production we reduced was production zero, then we have
2548 accepted the input and can finish.
2550 We return whatever `asn` was returned by reducing production zero.
2552 If we can neither shift nor reduce we have an error to handle. We pop
2553 single entries off the stack until we can shift the `TK_error` symbol, then
2554 drop input tokens until we find one we can shift into the new error state.
2556 When we find `TK_in` and `TK_out` tokens which report indents we need
2557 to handle them directly as the grammar cannot express what we want to
2560 `TK_in` tokens are easy: we simply update the `next` stack frame to
2561 record how many indents there are and that the next token started with
2564 `TK_out` tokens must either be counted off against any pending indent,
2565 or must force reductions until there is a pending indent which isn't
2566 at the start of a production.
2568 `TK_newline` tokens are ignored precisely if there has been an indent
2569 since the last state which could have been at the start of a line.
2571 ###### parser includes
/* parser_run: the main LR parse loop.
 * Repeatedly fetches a look-ahead token, handles the indent tokens
 * (TK_in/TK_out) and NEWLINE specially, then tries SHIFT, then REDUCE,
 * and finally falls back to error recovery via TK_error.
 * NOTE(review): several original lines are elided from this excerpt
 * (gaps in the numbering), so braces and some control flow are not
 * visible here; comments below describe only what the visible code shows.
 */
2574 void *parser_run(struct token_state *tokens,
2575 const struct state states[],
2576 int (*do_reduce)(int, void**, void*),
2577 void (*do_free)(short, void*),
2578 FILE *trace, const char *non_term[], int knowns)
2580 struct parser p = { 0 };
2581 struct token *tk = NULL;
2585 p.next.newline_permitted = states[0].starts_line;
2587 struct token *err_tk;
// Fetch the next input token and make its symbol the look-ahead.
2589 tk = tok_copy(token_next(tokens));
2590 p.next.sym = tk->num;
2592 parser_trace(trace, &p, tk, states, non_term, knowns);
// TK_in: record that the upcoming content is indented.
2594 if (p.next.sym == TK_in) {
2595 p.next.starts_indented = 1;
// TK_out: try to cancel an indent recorded in the top stack frame.
2601 if (p.next.sym == TK_out) {
2602 if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
2603 (p.stack[p.tos-1].indents == 1 &&
2604 states[p.next.state].reduce_size > 1)) {
2605 p.stack[p.tos-1].indents -= 1;
2606 if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
2607 // no internal indent any more, reassess 'newline_permitted'
2608 if (states[p.stack[p.tos-1].state].starts_line)
2609 p.stack[p.tos-1].newline_permitted = 1;
2611 p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
2617 // fall through and force a REDUCE (as 'shift'
// NEWLINE is ignored unless the current frame permits it.
2620 if (p.next.sym == TK_newline) {
2621 if (!p.tos || ! p.stack[p.tos-1].newline_permitted) {
// First choice: SHIFT the look-ahead token.
2627 if (shift(&p, tk, states)) {
// Otherwise REDUCE if the current state calls for it.
2631 if (states[p.next.state].reduce_prod >= 0) {
2633 int prod = states[p.next.state].reduce_prod;
2634 int size = states[p.next.state].reduce_size;
// Scratch space for the production's semantic value; do_reduce
// fills it and returns the number of bytes used.
2636 static char buf[16*1024];
2637 p.next.sym = states[p.next.state].reduce_sym;
2639 body = p.asn_stack +
2640 (p.tos - states[p.next.state].reduce_size);
2642 bufsize = do_reduce(prod, body, buf);
// Pop the reduced frames, then shift the produced non-terminal.
2644 pop(&p, size, do_free);
2645 shift(&p, memdup(buf, bufsize), states);
2650 if (tk->num == TK_out) {
2651 // Indent problem - synthesise tokens to get us
2653 fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
2654 p.next.sym = states[p.next.state].shift_sym;
2655 shift(&p, tok_copy(*tk), states);
2656 // FIXME need to report this error somehow
2659 /* Error. We walk up the stack until we
2660 * find a state which will accept TK_error.
2661 * We then shift in TK_error and see what state
2662 * that takes us to.
2663 * Then we discard input tokens until
2664 * we find one that is acceptable.
2667 err_tk = tok_copy(*tk);
2668 p.next.sym = TK_error;
2669 while (shift(&p, err_tk, states) == 0
2671 // discard this state
2672 pop(&p, 1, do_free);
2675 // no state accepted TK_error
// Discard input until a token acceptable in the recovery state is
// found (or EOF), keeping the indent bookkeeping balanced.
2678 while (search(&states[p.next.state], tk->num) < 0 &&
2679 tk->num != TK_eof) {
2681 tk = tok_copy(token_next(tokens));
2682 if (tk->num == TK_in)
2683 p.next.indents += 1;
2684 if (tk->num == TK_out) {
2685 if (p.next.indents == 0)
2687 p.next.indents -= 1;
// Give up: end of input with an empty stack.
2690 if (p.tos == 0 && tk->num == TK_eof)
// On success the final semantic value is at the bottom of the stack.
2695 ret = p.asn_stack[0];
2697 pop(&p, p.tos, do_free);
2703 ###### exported functions
/* Public entry point: parse a token stream against the generated
 * state tables, using the supplied reduce/free callbacks.  Optional
 * tracing goes to 'trace' using 'non_term' names for symbols past the
 * 'knowns' known-token count. */
2704 void *parser_run(struct token_state *tokens,
2705 const struct state states[],
2706 int (*do_reduce)(int, void**, void*),
2707 void (*do_free)(short, void*),
2708 FILE *trace, const char *non_term[], int knowns);
2712 Being able to visualize the parser in action can be invaluable when
2713 debugging the parser code, or trying to understand how the parse of a
2714 particular grammar progresses. The stack contains all the important
2715 state, so just printing out the stack every time around the parse loop
2716 can make it possible to see exactly what is happening.
2718 This doesn't explicitly show each SHIFT and REDUCE action. However, they
2719 are easily deduced from the change between consecutive lines, and the
2720 details of each state can be found by cross-referencing the states list
2721 in the stack with the "report" that parsergen can generate.
2723 For terminal symbols, we just dump the token. For non-terminals we
2724 print the name of the symbol. The look ahead token is reported at the
2725 end inside square brackets.
2727 ###### parser functions
/* Printable names for the reserved token symbols, indexed by token
 * number; used by the trace code below.  (Entries for other reserved
 * tokens are elided from this excerpt.) */
2729 static char *reserved_words[] = {
2730 [TK_error] = "ERROR",
2733 [TK_newline] = "NEWLINE",
/* Print one stack frame as "(state:indents...)" with trailing flag
 * letters: 's' if the state starts a line, 'n' if a newline is
 * currently permitted in this frame. */
2736 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2738 fprintf(trace, "(%d", f->state);
// ':' marks a frame whose content started indented, '.' one that didn't.
2740 fprintf(trace, "%c%d", f->starts_indented?':':'.',
2742 if (states[f->state].starts_line)
2743 fprintf(trace, "s");
2744 if (f->newline_permitted)
2745 fprintf(trace, "n");
2746 fprintf(trace, ") ");
/* Dump the whole parse stack to 'trace', one entry per frame, followed
 * by the look-ahead token in square brackets.  Reserved tokens print
 * their fixed name, known terminals print their text, and
 * non-terminals print their grammar name from 'non_term'. */
2749 void parser_trace(FILE *trace, struct parser *p,
2750 struct token *tk, const struct state states[],
2751 const char *non_term[], int knowns)
2754 for (i = 0; i < p->tos; i++) {
2755 int sym = p->stack[i].sym;
2756 parser_trace_state(trace, &p->stack[i], states);
2757 if (sym < TK_reserved &&
2758 reserved_words[sym] != NULL)
2759 fputs(reserved_words[sym], trace);
2760 else if (sym < TK_reserved + knowns) {
// A known terminal: its semantic value is the token itself.
2761 struct token *t = p->asn_stack[i];
2762 text_dump(trace, t->txt, 20);
// Otherwise a non-terminal: look up its grammar name.
2764 fputs(non_term[sym - TK_reserved - knowns],
// Finally show the current state and the look-ahead token.
2768 parser_trace_state(trace, &p->next, states);
2769 fprintf(trace, " [");
2770 if (tk->num < TK_reserved &&
2771 reserved_words[tk->num] != NULL)
2772 fputs(reserved_words[tk->num], trace);
2774 text_dump(trace, tk->txt, 20);
2775 fputs("]\n", trace);
2780 The obvious example for a parser is a calculator.
2782 As `scanner` provides a number parsing function using `libgmp`, it is not much
2783 work to perform arbitrary rational number calculations.
2785 This calculator takes one expression, or an equality test per line. The
2786 results are printed and if any equality test fails, the program exits with
2789 ###### File: parsergen.mk
# Extract the calc grammar (sections tagged "calc") from this file
# and generate the calculator's parser source.
2790 calc.c calc.h : parsergen parsergen.mdc
2791 ./parsergen --tag calc -o calc parsergen.mdc
# Link the demo calculator; needs ICU (scanner) and GMP (rationals).
2792 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
2793 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
2798 // what do we use for a demo-grammar? A calculator of course.
2810 #include <sys/mman.h>
2815 #include "scanner.h"
/* Release a parsed number value.  (Body elided from this excerpt.) */
2821 static void free_number(struct number *n)
/* Demo driver: mmap the literate file named in argv[1], extract its
 * code sections, and run the calculator parser over them.  Tracing is
 * enabled when a second argument is given.
 * NOTE(review): several lines are elided here; error handling for
 * open/mmap is not visible in this excerpt. */
2827 int main(int argc, char *argv[])
2829 int fd = open(argv[1], O_RDONLY);
// lseek whence 2 == SEEK_END: gives the file length.
2830 int len = lseek(fd, 0, 2);
2831 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2832 struct section *s = code_extract(file, file+len, NULL);
// Tokeniser config: skip comments; allow these chars inside numbers.
2833 struct token_config config = {
2834 .ignored = (1 << TK_line_comment)
2835 | (1 << TK_block_comment)
2838 .number_chars = ".,_+-",
2842 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
2844 struct section *t = s->next;
2854 Session -> Session Line
2857 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
2858 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
2859 gmp_printf(" or as a decimal: %Fg\n", fl);
2863 | Expression = Expression NEWLINE ${
2864 if (mpq_equal($1.val, $3.val))
2865 gmp_printf("Both equal %Qd\n", $1.val);
2867 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
2872 | NEWLINE ${ printf("Blank line\n"); }$
2873 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
2876 Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
2877 | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
2878 | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
2880 Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
2881 | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
2882 | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
2884 Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
2885 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$