1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
24 ###### File: parsergen.c
29 ## forward declarations
40 ###### File: libparser.c
47 ###### File: parsergen.mk
50 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
53 ## Reading the grammar
55 The grammar must be stored in a markdown literate code file as parsed
56 by "[mdcode][]". It must have three top level (i.e. unreferenced)
57 sections: `header`, `code`, and `grammar`. The first two will be
58 literally copied into the generated `.c` and `.h` files. The last
59 contains the grammar. This is tokenised with "[scanner][]".
61 If the `--tag` option is given, then any top level header that doesn't
62 start with the tag is ignored, and the tag is stripped from the rest. So
64 means that the three needed sections must be `Foo: header`, `Foo: code`,
65 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
110 The strings reported by `mdcode` and `scanner` are `struct text` which have
111 length rather than being null terminated. To help with printing and
112 comparing we define `text_is` and `prtxt`, which should possibly go in
113 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
114 which might contain control characters.
116 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
117 because that is what we need to detect tags.
120 static int text_is(struct text t, char *s)
122 return (strlen(s) == t.len &&
123 strncmp(s, t.txt, t.len) == 0);
125 static void prtxt(struct text t)
127 printf("%.*s", t.len, t.txt);
130 static int strip_tag(struct text *t, char *tag)
132 int skip = strlen(tag) + 1;
133 if (skip >= t->len ||
134 strncmp(t->txt, tag, skip-1) != 0 ||
135 t->txt[skip-1] != ':')
137 while (skip < t->len && t->txt[skip] == ' ')
146 Productions are comprised primarily of symbols - terminal and
147 non-terminal. We do not make a syntactic distinction between the two,
148 though non-terminals must be identifiers. Non-terminal symbols are
149 those which appear in the head of a production, terminal symbols are
150 those which don't. There are also "virtual" symbols used for precedence
151 marking discussed later, and sometimes we won't know what type a symbol
154 To help with code safety it is possible to declare the terminal symbols.
155 If this is done, then any symbol used in a production that does not
156 appear in a head and is not declared is treated as an error.
158 ###### forward declarations
159 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
160 char *symtypes = "UVTN";
163 ###### grammar fields
164 int terminals_declared;
166 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
167 table of known symbols and the resulting parser will report them as
168 `TK_reserved + N`. A small set of identifiers are reserved for the
169 different token types that `scanner` can report.
173 static struct { int num; char *name; } reserved_words[] = {
174 { TK_error, "ERROR" },
175 { TK_number, "NUMBER" },
176 { TK_ident, "IDENTIFIER" },
178 { TK_string, "STRING" },
179 { TK_multi_string, "MULTI_STRING" },
182 { TK_newline, "NEWLINE" },
188 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
189 recognised. The former is automatically expected at the end of the text
190 being parsed. The latter are always ignored by the parser.
192 All of these symbols are stored in a simple symbol table. We use the
193 `struct text` from `mdcode` to store the name and link them together
194 into a sorted list using an insertion sort.
196 We don't have separate `find` and `insert` functions as any symbol we
197 find needs to be remembered. We simply expect `find` to always return a
198 symbol, but its type might be `Unknown`.
207 ###### grammar fields
212 static struct symbol *sym_find(struct grammar *g, struct text s)
214 struct symbol **l = &g->syms;
219 (cmp = text_cmp((*l)->name, s)) < 0)
223 n = calloc(1, sizeof(*n));
232 static void symbols_init(struct grammar *g)
234 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
236 for (i = 0; i < entries; i++) {
239 t.txt = reserved_words[i].name;
240 t.len = strlen(t.txt);
243 s->num = reserved_words[i].num;
247 ### Data types and precedence.
249 Data type specification, precedence specification, and declaration of
250 terminals are all introduced by a dollar sign at the start of the line.
251 If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
252 precedence, if it is `TERM` then the line declares terminals without
253 precedence, otherwise it specifies a data type.
255 The data type name is simply stored and applied to the head of all
256 subsequent productions. It must be the name of a structure optionally
257 preceded by an asterisk which means a reference or "pointer". So
258 `$expression` maps to `struct expression` and `$*statement` maps to
259 `struct statement *`.
261 Any productions given before the first data type declaration will have
262 no data type associated with them and can carry no information. In
263 order to allow other non-terminals to have no type, the data type
264 `$void` can be given. This does *not* mean that `struct void` will be
265 used, but rather that no type will be associated with future
268 The precedence line must contain a list of symbols - typically
269 terminal symbols, but not necessarily. It can only contain symbols
270 that have not been seen yet, so precedence declaration must precede
271 the productions that they affect.
273 A precedence line may also contain a "virtual" symbol which is an
274 identifier preceded by `$$`. Virtual terminals carry precedence
275 information but are not included in the grammar. A production can
276 declare that it inherits the precedence of a given virtual symbol.
278 This use for `$$` precludes it from being used as a symbol in the
279 described language. Two other symbols: `${` and `}$` are also
282 Each new precedence line introduces a new precedence level and
283 declares how it associates. This level is stored in each symbol
284 listed and may be inherited by any production which uses the symbol. A
285 production inherits from the last symbol which has a precedence.
287 The symbols on the first precedence line have the lowest precedence.
288 Subsequent lines introduce symbols with higher precedence.
290 ###### grammar fields
291 struct text current_type;
296 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
297 static const char *known[] = { "$$", "${", "}$" };
300 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
302 struct token t = token_next(ts);
308 if (t.num != TK_ident) {
309 err = "type or assoc expected after '$'";
312 if (text_is(t.txt, "LEFT"))
314 else if (text_is(t.txt, "RIGHT"))
316 else if (text_is(t.txt, "NON"))
318 else if (text_is(t.txt, "TERM")) {
320 g->terminals_declared = 1;
322 g->current_type = t.txt;
323 g->type_isref = isref;
324 if (text_is(t.txt, "void"))
325 g->current_type.txt = NULL;
327 if (t.num != TK_newline) {
328 err = "Extra tokens after type name";
335 err = "$* cannot be followed by a precedence";
339 // This is a precedence or TERM line, need some symbols.
343 while (t.num != TK_newline) {
344 enum symtype type = Terminal;
346 if (t.num == TK_virtual) {
349 if (t.num != TK_ident) {
350 err = "$$ must be followed by a word";
354 err = "Virtual symbols not permitted on $TERM line";
357 } else if (t.num != TK_ident &&
359 err = "Illegal token in precedence line";
362 s = sym_find(g, t.txt);
363 if (s->type != Unknown) {
364 err = "Symbols in precedence/TERM line must not already be known.";
369 s->precedence = g->prec_levels;
376 err = "No symbols given on precedence/TERM line";
380 while (t.num != TK_newline && t.num != TK_eof)
387 A production either starts with an identifier which is the head
388 non-terminal, or a vertical bar (`|`) in which case this production
389 uses the same head as the previous one. The identifier must be
390 followed by a `->` mark. All productions for a given non-terminal must
391 be grouped together with the `nonterminal ->` given only once.
393 After this a (possibly empty) sequence of identifiers and marks form
394 the body of the production. A virtual symbol may be given after the
395 body by preceding it with `$$`. If a virtual symbol is given, the
396 precedence of the production is that for the virtual symbol. If none
397 is given, the precedence is inherited from the last symbol in the
398 production which has a precedence specified.
400 After the optional precedence may come the `${` mark. This indicates
401 the start of a code fragment. If present, this must be on the same
402 line as the start of the production.
404 All of the text from the `${` through to the matching `}$` is
405 collected and forms the code-fragment for the production. It must all
406 be in one `code_node` of the literate code. The `}$` must be
407 at the end of a line.
409 Text in the code fragment will undergo substitutions where `$N` or
410 `$<N`, for some numeric `N` (or non-numeric indicator as described
411 later), will be replaced with a variable holding the parse information
412 for the particular symbol in the production. `$0` is the head of the
413 production, `$1` is the first symbol of the body, etc. The type of `$N`
414 for a terminal symbol is `struct token`. For a non-terminal, it is
415 whatever has been declared for that symbol. The `<` may be included and
416 means that the value (usually a reference) is being moved out, so it
417 will not automatically be freed. The effect of using '<' is that the
418 variable is cleared to all-zeros.
420 Symbols that are left-recursive are a little special. These are symbols
421 that are both the head of a production and the first body symbol of the same
422 production. They are problematic when they appear in other productions
423 elsewhere than at the end, and when indenting is used to describe
424 structure. In this case there is no way for a smaller indent to ensure
425 the left-recursive symbol cannot be extended. When it appears at the
426 end of a production, that production can be reduced to ensure the symbol
427 isn't extended. So we record left-recursive symbols while reading the
428 grammar, and produce a warning when reporting the grammar if they are
429 found in an unsuitable place.
431 A symbol that is only left recursive in a production where it is
432 followed by newline does not cause these problems because the newline
433 will effectively terminate it.
435 While building productions we will need to add to an array which needs to
439 static void array_add(void *varray, int *cnt, void *new)
441 void ***array = varray;
444 current = ((*cnt-1) | (step-1))+1;
445 if (*cnt == current) {
448 *array = realloc(*array, current * sizeof(void*));
450 (*array)[*cnt] = new;
454 Collecting the code fragment simply involves reading tokens until we
455 hit the end token `}$`, and noting the character position of the start and
459 static struct text collect_code(struct token_state *state,
464 code.txt = start.txt.txt + start.txt.len;
466 t = token_next(state);
467 while (t.node == start.node &&
468 t.num != TK_close && t.num != TK_error &&
470 if (t.num == TK_close && t.node == start.node)
471 code.len = t.txt.txt - code.txt;
477 Now we have all the bits we need to parse a full production.
482 ###### grammar fields
483 struct production **productions;
484 int production_count;
486 ###### production fields
488 struct symbol **body;
494 int first_production;
498 static char *parse_production(struct grammar *g,
500 struct token_state *state)
502 /* Head has already been parsed. */
505 struct production p, *pp;
507 memset(&p, 0, sizeof(p));
509 tk = token_next(state);
510 while (tk.num == TK_ident || tk.num == TK_mark) {
511 struct symbol *bs = sym_find(g, tk.txt);
512 if (bs->type == Unknown) {
513 if (!g->terminals_declared)
516 if (bs->type == Virtual) {
517 err = "Virtual symbol not permitted in production";
520 if (bs->precedence) {
521 p.precedence = bs->precedence;
524 array_add(&p.body, &p.body_size, bs);
525 tk = token_next(state);
527 if (tk.num == TK_virtual) {
529 tk = token_next(state);
530 if (tk.num != TK_ident) {
531 err = "word required after $$";
534 vs = sym_find(g, tk.txt);
535 if (vs->num == TK_newline)
537 else if (vs->num == TK_out)
539 else if (vs->precedence == 0) {
540 err = "symbol after $$ must have precedence";
543 p.precedence = vs->precedence;
546 tk = token_next(state);
548 if (tk.num == TK_open) {
549 p.code_line = tk.line;
550 p.code = collect_code(state, tk);
551 if (p.code.txt == NULL) {
552 err = "code fragment not closed properly";
555 tk = token_next(state);
557 if (p.body_size >= 2 &&
558 p.body[0] == p.head &&
559 p.body[1]->num != TK_newline)
560 p.head->left_recursive = 1;
562 if (tk.num != TK_newline && tk.num != TK_eof) {
563 err = "stray tokens at end of line";
566 pp = malloc(sizeof(*pp));
568 array_add(&g->productions, &g->production_count, pp);
571 while (tk.num != TK_newline && tk.num != TK_eof)
572 tk = token_next(state);
576 With the ability to parse production and dollar-lines, we have nearly all
577 that we need to parse a grammar from a `code_node`.
579 The head of the first production will effectively be the `start` symbol of
580 the grammar. However it won't _actually_ be so. Processing the grammar is
581 greatly simplified if the real start symbol only has a single production,
582 and expects `$eof` as the final terminal. So when we find the first
583 explicit production we insert an extra production as production zero which
586 ###### Example: production 0
589 where `START` is the first non-terminal given.
591 ###### create production zero
592 struct production *p = calloc(1,sizeof(*p));
593 struct text start = {"$start",6};
594 struct text eof = {"$eof",4};
595 struct text code = {"$0 = $<1;", 9};
596 p->head = sym_find(g, start);
597 p->head->type = Nonterminal;
598 p->head->struct_name = g->current_type;
599 p->head->isref = g->type_isref;
600 if (g->current_type.txt)
602 array_add(&p->body, &p->body_size, head);
603 array_add(&p->body, &p->body_size, sym_find(g, eof));
604 p->head->first_production = g->production_count;
605 array_add(&g->productions, &g->production_count, p);
607 Now we are ready to read in the grammar. We ignore comments
608 and strings so that the marks which introduce them can be
609 used as terminals in the grammar. We don't ignore numbers
610 even though we don't allow them as that causes the scanner
611 to produce errors that the parser is better positioned to handle.
614 static struct grammar *grammar_read(struct code_node *code)
616 struct token_config conf = {
619 .words_marks = known,
620 .known_count = sizeof(known)/sizeof(known[0]),
622 .ignored = (1 << TK_line_comment)
623 | (1 << TK_block_comment)
626 | (1 << TK_multi_string)
631 struct token_state *state = token_open(code, &conf);
633 struct symbol *head = NULL;
637 g = calloc(1, sizeof(*g));
640 for (tk = token_next(state); tk.num != TK_eof;
641 tk = token_next(state)) {
642 if (tk.num == TK_newline)
644 if (tk.num == TK_ident) {
646 head = sym_find(g, tk.txt);
647 if (head->type == Nonterminal)
648 err = "This non-terminal has already be used.";
649 else if (head->type == Virtual)
650 err = "Virtual symbol not permitted in head of production";
652 head->type = Nonterminal;
653 head->struct_name = g->current_type;
654 head->isref = g->type_isref;
655 if (g->production_count == 0) {
656 ## create production zero
658 head->first_production = g->production_count;
659 tk = token_next(state);
660 if (tk.num == TK_mark &&
661 text_is(tk.txt, "->"))
662 err = parse_production(g, head, state);
664 err = "'->' missing in production";
666 } else if (tk.num == TK_mark
667 && text_is(tk.txt, "|")) {
668 // another production for same non-term
670 err = parse_production(g, head, state);
672 err = "First production must have a head";
673 } else if (tk.num == TK_mark
674 && text_is(tk.txt, "$")) {
675 err = dollar_line(state, g, 0);
676 } else if (tk.num == TK_mark
677 && text_is(tk.txt, "$*")) {
678 err = dollar_line(state, g, 1);
679 } else if (tk.num == TK_mark
680 && text_is(tk.txt, "//")) {
681 while (tk.num != TK_newline &&
683 tk = token_next(state);
685 err = "Unrecognised token at start of line.";
691 if (g->terminals_declared) {
694 for (s = g->syms; s; s = s->next) {
695 if (s->type != Unknown)
698 fprintf(stderr, "Token %.*s not declared\n",
699 s->name.len, s->name.txt);
708 fprintf(stderr, "Error at line %d: %s\n",
715 ## Analysing the grammar
717 The central task in analysing the grammar is to determine a set of
718 states to drive the parsing process. This will require finding
719 various sets of symbols and of "items". Some of these sets will need
720 to attach information to each element in the set, so they are more
723 Each "item" may have a 'look-ahead' or `LA` set associated with
724 it. Many of these will be the same as each other. To avoid memory
725 wastage, and to simplify some comparisons of sets, these sets will be
726 stored in a list of unique sets, each assigned a number.
728 Once we have the data structures in hand to manage these sets and
729 lists, we can start setting the 'nullable' flag, build the 'FIRST'
730 sets, and then create the item sets which define the various states.
734 Though we don't only store symbols in these sets, they are the main
735 things we store, so they are called symbol sets or "symsets".
737 A symset has a size, an array of shorts, and an optional array of data
738 values, which are also shorts. If the array of data is not present,
739 we store an impossible pointer, as `NULL` is used to indicate that no
740 memory has been allocated yet.
745 unsigned short *syms, *data;
747 #define NO_DATA ((unsigned short *)1)
748 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
749 const struct symset INIT_DATASET = { 0, NULL, NULL };
751 The arrays of shorts are allocated in blocks of 8 and are kept sorted
752 by using an insertion sort. We don't explicitly record the amount of
753 allocated space as it can be derived directly from the current `cnt` using
754 `((cnt - 1) | 7) + 1`.
757 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
760 int current = ((s->cnt-1) | 7) + 1;
761 if (current == s->cnt) {
763 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
764 if (s->data != NO_DATA)
765 s->data = realloc(s->data, sizeof(*s->data) * current);
768 while (i > 0 && s->syms[i-1] > key) {
769 s->syms[i] = s->syms[i-1];
770 if (s->data != NO_DATA)
771 s->data[i] = s->data[i-1];
775 if (s->data != NO_DATA)
780 Finding a symbol (or item) in a `symset` uses a simple binary search.
781 We return the index where the value was found (so data can be accessed),
782 or `-1` to indicate failure.
784 static int symset_find(struct symset *ss, unsigned short key)
791 while (lo + 1 < hi) {
792 int mid = (lo + hi) / 2;
793 if (ss->syms[mid] <= key)
798 if (ss->syms[lo] == key)
803 We will often want to form the union of two symsets. When we do, we
804 will often want to know if anything changed (as that might mean there
805 is more work to do). So `symset_union` reports whether anything was
806 added to the first set. We use a slow quadratic approach as these
807 sets don't really get very big. If profiling shows this to be a problem it
808 can be optimised later.
810 static int symset_union(struct symset *a, struct symset *b)
814 for (i = 0; i < b->cnt; i++)
815 if (symset_find(a, b->syms[i]) < 0) {
816 unsigned short data = 0;
817 if (b->data != NO_DATA)
819 symset_add(a, b->syms[i], data);
825 And of course we must be able to free a symset.
827 static void symset_free(struct symset ss)
830 if (ss.data != NO_DATA)
836 Some symsets are simply stored somewhere appropriate in the `grammar`
837 data structure, others need a bit of indirection. This applies
838 particularly to "LA" sets. These will be paired with "items" in an
839 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
840 stored in the `data` field, so we need a mapping from a small (short)
841 number to an LA `symset`.
843 As mentioned earlier this involves creating a list of unique symsets.
845 For now, we just use a linear list sorted by insertion. A skiplist
846 would be more efficient and may be added later.
853 struct setlist *next;
856 ###### grammar fields
857 struct setlist *sets;
862 static int ss_cmp(struct symset a, struct symset b)
865 int diff = a.cnt - b.cnt;
868 for (i = 0; i < a.cnt; i++) {
869 diff = (int)a.syms[i] - (int)b.syms[i];
876 static int save_set(struct grammar *g, struct symset ss)
878 struct setlist **sl = &g->sets;
882 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
889 s = malloc(sizeof(*s));
898 Finding a set by number is currently performed by a simple linear search.
899 If this turns out to hurt performance, we can store the sets in a dynamic
900 array like the productions.
902 static struct symset set_find(struct grammar *g, int num)
904 struct setlist *sl = g->sets;
905 while (sl && sl->num != num)
910 ### Setting `nullable`
912 We set `nullable` on the head symbol for any production for which all
913 the body symbols (if any) are nullable. As this is a recursive
914 definition, any change in the `nullable` setting means that we need to
915 re-evaluate where it needs to be set.
917 We simply loop around performing the same calculations until no more
924 static void set_nullable(struct grammar *g)
927 while (check_again) {
930 for (p = 0; p < g->production_count; p++) {
931 struct production *pr = g->productions[p];
934 if (pr->head->nullable)
936 for (s = 0; s < pr->body_size; s++)
937 if (! pr->body[s]->nullable)
939 if (s == pr->body_size) {
940 pr->head->nullable = 1;
947 ### Setting `line_like`
949 In order to be able to ignore newline tokens when not relevant, but
950 still include them in the parse when needed, we will need to know
951 which states can start a "line-like" section of code. We ignore
952 newlines when there is an indent since the most recent start of a
955 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
956 If a symbol cannot derive a NEWLINE, then it is only part of a line -
957 so is word-like. If it can derive a NEWLINE, then we consider it to
960 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
961 is the head of a production that contains a line_like symbol is also a
962 line-like symbol. We use a new field `line_like` to record this
963 attribute of symbols, and compute it in a repetitive manner similar to
970 static void set_line_like(struct grammar *g)
973 g->symtab[TK_newline]->line_like = 1;
974 while (check_again) {
977 for (p = 0; p < g->production_count; p++) {
978 struct production *pr = g->productions[p];
981 if (pr->head->line_like)
984 for (s = 0 ; s < pr->body_size; s++) {
985 if (pr->body[s]->line_like) {
986 pr->head->line_like = 1;
995 ### Building the `first` sets
997 When calculating what can follow a particular non-terminal, we will need to
998 know what the "first" terminal in any subsequent non-terminal might be. So
999 we calculate the `first` set for every non-terminal and store them in an
1000 array. We don't bother recording the "first" set for terminals as they are
1003 As the "first" for one symbol might depend on the "first" of another,
1004 we repeat the calculations until no changes happen, just like with
1005 `set_nullable`. This makes use of the fact that `symset_union`
1006 reports if any change happens.
1008 The core of this, which finds the "first" of part of a production body,
1009 will be reused for computing the "follow" sets, so we split it out
1010 into a separate function.
1012 ###### grammar fields
1013 struct symset *first;
1017 static int add_first(struct production *pr, int start,
1018 struct symset *target, struct grammar *g,
1023 for (s = start; s < pr->body_size; s++) {
1024 struct symbol *bs = pr->body[s];
1025 if (bs->type == Terminal) {
1026 if (symset_find(target, bs->num) < 0) {
1027 symset_add(target, bs->num, 0);
1031 } else if (symset_union(target, &g->first[bs->num]))
1037 *to_end = (s == pr->body_size);
1041 static void build_first(struct grammar *g)
1043 int check_again = 1;
1045 g->first = calloc(g->num_syms, sizeof(g->first[0]));
1046 for (s = 0; s < g->num_syms; s++)
1047 g->first[s] = INIT_SYMSET;
1049 while (check_again) {
1052 for (p = 0; p < g->production_count; p++) {
1053 struct production *pr = g->productions[p];
1054 struct symset *head = &g->first[pr->head->num];
1056 if (add_first(pr, 0, head, g, NULL))
1062 ### Building the `follow` sets.
1064 There are two different situations when we will want to generate "follow"
1065 sets. If we are doing an SLR analysis, we want to generate a single
1066 "follow" set for each non-terminal in the grammar. That is what is
1067 happening here. If we are doing an LALR or LR analysis we will want
1068 to generate a separate "LA" set for each item. We do that later
1069 in state generation.
1071 There are two parts to generating a "follow" set. Firstly we look at
1072 every place that any non-terminal appears in the body of any
1073 production, and we find the set of possible "first" symbols after
1074 there. This is done using `add_first` above and only needs to be done
1075 once as the "first" sets are now stable and will not change.
1079 for (p = 0; p < g->production_count; p++) {
1080 struct production *pr = g->productions[p];
1083 for (b = 0; b < pr->body_size - 1; b++) {
1084 struct symbol *bs = pr->body[b];
1085 if (bs->type == Terminal)
1087 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1091 The second part is to add the "follow" set of the head of a production
1092 to the "follow" sets of the final symbol in the production, and any
1093 other symbol which is followed only by `nullable` symbols. As this
1094 depends on "follow" itself we need to repeatedly perform the process
1095 until no further changes happen.
1099 for (again = 0, p = 0;
1100 p < g->production_count;
1101 p < g->production_count-1
1102 ? p++ : again ? (again = 0, p = 0)
1104 struct production *pr = g->productions[p];
1107 for (b = pr->body_size - 1; b >= 0; b--) {
1108 struct symbol *bs = pr->body[b];
1109 if (bs->type == Terminal)
1111 if (symset_union(&g->follow[bs->num],
1112 &g->follow[pr->head->num]))
1119 We now just need to create and initialise the `follow` list to get a
1122 ###### grammar fields
1123 struct symset *follow;
1126 static void build_follow(struct grammar *g)
1129 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1130 for (s = 0; s < g->num_syms; s++)
1131 g->follow[s] = INIT_SYMSET;
1135 ### Building itemsets and states
1137 There are three different levels of detail that can be chosen for
1138 building the itemsets and states for the LR grammar. They are:
1140 1. LR(0) or SLR(1), where no look-ahead is considered.
1141 2. LALR(1) where we build look-ahead sets with each item and merge
1142 the LA sets when we find two paths to the same "kernel" set of items.
1143 3. LR(1) where different look-ahead for any item in the set means
1144 a different state must be created.
1146 ###### forward declarations
1147 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1149 We need to be able to look through existing states to see if a newly
1150 generated state already exists. For now we use a simple sorted linked
1153 An item is a pair of numbers: the production number and the position of
1154 "DOT", which is an index into the body of the production.
1155 As the numbers are not enormous we can combine them into a single "short"
1156 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1157 production number (so 4000 productions with maximum of 15 symbols in the
1160 Comparing the itemsets will be a little different to comparing symsets
1161 as we want to do the lookup after generating the "kernel" of an
1162 itemset, so we need to ignore the offset=zero items which are added during
1165 To facilitate this, we modify the "DOT" number so that "0" sorts to
1166 the end of the list in the symset, and then only compare items before
1170 static inline unsigned short item_num(int production, int dot)
1172 return production | ((31-dot) << 11);
1174 static inline int item_prod(unsigned short item)
1176 return item & 0x7ff;
1178 static inline int item_dot(unsigned short item)
1180 return (31-(item >> 11)) & 0x1f;
1183 For LR(1) analysis we need to compare not just the itemset in a state
1184 but also the LA sets. As we assign each unique LA set a number, we
1185 can just compare the symset and the data values together.
1188 static int itemset_cmp(struct symset a, struct symset b,
1189 enum grammar_type type)
1195 i < a.cnt && i < b.cnt &&
1196 item_dot(a.syms[i]) > 0 &&
1197 item_dot(b.syms[i]) > 0;
1199 int diff = a.syms[i] - b.syms[i];
1203 diff = a.data[i] - b.data[i];
1208 if (i == a.cnt || item_dot(a.syms[i]) == 0)
1212 if (i == b.cnt || item_dot(b.syms[i]) == 0)
1218 if (type < LR1 || av == -1)
1221 a.data[i] - b.data[i];
1224 It will be helpful to know if an itemset has been "completed" or not,
1225 particularly for LALR where itemsets get merged, at which point they
1226 need to be considered for completion again. So a `completed` flag is needed.
1228 For correct handling of `TK_newline` when parsing, we will need to
1229 know which states (itemsets) can occur at the start of a line, so we
1230 will record a `starts_line` flag too whenever DOT is at the start of a
1233 Finally, for handling `TK_out` we need to know whether productions in the
1234 current state started *before* the most recent indent. A state
1235 doesn't usually keep details of individual productions, so we need to
1236 add one extra detail. `min_prefix` is the smallest non-zero number of
1237 symbols *before* DOT in any production in an itemset. This will allow
1238 us to determine if the most recent indent is sufficiently recent
1239 to cancel it against a `TK_out`. If it was seen longer ago than the
1240 `min_prefix`, and if the current state cannot be reduced, then the
1241 indented section must have ended in the middle of a syntactic unit, so
1242 an error must be signaled.
1244 And now we can build the list of itemsets. The lookup routine returns
1245 both a success flag and a pointer to where in the list an insert
1246 should happen, so we don't need to search a second time.
1250 struct itemset *next;
1252 struct symset items;
1253 struct symset go_to;
1255 unsigned short precedence;
1261 ###### grammar fields
1262 struct itemset *items;
1266 static int itemset_find(struct grammar *g, struct itemset ***where,
1267 struct symset kernel, enum grammar_type type)
1269 struct itemset **ip;
1271 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1272 struct itemset *i = *ip;
1274 diff = itemset_cmp(i->items, kernel, type);
1287 Adding an itemset may require merging the LA sets if LALR analysis is
1288 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1289 clear the `completed` flag so that the dependants of this itemset will be
1290 recalculated and their LA sets updated.
1292 `add_itemset` must consume the symsets it is passed, either by adding
1293 them to a data structure, or freeing them.
1295 static int add_itemset(struct grammar *g, struct symset ss,
1296 enum assoc assoc, unsigned short precedence,
1297 enum grammar_type type)
1299 struct itemset **where, *is;
1301 int found = itemset_find(g, &where, ss, type);
1303 is = calloc(1, sizeof(*is));
1304 is->state = g->states;
1308 is->precedence = precedence;
1310 is->go_to = INIT_DATASET;
1319 for (i = 0; i < ss.cnt; i++) {
1320 struct symset temp = INIT_SYMSET, s;
1321 if (ss.data[i] == is->items.data[i])
1323 s = set_find(g, is->items.data[i]);
1324 symset_union(&temp, &s);
1325 s = set_find(g, ss.data[i]);
1326 if (symset_union(&temp, &s)) {
1327 is->items.data[i] = save_set(g, temp);
1338 To build all the itemsets, we first insert the initial itemset made
1339 from production zero, complete each itemset, and then generate new
1340 itemsets from old until no new ones can be made.
1342 Completing an itemset means finding all the items where "DOT" is followed by
1343 a nonterminal and adding "DOT=0" items for every production from that
1344 non-terminal - providing each item hasn't already been added.
1346 If LA sets are needed, the LA set for each new item is found using
1347 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1348 be supplemented by the LA set for the item which produces the new item.
1350 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1351 is used in the next stage.
1352 If any of these symbols are flagged as `line_like`, then this
1353 state must be a `starts_line` state so now is a good time to record that.
1355 When itemsets are created we assign a precedence to the itemset from
1356 the complete item, if there is one. We ignore the possibility of
1357 there being two and don't (currently) handle precedence in such
1358 grammars. When completing a grammar we ignore any item where DOT is
1359 followed by a terminal with a precedence lower than that for the
1360 itemset. Unless the terminal has right associativity, we also ignore
1361 items where the terminal has the same precedence. The result is that
1362 unwanted items are still in the itemset, but the terminal doesn't get
1363 into the go to set, so the item is ineffective.
1365 ###### complete itemset
1366 for (i = 0; i < is->items.cnt; i++) {
1367 int p = item_prod(is->items.syms[i]);
1368 int bs = item_dot(is->items.syms[i]);
1369 struct production *pr = g->productions[p];
1372 struct symset LA = INIT_SYMSET;
1373 unsigned short sn = 0;
1374 struct symset LAnl = INIT_SYMSET;
1375 unsigned short snnl = 0;
1377 if (is->min_prefix == 0 ||
1378 (bs > 0 && bs < is->min_prefix))
1379 is->min_prefix = bs;
1380 if (bs == pr->body_size)
1383 if (s->precedence && is->precedence &&
1384 is->precedence > s->precedence)
1385 /* This terminal has a low precedence and
1386 * shouldn't be shifted
1389 if (s->precedence && is->precedence &&
1390 is->precedence == s->precedence && s->assoc != Right)
1391 /* This terminal has a matching precedence and is
1392 * not Right-associative, so we mustn't shift it.
1395 if (symset_find(&done, s->num) < 0) {
1396 symset_add(&done, s->num, 0);
1398 if (s->type != Nonterminal)
1401 is->starts_line = 1;
1406 add_first(pr, bs+1, &LA, g, &to_end);
1408 struct symset ss = set_find(g, is->items.data[i]);
1409 symset_union(&LA, &ss);
1411 sn = save_set(g, LA);
1412 LA = set_find(g, sn);
1413 if (symset_find(&LA, TK_newline))
1414 symset_add(&LAnl, TK_newline, 0);
1415 snnl = save_set(g, LAnl);
1416 LAnl = set_find(g, snnl);
1419 /* Add productions for this symbol */
1420 for (p2 = s->first_production;
1421 p2 < g->production_count &&
1422 g->productions[p2]->head == s;
1424 int itm = item_num(p2, 0);
1425 int pos = symset_find(&is->items, itm);
1427 if (g->productions[p2]->line_like)
1428 symset_add(&is->items, itm, snnl);
1430 symset_add(&is->items, itm, sn);
1431 /* Will have re-ordered, so start
1432 * from beginning again */
1434 } else if (type >= LALR) {
1435 struct symset ss = set_find(g, is->items.data[pos]);
1436 struct symset tmp = INIT_SYMSET;
1437 struct symset *la = &LA;
1439 if (g->productions[p2]->line_like)
1441 symset_union(&tmp, &ss);
1442 if (symset_union(&tmp, la)) {
1443 is->items.data[pos] = save_set(g, tmp);
1451 For each symbol we found (and placed in `done`) we collect all the items for
1452 which this symbol is next, and create a new itemset with "DOT" advanced over
1453 the symbol. This is then added to the collection of itemsets (or merged
1454 with a pre-existing itemset).
1456 ###### derive itemsets
1457 // Now we have a completed itemset, so we need to
1458 // compute all the 'next' itemsets and create them
1459 // if they don't exist.
1460 for (i = 0; i < done.cnt; i++) {
1462 unsigned short state;
1463 struct symbol *sym = g->symtab[done.syms[i]];
1464 enum assoc assoc = Non;
1465 unsigned short precedence = 0;
1466 struct symset newitemset = INIT_SYMSET;
1468 newitemset = INIT_DATASET;
1470 for (j = 0; j < is->items.cnt; j++) {
1471 int itm = is->items.syms[j];
1472 int p = item_prod(itm);
1473 int bp = item_dot(itm);
1474 struct production *pr = g->productions[p];
1475 unsigned short la = 0;
1478 if (bp == pr->body_size)
1480 if (pr->body[bp] != sym)
1483 la = is->items.data[j];
1484 pos = symset_find(&newitemset, pr->head->num);
1485 if (bp + 1 == pr->body_size &&
1486 pr->precedence > 0 &&
1487 pr->precedence > precedence) {
1488 // new itemset is reducible and has a precedence.
1489 precedence = pr->precedence;
1493 symset_add(&newitemset, item_num(p, bp+1), la);
1494 else if (type >= LALR) {
1495 // Need to merge la set.
1496 int la2 = newitemset.data[pos];
1498 struct symset ss = set_find(g, la2);
1499 struct symset LA = INIT_SYMSET;
1500 symset_union(&LA, &ss);
1501 ss = set_find(g, la);
1502 if (symset_union(&LA, &ss))
1503 newitemset.data[pos] = save_set(g, LA);
1509 state = add_itemset(g, newitemset, assoc, precedence, type);
1510 if (symset_find(&is->go_to, done.syms[i]) < 0)
1511 symset_add(&is->go_to, done.syms[i], state);
1514 All that is left is to create the initial itemset from production zero, and
1515 with `TK_eof` as the LA set.
1518 static void build_itemsets(struct grammar *g, enum grammar_type type)
1520 struct symset first = INIT_SYMSET;
1523 unsigned short la = 0;
1525 // LA set just has eof
1526 struct symset eof = INIT_SYMSET;
1527 symset_add(&eof, TK_eof, 0);
1528 la = save_set(g, eof);
1529 first = INIT_DATASET;
1531 // production 0, offset 0 (with no data)
1532 symset_add(&first, item_num(0, 0), la);
1533 add_itemset(g, first, Non, 0, type);
1534 for (again = 0, is = g->items;
1536 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1538 struct symset done = INIT_SYMSET;
1549 ### Completing the analysis.
1551 The exact process of analysis depends on which level was requested. For
1552 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1553 `LR1` we need first, but not follow. For `SLR` we need both.
1555 We don't build the "action" tables that you might expect as the parser
1556 doesn't use them. They will be calculated to some extent if needed for
1559 Once we have built everything we allocate arrays for the two lists:
1560 symbols and itemsets. This allows more efficient access during reporting.
1561 The symbols are grouped as terminals and non-terminals and we record the
1562 changeover point in `first_nonterm`.
1564 ###### grammar fields
1565 struct symbol **symtab;
1566 struct itemset **statetab;
1569 ###### grammar_analyse
1571 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1575 int snum = TK_reserved;
1576 for (s = g->syms; s; s = s->next)
1577 if (s->num < 0 && s->type == Terminal) {
1581 g->first_nonterm = snum;
1582 for (s = g->syms; s; s = s->next)
1583 if (s->num < 0 && s->type != Virtual) {
1587 for (s = g->syms; s; s = s->next)
1593 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1594 for (s = g->syms; s; s = s->next)
1595 g->symtab[s->num] = s;
1605 build_itemsets(g, type);
1607 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1608 for (is = g->items; is ; is = is->next)
1609 g->statetab[is->state] = is;
1612 ## Reporting on the Grammar
1614 The purpose of the report is to give the grammar developer insight into
1615 how the grammar parser will work. It is basically a structured dump of
1616 all the tables that have been generated, plus a description of any conflicts.
1618 ###### grammar_report
1619 static int grammar_report(struct grammar *g, enum grammar_type type)
1625 return report_conflicts(g, type) + report_left_recursive(g);
1628 Firstly we have the complete list of symbols, together with the
1629 "FIRST" set if that was generated. We add a mark to each symbol to
1630 show if it can end in a newline (`>`), if it is considered to be
1631 "line-like" (`<`), or if it is nullable (`.`).
1635 static void report_symbols(struct grammar *g)
1639 printf("SYMBOLS + FIRST:\n");
1641 printf("SYMBOLS:\n");
1643 for (n = 0; n < g->num_syms; n++) {
1644 struct symbol *s = g->symtab[n];
1648 printf(" %c%c%3d%c: ",
1649 s->nullable ? '.':' ',
1650 s->line_like ? '<':' ',
1651 s->num, symtypes[s->type]);
1654 printf(" (%d%s)", s->precedence,
1655 assoc_names[s->assoc]);
1657 if (g->first && s->type == Nonterminal) {
1660 for (j = 0; j < g->first[n].cnt; j++) {
1663 prtxt(g->symtab[g->first[n].syms[j]]->name);
1670 Then we have the follow sets if they were computed.
1672 static void report_follow(struct grammar *g)
1675 printf("FOLLOW:\n");
1676 for (n = 0; n < g->num_syms; n++)
1677 if (g->follow[n].cnt) {
1681 prtxt(g->symtab[n]->name);
1682 for (j = 0; j < g->follow[n].cnt; j++) {
1685 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1691 And finally the item sets. These include the GO TO tables and, for
1692 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1693 it up a bit. First the items, with production number and associativity.
1695 static void report_item(struct grammar *g, int itm)
1697 int p = item_prod(itm);
1698 int dot = item_dot(itm);
1699 struct production *pr = g->productions[p];
1703 prtxt(pr->head->name);
1705 for (i = 0; i < pr->body_size; i++) {
1706 printf(" %s", (dot == i ? ". ": ""));
1707 prtxt(pr->body[i]->name);
1709 if (dot == pr->body_size)
1712 if (pr->precedence && dot == pr->body_size)
1713 printf(" (%d%s)", pr->precedence,
1714 assoc_names[pr->assoc]);
1715 if (dot < pr->body_size &&
1716 pr->body[dot]->precedence) {
1717 struct symbol *s = pr->body[dot];
1718 printf(" [%d%s]", s->precedence,
1719 assoc_names[s->assoc]);
1721 if (pr->line_like == 1)
1722 printf(" $$NEWLINE");
1723 else if (pr->line_like)
1728 The LA sets which are (possibly) reported with each item:
/* report_la(): print the look-ahead set with the given saved-set
 * number.  Looks the set up with set_find() and prints each member
 * symbol's name.  (Some lines of this function are not visible in
 * this excerpt.)
 */
1730 static void report_la(struct grammar *g, int lanum)
1732 struct symset la = set_find(g, lanum);
1736 printf(" LOOK AHEAD(%d)", lanum);
1737 for (i = 0; i < la.cnt; i++) {
1740 prtxt(g->symtab[la.syms[i]]->name);
1745 Then the go to sets:
/* report_goto(): print a state's GO TO table.  Each entry maps a
 * symbol (gt.syms[i]) to a destination state number (gt.data[i]).
 */
1747 static void report_goto(struct grammar *g, struct symset gt)
1752 for (i = 0; i < gt.cnt; i++) {
1754 prtxt(g->symtab[gt.syms[i]]->name);
1755 printf(" -> %d\n", gt.data[i]);
1759 Now we can report all the item sets complete with items, LA sets, and GO TO.
1761 static void report_itemsets(struct grammar *g)
1764 printf("ITEM SETS(%d)\n", g->states);
1765 for (s = 0; s < g->states; s++) {
1767 struct itemset *is = g->statetab[s];
1768 printf(" Itemset %d:%s min prefix=%d",
1769 s, is->starts_line?" (startsline)":"", is->min_prefix);
1771 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1773 for (j = 0; j < is->items.cnt; j++) {
1774 report_item(g, is->items.syms[j]);
1775 if (is->items.data != NO_DATA)
1776 report_la(g, is->items.data[j]);
1778 report_goto(g, is->go_to);
1782 ### Reporting conflicts
1784 Conflict detection varies a lot among different analysis levels. However
1785 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1786 LR1 are also similar as they have FOLLOW or LA sets.
1790 ## conflict functions
1792 static int report_conflicts(struct grammar *g, enum grammar_type type)
1795 printf("Conflicts:\n");
1797 cnt = conflicts_lr0(g, type);
1799 cnt = conflicts_slr(g, type);
1801 printf(" - no conflicts\n");
1805 LR0 conflicts are any states which have both a reducible item and
1806 a shiftable item, or two reducible items.
1808 LR05 conflicts only occur if two possible reductions exist,
1809 as shifts always over-ride reductions.
1811 ###### conflict functions
1812 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1816 for (i = 0; i < g->states; i++) {
1817 struct itemset *is = g->statetab[i];
1818 int last_reduce = -1;
1819 int prev_reduce = -1;
1820 int last_shift = -1;
1824 for (j = 0; j < is->items.cnt; j++) {
1825 int itm = is->items.syms[j];
1826 int p = item_prod(itm);
1827 int bp = item_dot(itm);
1828 struct production *pr = g->productions[p];
1830 if (bp == pr->body_size) {
1831 prev_reduce = last_reduce;
1835 if (pr->body[bp]->type == Terminal)
1838 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1839 printf(" State %d has both SHIFT and REDUCE:\n", i);
1840 report_item(g, is->items.syms[last_shift]);
1841 report_item(g, is->items.syms[last_reduce]);
1844 if (prev_reduce >= 0) {
1845 printf(" State %d has 2 (or more) reducible items\n", i);
1846 report_item(g, is->items.syms[prev_reduce]);
1847 report_item(g, is->items.syms[last_reduce]);
1854 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1855 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1856 in the source of the look ahead set.
1858 We build two datasets to reflect the "action" table: one which maps
1859 terminals to items where that terminal could be shifted and another
1860 which maps terminals to items that could be reduced when the terminal
1861 is in look-ahead. We report when we get conflicts between the two.
1863 As a special case, if we find a SHIFT/REDUCE conflict on the NEWLINE
1864 terminal, we ignore it. NEWLINES are handled specially with their own
1865 rules for when to shift and when to reduce. Conflicts are expected,
1866 but handled internally.
1868 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1873 for (i = 0; i < g->states; i++) {
1874 struct itemset *is = g->statetab[i];
1875 struct symset shifts = INIT_DATASET;
1876 struct symset reduce = INIT_DATASET;
1880 /* First collect the shifts */
1881 for (j = 0; j < is->items.cnt; j++) {
1882 unsigned short itm = is->items.syms[j];
1883 int p = item_prod(itm);
1884 int bp = item_dot(itm);
1885 struct production *pr = g->productions[p];
1888 if (bp >= pr->body_size ||
1889 pr->body[bp]->type != Terminal)
1894 if (s->precedence && is->precedence)
1895 /* Precedence resolves this, so no conflict */
1898 if (symset_find(&shifts, s->num) < 0)
1899 symset_add(&shifts, s->num, itm);
1901 /* Now look for reductions and conflicts */
1902 for (j = 0; j < is->items.cnt; j++) {
1903 unsigned short itm = is->items.syms[j];
1904 int p = item_prod(itm);
1905 int bp = item_dot(itm);
1906 struct production *pr = g->productions[p];
1908 if (bp < pr->body_size)
1913 la = g->follow[pr->head->num];
1915 la = set_find(g, is->items.data[j]);
1917 for (k = 0; k < la.cnt; k++) {
1918 int pos = symset_find(&shifts, la.syms[k]);
1919 if (pos >= 0 && la.syms[k] != TK_newline) {
1920 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1922 prtxt(g->symtab[la.syms[k]]->name);
1924 report_item(g, shifts.data[pos]);
1925 report_item(g, itm);
1927 pos = symset_find(&reduce, la.syms[k]);
1929 symset_add(&reduce, la.syms[k], itm);
1932 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1933 prtxt(g->symtab[la.syms[k]]->name);
1935 report_item(g, itm);
1936 report_item(g, reduce.data[pos]);
1940 symset_free(shifts);
1941 symset_free(reduce);
1947 ### Reporting non-final left-recursive symbols.
1949 Left recursive symbols are a problem for parsers that honour indentation
1950 when they appear other than at the end of the production. So we need to
1951 report these when asked.
1955 static int report_left_recursive(struct grammar *g)
1958 int bad_left_recursive = 0;
1960 for (p = 0; p < g->production_count; p++) {
1961 struct production *pr = g->productions[p];
1964 for (sn = 0; sn < pr->body_size - 1; sn++) {
1965 struct symbol *s = pr->body[sn];
1967 if (s->left_recursive == 1 &&
1969 if (!bad_left_recursive) {
1970 bad_left_recursive = 1;
1971 printf("Misplaced left recursive symbols.\n");
1975 printf(" in production [%d]\n", p);
1976 s->left_recursive = 2;
1980 return bad_left_recursive;
1983 ## Generating the parser
1985 The exported part of the parser is the `parse_XX` function, where the name
1986 `XX` is based on the name of the parser files.
1988 This takes a `code_node`, a partially initialized `token_config`, and an
1989 optional `FILE` to send tracing to. The `token_config` gets the list of
1990 known words added and then is used with the `code_node` to initialize the
1993 `parse_XX` then calls the library function `parser_run` to actually complete
1994 the parse. This needs the `states` table and function to call the various
1995 pieces of code provided in the grammar file, so they are generated first.
1997 ###### parser_generate
1999 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
2000 struct code_node *pre_reduce)
2006 gen_reduce(f, g, file, pre_reduce);
2009 fprintf(f, "#line 0 \"gen_parser\"\n");
2010 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
2013 fprintf(f, "\tstruct token_state *tokens;\n");
2014 fprintf(f, "\tconfig->words_marks = known;\n");
2015 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
2016 fprintf(f, "\ttokens = token_open(code, config);\n");
2017 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
2018 fprintf(f, "\ttoken_close(tokens);\n");
2019 fprintf(f, "\treturn rv;\n");
2020 fprintf(f, "}\n\n");
2023 ### Known words table
2025 The known words table is simply an array of terminal symbols.
2026 The table of nonterminals used for tracing is a similar array. We
2027 include virtual symbols in the table of non_terminals to keep the
/* gen_known(): emit the "known" words table into the generated C file.
 * Terminals occupy the symtab slots from TK_reserved up to the first
 * non-Terminal entry, so the loop stops at the first symbol whose type
 * is not Terminal.  The emitted array is consumed by parse_XX() via
 * config->words_marks.
 */
2032 static void gen_known(FILE *f, struct grammar *g)
2035 fprintf(f, "#line 0 \"gen_known\"\n");
2036 fprintf(f, "static const char *known[] = {\n");
2037 for (i = TK_reserved;
2038 i < g->num_syms && g->symtab[i]->type == Terminal;
2040 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2041 g->symtab[i]->name.txt);
2042 fprintf(f, "};\n\n");
2045 static void gen_non_term(FILE *f, struct grammar *g)
2048 fprintf(f, "#line 0 \"gen_non_term\"\n");
2049 fprintf(f, "static const char *non_term[] = {\n");
2050 for (i = TK_reserved;
2053 if (g->symtab[i]->type == Nonterminal)
2054 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2055 g->symtab[i]->name.txt);
2056 fprintf(f, "};\n\n");
2059 ### States and the goto tables.
2061 For each state we record the goto table, the reducible production if
2062 there is one, or a symbol to shift for error recovery.
2063 Some of the details of the reducible production are stored in the
2064 `do_reduce` function to come later. Here we store the production number,
2065 the body size (useful for stack management) and the resulting symbol (useful
2066 for knowing how to free data later).
2068 The go to table is stored in a simple array of `sym` and corresponding
2071 ###### exported types
2079 const struct lookup * go_to;
2090 static void gen_goto(FILE *f, struct grammar *g)
2093 fprintf(f, "#line 0 \"gen_goto\"\n");
2094 for (i = 0; i < g->states; i++) {
2096 fprintf(f, "static const struct lookup goto_%d[] = {\n",
2098 struct symset gt = g->statetab[i]->go_to;
2099 for (j = 0; j < gt.cnt; j++)
2100 fprintf(f, "\t{ %d, %d },\n",
2101 gt.syms[j], gt.data[j]);
2108 static void gen_states(FILE *f, struct grammar *g)
2111 fprintf(f, "#line 0 \"gen_states\"\n");
2112 fprintf(f, "static const struct state states[] = {\n");
2113 for (i = 0; i < g->states; i++) {
2114 struct itemset *is = g->statetab[i];
2115 int j, prod = -1, prod_len;
2117 for (j = 0; j < is->items.cnt; j++) {
2118 int itm = is->items.syms[j];
2119 int p = item_prod(itm);
2120 int bp = item_dot(itm);
2121 struct production *pr = g->productions[p];
2123 if (bp < pr->body_size)
2125 /* This is what we reduce */
2126 if (prod < 0 || prod_len < pr->body_size) {
2128 prod_len = pr->body_size;
2133 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n",
2134 i, is->go_to.cnt, i, prod,
2135 g->productions[prod]->body_size,
2136 g->productions[prod]->head->num,
2138 g->productions[prod]->line_like,
2141 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n",
2142 i, is->go_to.cnt, i,
2143 is->starts_line, is->min_prefix);
2145 fprintf(f, "};\n\n");
2148 ### The `do_reduce` function and the code
2150 When the parser engine decides to reduce a production, it calls `do_reduce`.
2151 This has two functions.
2153 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2154 production being reduced. Secondly it runs the code that was included with
2155 the production if any.
2157 This code needs to be able to store data somewhere. Rather than requiring
2158 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2159 `do_reduce` return the size to be saved.
2161 In order for the code to access "global" context, we pass in the
2162 "config" pointer that was passed to parser function. If the `struct
2163 token_config` is embedded in some larger structure, the reducing code
2164 can access the larger structure using pointer manipulation.
2166 The code fragment requires translation when written out. Any `$N` needs to
2167 be converted to a reference either to that buffer (if `$0`) or to the
2168 structure returned by a previous reduction. These pointers need to be cast
2169 to the appropriate type for each access. All this is handled in
2172 `gen_code` also allows symbol references to contain a '`<`' as in
2173 '`$<2`'. This is particularly useful for references (or pointers), but
2174 can be used with structures too. The `<` implies that the value is
2175 being moved out, so the object will not be automatically freed. It is
2176 equivalent to assigning `NULL` to the pointer or filling a structure
2179 Instead of a number `N`, the `$` or `$<` can be followed by some letters
2180 and possibly a number. A number by itself (other than zero) selects a
2181 symbol from the body of the production. A sequence of letters selects
2182 the shortest symbol in the body which contains those letters in the given
2183 order. If a number follows the letters, then a later occurrence of
2184 that symbol is chosen. So "`$AB2`" will refer to the structure attached
2185 to the second occurrence of the shortest symbol which contains an `A`
2186 followed by a `B`. If there is no unique shortest symbol, or if the
2187 number given is too large, then the symbol reference is not transformed,
2188 and will cause an error when the code is compiled.
2192 static int textchr(struct text t, char c, int s)
2195 for (i = s; i < t.len; i++)
2201 static int subseq_match(char *seq, int slen, struct text name)
2205 st = textchr(name, *seq, st);
2215 static int choose_sym(char **namep, int len, struct production *p)
2217 char *name = *namep;
2226 ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
2232 while (len > 0 && (c >= '0' && c <= '9')) {
2235 n = n * 10 + (c - '0');
2245 for (i = 0; i < p->body_size; i++) {
2246 if (!subseq_match(nam, namlen, p->body[i]->name))
2248 if (slen == 0 || p->body[i]->name.len < slen)
2250 if (s >= 0 && p->body[i] != p->body[s] &&
2251 p->body[i]->name.len == p->body[s]->name.len)
2252 /* not unique, so s cannot be used */
2259 for (i = 0; i < p->body_size; i++)
2260 if (p->body[i] == p->body[s]) {
2271 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2274 char *used = calloc(1, p->body_size);
2277 fprintf(f, "\t\t\t");
2278 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2292 n = choose_sym(&c, p->code.txt + p->code.len - c, p);
2301 fprintf(f, "(*(struct %.*s*%s)ret)",
2302 p->head->struct_name.len,
2303 p->head->struct_name.txt,
2304 p->head->isref ? "*":"");
2305 else if (p->body[n-1]->type == Terminal)
2306 fprintf(f, "(*(struct token *)body[%d])",
2308 else if (p->body[n-1]->struct_name.txt == NULL)
2309 fprintf(f, "$%d", n);
2311 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2312 p->body[n-1]->struct_name.len,
2313 p->body[n-1]->struct_name.txt,
2314 p->body[n-1]->isref ? "*":"", n-1);
2320 for (i = 0; i < p->body_size; i++) {
2321 if (p->body[i]->struct_name.txt &&
2323 // assume this has been copied out
2324 if (p->body[i]->isref)
2325 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2327 fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
2335 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2336 struct code_node *code)
2339 fprintf(f, "#line 1 \"gen_reduce\"\n");
2340 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2342 fprintf(f, "\tint ret_size = 0;\n");
2344 code_node_print(f, code, file);
2346 fprintf(f, "#line 4 \"gen_reduce\"\n");
2347 fprintf(f, "\tswitch(prod) {\n");
2348 for (i = 0; i < g->production_count; i++) {
2349 struct production *p = g->productions[i];
2350 fprintf(f, "\tcase %d:\n", i);
2353 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2357 if (p->head->struct_name.txt)
2358 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2359 p->head->struct_name.len,
2360 p->head->struct_name.txt,
2361 p->head->isref ? "*":"");
2363 fprintf(f, "\t\tbreak;\n");
2365 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2370 As each non-terminal can potentially cause a different type of data
2371 structure to be allocated and filled in, we need to be able to free it when
2374 It is particularly important to have fine control over freeing during error
2375 recovery where individual stack frames might need to be freed.
2377 For this, the grammar author is required to defined a `free_XX` function for
2378 each structure that is used by a non-terminal. `do_free` will call whichever
2379 is appropriate given a symbol number, and will call `free` (as is
2380 appropriate for tokens) on any terminal symbol.
2384 static void gen_free(FILE *f, struct grammar *g)
2388 fprintf(f, "#line 0 \"gen_free\"\n");
2389 fprintf(f, "static void do_free(short sym, void *asn)\n");
2391 fprintf(f, "\tif (!asn) return;\n");
2392 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2393 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2394 fprintf(f, "\tswitch(sym) {\n");
2396 for (i = 0; i < g->num_syms; i++) {
2397 struct symbol *s = g->symtab[i];
2399 s->type != Nonterminal ||
2400 s->struct_name.txt == NULL)
2403 fprintf(f, "\tcase %d:\n", s->num);
2405 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2407 s->struct_name.txt);
2408 fprintf(f, "\t\tfree(asn);\n");
2410 fprintf(f, "\t\tfree_%.*s(asn);\n",
2412 s->struct_name.txt);
2413 fprintf(f, "\t\tbreak;\n");
2415 fprintf(f, "\t}\n}\n\n");
2418 ## The main routine.
2420 There are three key parts to the "main" function of parsergen: processing
2421 the arguments, loading the grammar file, and dealing with the grammar.
2423 ### Argument processing.
2425 Command line options allow the selection of analysis mode, name of output
2426 file, and whether or not a report should be generated. By default we create
2427 a report only if no code output was requested.
2429 The `parse_XX` function name uses the basename of the output file which
2430 should not have a suffix (`.c`). `.c` is added to the given name for the
2431 code, and `.h` is added for the header (if header text is specified in the
2438 static const struct option long_options[] = {
2439 { "LR0", 0, NULL, '0' },
2440 { "LR05", 0, NULL, '5' },
2441 { "SLR", 0, NULL, 'S' },
2442 { "LALR", 0, NULL, 'L' },
2443 { "LR1", 0, NULL, '1' },
2444 { "tag", 1, NULL, 't' },
2445 { "report", 0, NULL, 'R' },
2446 { "output", 1, NULL, 'o' },
2447 { NULL, 0, NULL, 0 }
2449 const char *options = "05SL1t:Ro:";
2451 ###### process arguments
2453 char *outfile = NULL;
2458 enum grammar_type type = LR05;
2459 while ((opt = getopt_long(argc, argv, options,
2460 long_options, NULL)) != -1) {
2475 outfile = optarg; break;
2477 tag = optarg; break;
2479 fprintf(stderr, "Usage: parsergen ...\n");
2484 infile = argv[optind++];
2486 fprintf(stderr, "No input file given\n");
2489 if (outfile && report == 1)
2492 if (name && strchr(name, '/'))
2493 name = strrchr(name, '/')+1;
2495 if (optind < argc) {
2496 fprintf(stderr, "Excess command line arguments\n");
2500 ### Loading the grammar file
2502 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2505 Once we have extracted the code (with `mdcode`) we expect to find three
2506 sections: header, code, and grammar. Anything else that is not
2507 excluded by the `--tag` option is an error.
2509 "header" and "code" are optional, though it is hard to build a working
2510 parser with neither. "grammar" must be provided.
2514 #include <sys/mman.h>
2519 static void pr_err(char *msg)
2522 fprintf(stderr, "%s\n", msg);
2526 struct section *table;
2530 fd = open(infile, O_RDONLY);
2532 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2533 infile, strerror(errno));
2536 len = lseek(fd, 0, 2);
2537 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2538 table = code_extract(file, file+len, pr_err);
2540 struct code_node *hdr = NULL;
2541 struct code_node *code = NULL;
2542 struct code_node *gram = NULL;
2543 struct code_node *pre_reduce = NULL;
2544 for (s = table; s; s = s->next) {
2545 struct text sec = s->section;
2546 if (tag && !strip_tag(&sec, tag))
2548 if (text_is(sec, "header"))
2550 else if (text_is(sec, "code"))
2552 else if (text_is(sec, "grammar"))
2554 else if (text_is(sec, "reduce"))
2555 pre_reduce = s->code;
2557 fprintf(stderr, "Unknown content section: %.*s\n",
2558 s->section.len, s->section.txt);
2563 ### Processing the input
2565 As we need to append an extension to a filename and then open it for
2566 writing, and we need to do this twice, it helps to have a separate function.
/* open_ext(): open "<base><ext>" for writing.
 * NOTE(review): the malloc size is strlen(base)+strlen(ext)+1, exactly
 * enough for both strings plus the NUL, and the allocation result is
 * used unchecked here -- an OOM would crash; acceptable for a one-shot
 * generator tool.  (fopen and the return are outside this excerpt.)
 */
2570 static FILE *open_ext(char *base, char *ext)
2572 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2574 strcat(strcpy(fn, base), ext);
2580 If we can read the grammar, then we analyse and optionally report on it. If
2581 the report finds conflicts we will exit with an error status.
2583 ###### process input
2584 struct grammar *g = NULL;
2586 fprintf(stderr, "No grammar section provided\n");
2589 g = grammar_read(gram);
2591 fprintf(stderr, "Failure to parse grammar\n");
2596 grammar_analyse(g, type);
2598 if (grammar_report(g, type))
2602 If a "headers" section is defined, we write it out and include a declaration
2603 for the `parse_XX` function so it can be used from separate code.
2605 if (rv == 0 && hdr && outfile) {
2606 FILE *f = open_ext(outfile, ".h");
2608 code_node_print(f, hdr, infile);
2609 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2613 fprintf(stderr, "Cannot create %s.h\n",
2619 And if all goes well and an output file was provided, we create the `.c`
2620 file with the code section (if any) and the parser tables and function.
2622 if (rv == 0 && outfile) {
2623 FILE *f = open_ext(outfile, ".c");
2626 code_node_print(f, code, infile);
2627 gen_parser(f, g, infile, name, pre_reduce);
2630 fprintf(stderr, "Cannot create %s.c\n",
2636 And that about wraps it up. We need to set the locale so that UTF-8 is
2637 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2639 ###### File: parsergen.mk
2640 parsergen : parsergen.o libscanner.o libmdcode.o
2641 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2648 int main(int argc, char *argv[])
2653 setlocale(LC_ALL,"");
2655 ## process arguments
2662 ## The SHIFT/REDUCE parser
2664 Having analysed the grammar and generated all the tables, we only need the
2665 shift/reduce engine to bring it all together.
2667 ### Goto table lookup
2669 The parser generator has nicely provided us with goto tables sorted by
2670 symbol number. We need a binary search function to find a symbol in the
2673 ###### parser functions
2675 static int search(const struct state *l, int sym)
2678 int hi = l->go_to_cnt;
2682 while (lo + 1 < hi) {
2683 int mid = (lo + hi) / 2;
2684 if (l->go_to[mid].sym <= sym)
2689 if (l->go_to[lo].sym == sym)
2690 return l->go_to[lo].state;
2695 ### The state stack.
2697 The core data structure for the parser is the stack. This tracks all the
2698 symbols that have been recognised or partially recognised.
2700 The stack usually won't grow very large - maybe a few tens of entries. So
2701 we dynamically resize an array as required but never bother to shrink it
2704 We keep the stack as two separate allocations. One, `asn_stack` stores the
2705 "abstract syntax nodes" which are created by each reduction. When we call
2706 `do_reduce` we need to pass an array of the `asn`s of the body of the
2707 production, and by keeping a separate `asn` stack, we can just pass a
2708 pointer into this stack.
2710 The other allocation stores all other stack fields of which there are six.
2711 The `state` is the most important one and guides the parsing process. The
2712 `sym` is nearly unnecessary. However when we want to free entries from the
2713 `asn_stack`, it helps to know what type they are so we can call the right
2714 freeing function. The symbol leads us to the right free function through
2717 The `indents` count tracks the line indents within the symbol or
2718 immediately following it. These are used to allow indent information to
2719 guide parsing and error recovery.
2721 `since_newline` tracks how many stack frames since the last
2722 start-of-line (whether indented or not). So if `since_newline` is
2723 zero, then this symbol is at the start of a line. Similarly
2724 `since_indent` counts the number of states since an indent, it is zero
2725 precisely when `indents` is not zero.
2727 `newline_permitted` keeps track of whether newlines should be ignored
2730 The stack is most properly seen as alternating states and symbols -
2731 states, like the 'DOT' in items, are between symbols. Each frame in
2732 our stack holds a state and the symbol that was before it. The
2733 bottom of stack holds the start state but no symbol, as nothing came
2734 before the beginning.
2736 ###### parser functions
2741 short newline_permitted;
2745 short since_newline;
2755 Two operations are needed on the stack - shift (which is like push) and pop.
2757 Shift applies not only to terminals but also to non-terminals. When
2758 we reduce a production we will pop off entries corresponding to the
2759 body symbols, then push on an item for the head of the production.
2760 This last is exactly the same process as shifting in a terminal so we
2761 use the same function for both. In both cases we provide the symbol,
2762 the number of indents the symbol contains (which will be zero for a
2763 terminal symbol) and a flag indicating that the symbol was at (or was
2764 reduced from a symbol which was at) the start of a line. The state is
2765 deduced from the current top-of-stack state and the new symbol.
2767 To simplify other code we arrange for `shift` to fail if there is no `goto`
2768 state for the symbol. This is useful in basic parsing due to our design
2769 that we shift when we can, and reduce when we cannot. So the `shift`
2770 function reports if it could.
2772 `shift` is also used to push state zero onto the stack, so if the
2773 stack is empty, it always chooses zero as the next state.
2775 So `shift` finds the next state. If that succeeds it extends the
2776 allocations if needed and pushes all the information onto the stacks.
2778 Newlines are permitted after a `starts_line` state until an internal
2779 indent. If the new frame has neither a `starts_line` state nor an
2780 indent, newlines are permitted if the previous stack frame permitted
2783 ###### parser functions
2785 static int shift(struct parser *p,
2786 short sym, short indents, short start_of_line,
2788 const struct state states[])
2790 // Push an entry onto the stack
2791 struct frame next = {0};
2792 int newstate = p->tos
2793 ? search(&states[p->stack[p->tos-1].state],
2798 if (p->tos >= p->stack_size) {
2799 p->stack_size += 10;
2800 p->stack = realloc(p->stack, p->stack_size
2801 * sizeof(p->stack[0]));
2802 p->asn_stack = realloc(p->asn_stack, p->stack_size
2803 * sizeof(p->asn_stack[0]));
2806 next.indents = indents;
2807 next.state = newstate;
2808 if (states[newstate].starts_line)
2809 next.newline_permitted = 1;
2811 next.newline_permitted = 0;
2813 next.newline_permitted =
2814 p->stack[p->tos-1].newline_permitted;
2816 next.newline_permitted = 0;
2818 if (!start_of_line) {
2820 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2822 next.since_newline = 1;
2825 next.since_indent = 0;
2827 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2829 next.since_indent = 1;
2831 p->stack[p->tos] = next;
2832 p->asn_stack[p->tos] = asn;
2837 `pop` primarily moves the top of stack (`tos`) back down the required
2838 amount and frees any `asn` entries that need to be freed. It also
2839 collects a summary of the indents and line starts in the symbols that
2840 are being removed. It is called _after_ we reduce a production, just
2841 before we `shift` the nonterminal in.
2843 ###### parser functions
2845 static int pop(struct parser *p, int num,
2846 short *start_of_line,
2847 void(*do_free)(short sym, void *asn))
2854 for (i = 0; i < num; i++) {
2855 sol |= !p->stack[p->tos+i].since_newline;
2856 indents += p->stack[p->tos+i].indents;
2857 do_free(p->stack[p->tos+i].sym,
2858 p->asn_stack[p->tos+i]);
2861 *start_of_line = sol;
2865 ### Memory allocation
2867 The `scanner` returns tokens in a local variable - we want them in allocated
2868 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2869 a reduce is in a large buffer. Both of these require some allocation and
2870 copying, hence `memdup` and `tokcopy`.
2872 ###### parser includes
2875 ###### parser functions
2877 void *memdup(void *m, int len)
2883 memcpy(ret, m, len);
2887 static struct token *tok_copy(struct token tk)
2889 struct token *new = malloc(sizeof(*new));
2894 ### The heart of the parser.
2896 Now we have the parser. If we can shift we do, though newlines and
2897 reducing indenting may block that. If not and we can reduce we do
2898 that. If the production we reduced was production zero, then we have
2899 accepted the input and can finish.
2901 We return whatever `asn` was returned by reducing production zero.
2903 If we can neither shift nor reduce we have an error to handle. We pop
2904 single entries off the stack until we can shift the `TK_error` symbol,
2905 then drop input tokens until we find one we can shift into the new error
2906 state. We need to ensure that something is dropped or shifted after an
2907 error, or we could get into an infinite loop, only shifting in
2908 `TK_error`, then reducing. So we track if there has been a shift since
2909 the last error, and if not the next error always discards one token.
2911 When we find `TK_in` and `TK_out` tokens which report indents we need
2912 to handle them directly as the grammar cannot express what we want to
2915 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2916 record how many indents there are following the previous token.
2918 `TK_out` tokens must be canceled against an indent count
2919 within the stack. If we can reduce some symbols that are all since
2920 the most recent indent, then we do that first. If the minimum prefix
2921 of the current state then extends back before the most recent indent,
2922 that indent can be cancelled. If the minimum prefix is shorter than
2923 the indent had ended prematurely and we must start error handling, which
2924 is still a work-in-progress.
2926 `TK_newline` tokens are ignored unless the top stack frame records
2927 that they are permitted. In that case they will not be considered for
2928 shifting if it is possible to reduce some symbols that are all since
2929 the most recent start of line. This is how a newline forcibly
2930 terminates any line-like structure - we try to reduce down to at most
2931 one symbol for each line where newlines are allowed.
2932 A consequence of this is that a rule like
2934 ###### Example: newlines - broken
2938 IfStatement -> Newlines if ....
2940 cannot work, as the NEWLINE will never be shifted as the empty string
2941 will be reduced first. Optional sets of newlines need to be included
2942 in the thing that precedes them:
2944 ###### Example: newlines - works
2948 IfStatement -> If ....
2950 Here the NEWLINE will be shifted because nothing can be reduced until
2953 When during error handling we discard tokens read in, we want to keep
2954 discarding until we see one that is recognised. If we had a full set
2955 of LR(1) grammar states, this would mean looking in the look-ahead set,
2956 but we don't keep a full look-ahead set. We only record the subset
2957 that leads to SHIFT. We can, however, deduce the look-ahead set by
2958 looking at the SHIFT subsets for all states that we can get to by
2959 reducing zero or more times. So we need a little function which
2960 checks if a given token is in any of these look-ahead sets.
2962 ###### parser includes
2967 static int in_lookahead(struct token *tk, const struct state *states, int state)
2969 while (state >= 0) {
2970 if (search(&states[state], tk->num) >= 0)
2972 if (states[state].reduce_prod < 0)
2974 state = search(&states[state], states[state].reduce_sym);
2979 void *parser_run(struct token_state *tokens,
2980 const struct state states[],
2981 int (*do_reduce)(int, void**, struct token_config*, void*),
2982 void (*do_free)(short, void*),
2983 FILE *trace, const char *non_term[],
2984 struct token_config *config)
2986 struct parser p = { 0 };
2987 struct token *tk = NULL;
2989 int shift_since_err = 1;
2992 shift(&p, TK_eof, 0, 1, NULL, states);
2993 while (!accepted && p.tos > 0) {
2994 struct token *err_tk;
2995 struct frame *tos = &p.stack[p.tos-1];
2997 tk = tok_copy(token_next(tokens));
2998 parser_trace(trace, &p,
2999 tk, states, non_term, config->known_count);
3001 if (tk->num == TK_in) {
3003 tos->since_newline = 0;
3004 tos->since_indent = 0;
3005 if (!states[tos->state].starts_line)
3006 tos->newline_permitted = 0;
3009 parser_trace_action(trace, "Record");
3012 if (tk->num == TK_out) {
3013 if (states[tos->state].reduce_size >= 0 &&
3014 states[tos->state].reduce_size <= tos->since_indent)
3016 if (states[tos->state].min_prefix >= tos->since_indent) {
3018 struct frame *in = tos - tos->since_indent;
3020 if (in->indents == 0) {
3021 /* Reassess since_indent and newline_permitted */
3023 in->since_indent = in[-1].since_indent + 1;
3024 in->newline_permitted = in[-1].newline_permitted;
3026 in->since_indent = 0;
3027 in->newline_permitted = 0;
3029 if (states[in->state].starts_line)
3030 in->newline_permitted = 1;
3033 in->since_indent = in[-1].since_indent + 1;
3034 if (states[in->state].starts_line)
3035 in->newline_permitted = 1;
3037 in->newline_permitted = in[-1].newline_permitted;
3042 parser_trace_action(trace, "Cancel");
3045 // fall through to error handling as both SHIFT and REDUCE
3048 if (tk->num == TK_newline) {
3049 if (!tos->newline_permitted) {
3052 parser_trace_action(trace, "Discard");
3055 if (tos->since_newline > 1 &&
3056 states[tos->state].reduce_size >= 0 &&
3057 states[tos->state].reduce_size <= tos->since_newline)
3060 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
3061 shift_since_err = 1;
3063 parser_trace_action(trace, "Shift");
3067 if (states[tos->state].reduce_prod >= 0 &&
3068 states[tos->state].newline_only &&
3069 !(tk->num == TK_newline ||
3070 tk->num == TK_eof ||
3071 tk->num == TK_out ||
3072 (tos->indents == 0 && tos->since_newline == 0))) {
3073 /* Anything other than newline or out or eof
3074 * in an error unless we are already at start
3075 * of line, as this production must end at EOL.
3077 } else if (states[tos->state].reduce_prod >= 0) {
3080 const struct state *nextstate = &states[tos->state];
3081 int prod = nextstate->reduce_prod;
3082 int size = nextstate->reduce_size;
3084 static char buf[16*1024];
3085 short indents, start_of_line;
3087 body = p.asn_stack + (p.tos - size);
3089 bufsize = do_reduce(prod, body, config, buf);
3091 indents = pop(&p, size, &start_of_line,
3093 res = memdup(buf, bufsize);
3094 memset(buf, 0, bufsize);
3095 if (!shift(&p, nextstate->reduce_sym,
3096 indents, start_of_line,
3098 if (prod != 0) abort();
3102 parser_trace_action(trace, "Reduce");
3105 /* Error. We walk up the stack until we
3106 * find a state which will accept TK_error.
3107 * We then shift in TK_error and see what state
3108 * that takes us too.
3109 * Then we discard input tokens until
3110 * we find one that is acceptable.
3112 parser_trace_action(trace, "ERROR");
3113 short indents = 0, start_of_line;
3115 err_tk = tok_copy(*tk);
3117 shift(&p, TK_error, 0, 0,
3118 err_tk, states) == 0)
3119 // discard this state
3120 indents += pop(&p, 1, &start_of_line, do_free);
3123 // no state accepted TK_error
3126 if (!shift_since_err) {
3127 /* must discard at least one token to avoid
3130 if (tk->num == TK_eof)
3133 tk = tok_copy(token_next(tokens));
3135 shift_since_err = 0;
3136 tos = &p.stack[p.tos-1];
3137 while (!in_lookahead(tk, states, tos->state) &&
3138 tk->num != TK_eof) {
3140 tk = tok_copy(token_next(tokens));
3141 shift_since_err = 1;
3142 if (tk->num == TK_in)
3144 if (tk->num == TK_out) {
3148 // FIXME update since_indent here
3151 tos->indents += indents;
3154 pop(&p, p.tos, NULL, do_free);
3160 ###### exported functions
3161 void *parser_run(struct token_state *tokens,
3162 const struct state states[],
3163 int (*do_reduce)(int, void**, struct token_config*, void*),
3164 void (*do_free)(short, void*),
3165 FILE *trace, const char *non_term[],
3166 struct token_config *config);
3170 Being able to visualize the parser in action can be invaluable when
3171 debugging the parser code, or trying to understand how the parse of a
3172 particular grammar progresses. The stack contains all the important
3173 state, so just printing out the stack every time around the parse loop
3174 can make it possible to see exactly what is happening.
3176 This doesn't explicitly show each SHIFT and REDUCE action. However they
3177 are easily deduced from the change between consecutive lines, and the
3178 details of each state can be found by cross referencing the states list
3179 in the stack with the "report" that parsergen can generate.
3181 For terminal symbols, we just dump the token. For non-terminals we
3182 print the name of the symbol. The look ahead token is reported at the
3183 end inside square brackets.
3185 ###### parser functions
3187 static char *reserved_words[] = {
3188 [TK_error] = "ERROR",
3191 [TK_newline] = "NEWLINE",
3194 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
3196 fprintf(trace, "(%d", f->state);
3197 if (states[f->state].starts_line)
3198 fprintf(trace, "s");
3199 if (f->newline_permitted)
3200 fprintf(trace, "n%d", f->since_newline);
3201 fprintf(trace, ") ");
3204 void parser_trace(FILE *trace, struct parser *p,
3205 struct token *tk, const struct state states[],
3206 const char *non_term[], int knowns)
3211 for (i = 0; i < p->tos; i++) {
3212 struct frame *f = &p->stack[i];
3215 if (sym < TK_reserved &&
3216 reserved_words[sym] != NULL)
3217 fputs(reserved_words[sym], trace);
3218 else if (sym < TK_reserved + knowns) {
3219 struct token *t = p->asn_stack[i];
3220 text_dump(trace, t->txt, 20);
3222 fputs(non_term[sym - TK_reserved - knowns],
3225 fprintf(trace, ".%d", f->indents);
3226 if (f->since_newline == 0)
3230 parser_trace_state(trace, f, states);
3232 fprintf(trace, "[");
3233 if (tk->num < TK_reserved &&
3234 reserved_words[tk->num] != NULL)
3235 fputs(reserved_words[tk->num], trace);
3237 text_dump(trace, tk->txt, 20);
3238 fprintf(trace, ":%d:%d]", tk->line, tk->col);
3241 void parser_trace_action(FILE *trace, char *action)
3244 fprintf(trace, " - %s\n", action);
3249 The obvious example for a parser is a calculator.
3251 As `scanner` provides a number parsing function using `libgmp`, it is not much
3252 work to perform arbitrary rational number calculations.
3254 This calculator takes one expression, or an equality test, per line. The
3255 results are printed and if any equality test fails, the program exits with
3258 ###### File: parsergen.mk
3259 calc.c calc.h : parsergen parsergen.mdc
3260 ./parsergen --tag calc -o calc parsergen.mdc
3261 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3262 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3264 ./calc parsergen.mdc
3269 #include "parse_number.h"
3270 // what do we use for a demo-grammar? A calculator of course.
3282 #include <sys/mman.h>
3288 #include "scanner.h"
3293 static void free_number(struct number *n)
3299 static int text_is(struct text t, char *s)
3301 return (strlen(s) == t.len &&
3302 strncmp(s, t.txt, t.len) == 0);
3305 int main(int argc, char *argv[])
3307 int fd = open(argv[1], O_RDONLY);
3308 int len = lseek(fd, 0, 2);
3309 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3310 struct section *table = code_extract(file, file+len, NULL);
3312 struct token_config config = {
3313 .ignored = (1 << TK_line_comment)
3316 .number_chars = ".,_+-",
3320 for (s = table; s; s = s->next)
3321 if (text_is(s->section, "example: input"))
3322 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3324 struct section *t = table->next;
3325 code_free(table->code);
3337 Session -> Session Line
3340 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3341 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3342 gmp_printf(" or as a decimal: %Fg\n", fl);
3346 | Expression = Expression NEWLINE ${
3347 if (mpq_equal($1.val, $3.val))
3348 gmp_printf("Both equal %Qd\n", $1.val);
3350 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3355 | NEWLINE ${ printf("Blank line\n"); }$
3356 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3359 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3360 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3361 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3362 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3363 | Expression // Expression ${ {
3366 mpz_init(z0); mpz_init(z1); mpz_init(z2);
3367 mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
3368 mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
3369 mpz_tdiv_q(z0, z1, z2);
3370 mpq_set_z($0.val, z0);
3371 mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
3373 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3374 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3379 3.1415926535 - 355/113
3381 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3383 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1