1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
24 ###### File: parsergen.c
29 ## forward declarations
40 ###### File: libparser.c
47 ###### File: parsergen.mk
50 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
53 ## Reading the grammar
55 The grammar must be stored in a markdown literate code file as parsed
56 by "[mdcode][]". It must have three top level (i.e. unreferenced)
57 sections: `header`, `code`, and `grammar`. The first two will be
58 literally copied into the generated `.c` and `.h`. files. The last
59 contains the grammar. This is tokenised with "[scanner][]".
61 If the `--tag` option is given, then any top level header that doesn't
62 start with the tag is ignored, and the tag is stripped from the rest. So
64 means that the three needed sections must be `Foo: header`, `Foo: code`,
65 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non}; /* associativity of a precedence level */
91 char *assoc_names[] = {"Left","Right","Non"}; /* printable names, indexed by enum assoc */
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
110 The strings reported by `mdcode` and `scanner` are `struct text` which have
111 length rather than being null terminated. To help with printing and
112 comparing we define `text_is` and `prtxt`, which should possibly go in
113 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
114 which might contain control characters.
116 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
117 because that is what we need to detect tags.
120 static int text_is(struct text t, char *s)
122 return (strlen(s) == t.len &&
123 strncmp(s, t.txt, t.len) == 0);
125 static void prtxt(struct text t)
127 printf("%.*s", t.len, t.txt);
130 static int strip_tag(struct text *t, char *tag)
132 int skip = strlen(tag) + 1;
133 if (skip >= t->len ||
134 strncmp(t->txt, tag, skip-1) != 0 ||
135 t->txt[skip-1] != ':')
137 while (skip < t->len && t->txt[skip] == ' ')
146 Productions are comprised primarily of symbols - terminal and
147 non-terminal. We do not make a syntactic distinction between the two,
148 though non-terminals must be identifiers. Non-terminal symbols are
149 those which appear in the head of a production, terminal symbols are
150 those which don't. There are also "virtual" symbols used for precedence
151 marking discussed later, and sometimes we won't know what type a symbol
154 To help with code safety it is possible to declare the terminal symbols.
155 If this is done, then any symbol used in a production that does not
156 appear in a head and is not declared is treated as an error.
158 ###### forward declarations
159 enum symtype { Unknown, Virtual, Terminal, Nonterminal }; /* classification of each grammar symbol */
160 char *symtypes = "UVTN"; /* one-character code per symtype, for compact reports */
163 ###### grammar fields
164 int terminals_declared;
166 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
167 table of known symbols and the resulting parser will report them as
168 `TK_reserved + N`. A small set of identifiers are reserved for the
169 different token types that `scanner` can report.
172 static char *reserved_words[] = {
173 [TK_error] = "ERROR",
174 [TK_number] = "NUMBER",
175 [TK_ident] = "IDENTIFIER",
177 [TK_string] = "STRING",
178 [TK_multi_string] = "MULTI_STRING",
181 [TK_newline] = "NEWLINE",
187 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
188 recognised. The former is automatically expected at the end of the text
189 being parsed. The latter are always ignored by the parser.
191 All of these symbols are stored in a simple symbol table. We use the
192 `struct text` from `mdcode` to store the name and link them together
193 into a sorted list using an insertion sort.
195 We don't have separate `find` and `insert` functions as any symbol we
196 find needs to be remembered. We simply expect `find` to always return a
197 symbol, but its type might be `Unknown`.
206 ###### grammar fields
211 static struct symbol *sym_find(struct grammar *g, struct text s)
213 struct symbol **l = &g->syms;
218 (cmp = text_cmp((*l)->name, s)) < 0)
222 n = calloc(1, sizeof(*n));
231 static void symbols_init(struct grammar *g)
233 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
235 for (i = 0; i < entries; i++) {
238 t.txt = reserved_words[i];
241 t.len = strlen(t.txt);
248 ### Data types and precedence.
250 Data type specification, precedence specification, and declaration of
251 terminals are all introduced by a dollar sign at the start of the line.
252 If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
253 precedence, if it is `TERM` then the line declares terminals without
254 precedence, otherwise it specifies a data type.
256 The data type name is simply stored and applied to the head of all
257 subsequent productions. It must be the name of a structure optionally
258 preceded by an asterisk which means a reference or "pointer". So
259 `$expression` maps to `struct expression` and `$*statement` maps to
260 `struct statement *`.
262 Any productions given before the first data type declaration will have
263 no data type associated with them and can carry no information. In
264 order to allow other non-terminals to have no type, the data type
265 `$void` can be given. This does *not* mean that `struct void` will be
266 used, but rather that no type will be associated with future
269 The precedence line must contain a list of symbols - typically
270 terminal symbols, but not necessarily. It can only contain symbols
271 that have not been seen yet, so precedence declaration must precede
272 the productions that they affect.
274 A precedence line may also contain a "virtual" symbol which is an
275 identifier preceded by `$$`. Virtual terminals carry precedence
276 information but are not included in the grammar. A production can
277 declare that it inherits the precedence of a given virtual symbol.
279 This use for `$$` precludes it from being used as a symbol in the
280 described language. Two other symbols: `${` and `}$` are also
283 Each new precedence line introduces a new precedence level and
284 declares how it associates. This level is stored in each symbol
285 listed and may be inherited by any production which uses the symbol. A
286 production inherits from the last symbol which has a precedence.
288 The symbols on the first precedence line have the lowest precedence.
289 Subsequent lines introduce symbols with higher precedence.
291 ###### grammar fields
292 struct text current_type;
297 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close }; /* token numbers for the marks below */
298 static const char *known[] = { "$$", "${", "}$" }; /* marks reserved by the grammar syntax */
301 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
303 struct token t = token_next(ts);
309 if (t.num != TK_ident) {
310 err = "type or assoc expected after '$'";
313 if (text_is(t.txt, "LEFT"))
315 else if (text_is(t.txt, "RIGHT"))
317 else if (text_is(t.txt, "NON"))
319 else if (text_is(t.txt, "TERM")) {
321 g->terminals_declared = 1;
323 g->current_type = t.txt;
324 g->type_isref = isref;
325 if (text_is(t.txt, "void"))
326 g->current_type.txt = NULL;
328 if (t.num != TK_newline) {
329 err = "Extra tokens after type name";
336 err = "$* cannot be followed by a precedence";
340 // This is a precedence or TERM line, need some symbols.
344 while (t.num != TK_newline) {
345 enum symtype type = Terminal;
347 if (t.num == TK_virtual) {
350 if (t.num != TK_ident) {
351 err = "$$ must be followed by a word";
355 err = "Virtual symbols not permitted on $TERM line";
358 } else if (t.num != TK_ident &&
360 err = "Illegal token in precedence line";
363 s = sym_find(g, t.txt);
364 if (s->type != Unknown) {
365 err = "Symbols in precedence/TERM line must not already be known.";
370 s->precedence = g->prec_levels;
377 err = "No symbols given on precedence/TERM line";
381 while (t.num != TK_newline && t.num != TK_eof)
388 A production either starts with an identifier which is the head
389 non-terminal, or a vertical bar (`|`) in which case this production
390 uses the same head as the previous one. The identifier must be
391 followed by a `->` mark. All productions for a given non-terminal must
392 be grouped together with the `nonterminal ->` given only once.
394 After this a (possibly empty) sequence of identifiers and marks form
395 the body of the production. A virtual symbol may be given after the
396 body by preceding it with `$$`. If a virtual symbol is given, the
397 precedence of the production is that for the virtual symbol. If none
398 is given, the precedence is inherited from the last symbol in the
399 production which has a precedence specified.
401 After the optional precedence may come the `${` mark. This indicates
402 the start of a code fragment. If present, this must be on the same
403 line as the start of the production.
405 All of the text from the `${` through to the matching `}$` is
406 collected and forms the code-fragment for the production. It must all
407 be in one `code_node` of the literate code. The `}$` must be
408 at the end of a line.
410 Text in the code fragment will undergo substitutions where `$N` or
411 `$<N`, for some numeric `N` (or non-numeric indicator as described
412 later), will be replaced with a variable holding the parse information
413 for the particular symbol in the production. `$0` is the head of the
414 production, `$1` is the first symbol of the body, etc. The type of `$N`
415 for a terminal symbol is `struct token`. For a non-terminal, it is
416 whatever has been declared for that symbol. The `<` may be included and
417 means that the value (usually a reference) is being moved out, so it
418 will not automatically be freed. The effect of using '<' is that the
419 variable is cleared to all-zeros.
421 Symbols that are left-recursive are a little special. These are symbols
422 that are both the head of a production and the first body symbol of the same
423 production. They are problematic when they appear in other productions
424 elsewhere than at the end, and when indenting is used to describe
425 structure. In this case there is no way for a smaller indent to ensure
426 the left-recursive symbol cannot be extended. When it appears at the
427 end of a production, that production can be reduced to ensure the symbol
428 isn't extended. So we record left-recursive symbols while reading the
429 grammar, and produce a warning when reporting the grammar if they are
430 found in an unsuitable place.
432 A symbol that is only left recursive in a production where it is
433 followed by newline does not cause these problems because the newline
434 will effectively terminate it.
436 While building productions we will need to add to an array which needs to
440 static void array_add(void *varray, int *cnt, void *new)
442 void ***array = varray;
445 current = ((*cnt-1) | (step-1))+1;
446 if (*cnt == current) {
449 *array = realloc(*array, current * sizeof(void*));
451 (*array)[*cnt] = new;
455 Collecting the code fragment simply involves reading tokens until we
456 hit the end token `}$`, and noting the character position of the start and
460 static struct text collect_code(struct token_state *state,
465 code.txt = start.txt.txt + start.txt.len;
467 t = token_next(state);
468 while (t.node == start.node &&
469 t.num != TK_close && t.num != TK_error &&
471 if (t.num == TK_close && t.node == start.node)
472 code.len = t.txt.txt - code.txt;
478 Now we have all the bits we need to parse a full production.
483 ###### grammar fields
484 struct production **productions;
485 int production_count;
487 ###### production fields
489 struct symbol **body;
495 int first_production;
499 static char *parse_production(struct grammar *g,
501 struct token_state *state)
503 /* Head has already been parsed. */
506 struct production p, *pp;
508 memset(&p, 0, sizeof(p));
510 tk = token_next(state);
511 while (tk.num == TK_ident || tk.num == TK_mark) {
512 struct symbol *bs = sym_find(g, tk.txt);
513 if (bs->type == Unknown) {
514 if (!g->terminals_declared)
517 if (bs->type == Virtual) {
518 err = "Virtual symbol not permitted in production";
521 if (bs->precedence) {
522 p.precedence = bs->precedence;
525 array_add(&p.body, &p.body_size, bs);
526 tk = token_next(state);
528 if (tk.num == TK_virtual) {
530 tk = token_next(state);
531 if (tk.num != TK_ident) {
532 err = "word required after $$";
535 vs = sym_find(g, tk.txt);
536 if (vs->num == TK_newline)
538 else if (vs->num == TK_out)
540 else if (vs->precedence == 0) {
541 err = "symbol after $$ must have precedence";
544 p.precedence = vs->precedence;
547 tk = token_next(state);
549 if (tk.num == TK_open) {
550 p.code_line = tk.line;
551 p.code = collect_code(state, tk);
552 if (p.code.txt == NULL) {
553 err = "code fragment not closed properly";
556 tk = token_next(state);
558 if (p.body_size >= 2 &&
559 p.body[0] == p.head &&
560 p.body[1]->num != TK_newline)
561 p.head->left_recursive = 1;
563 if (tk.num != TK_newline && tk.num != TK_eof) {
564 err = "stray tokens at end of line";
567 pp = malloc(sizeof(*pp));
569 array_add(&g->productions, &g->production_count, pp);
572 while (tk.num != TK_newline && tk.num != TK_eof)
573 tk = token_next(state);
577 With the ability to parse production and dollar-lines, we have nearly all
578 that we need to parse a grammar from a `code_node`.
580 The head of the first production will effectively be the `start` symbol of
581 the grammar. However it won't _actually_ be so. Processing the grammar is
582 greatly simplified if the real start symbol only has a single production,
583 and expects `$eof` as the final terminal. So when we find the first
584 explicit production we insert an extra production as production zero which
587 ###### Example: production 0
590 where `START` is the first non-terminal given.
592 ###### create production zero
593 struct production *p = calloc(1,sizeof(*p));
594 struct text start = {"$start",6};
595 struct text eof = {"$eof",4};
596 struct text code = {"$0 = $<1;", 9};
597 p->head = sym_find(g, start);
598 p->head->type = Nonterminal;
599 p->head->struct_name = g->current_type;
600 p->head->isref = g->type_isref;
601 if (g->current_type.txt)
603 array_add(&p->body, &p->body_size, head);
604 array_add(&p->body, &p->body_size, sym_find(g, eof));
605 p->head->first_production = g->production_count;
606 array_add(&g->productions, &g->production_count, p);
608 Now we are ready to read in the grammar. We ignore comments
609 and strings so that the marks which introduce them can be
610 used as terminals in the grammar. We don't ignore numbers
611 even though we don't allow them as that causes the scanner
612 to produce errors that the parser is better positioned to handle.
615 static struct grammar *grammar_read(struct code_node *code)
617 struct token_config conf = {
620 .words_marks = known,
621 .known_count = sizeof(known)/sizeof(known[0]),
623 .ignored = (1 << TK_line_comment)
624 | (1 << TK_block_comment)
627 | (1 << TK_multi_string)
632 struct token_state *state = token_open(code, &conf);
634 struct symbol *head = NULL;
638 g = calloc(1, sizeof(*g));
641 for (tk = token_next(state); tk.num != TK_eof;
642 tk = token_next(state)) {
643 if (tk.num == TK_newline)
645 if (tk.num == TK_ident) {
647 head = sym_find(g, tk.txt);
648 if (head->type == Nonterminal)
649 err = "This non-terminal has already be used.";
650 else if (head->type == Virtual)
651 err = "Virtual symbol not permitted in head of production";
653 head->type = Nonterminal;
654 head->struct_name = g->current_type;
655 head->isref = g->type_isref;
656 if (g->production_count == 0) {
657 ## create production zero
659 head->first_production = g->production_count;
660 tk = token_next(state);
661 if (tk.num == TK_mark &&
662 text_is(tk.txt, "->"))
663 err = parse_production(g, head, state);
665 err = "'->' missing in production";
667 } else if (tk.num == TK_mark
668 && text_is(tk.txt, "|")) {
669 // another production for same non-term
671 err = parse_production(g, head, state);
673 err = "First production must have a head";
674 } else if (tk.num == TK_mark
675 && text_is(tk.txt, "$")) {
676 err = dollar_line(state, g, 0);
677 } else if (tk.num == TK_mark
678 && text_is(tk.txt, "$*")) {
679 err = dollar_line(state, g, 1);
680 } else if (tk.num == TK_mark
681 && text_is(tk.txt, "//")) {
682 while (tk.num != TK_newline &&
684 tk = token_next(state);
686 err = "Unrecognised token at start of line.";
692 if (g->terminals_declared) {
695 for (s = g->syms; s; s = s->next) {
696 if (s->type != Unknown)
699 fprintf(stderr, "Token %.*s not declared\n",
700 s->name.len, s->name.txt);
709 fprintf(stderr, "Error at line %d: %s\n",
716 ## Analysing the grammar
718 The central task in analysing the grammar is to determine a set of
719 states to drive the parsing process. This will require finding
720 various sets of symbols and of "items". Some of these sets will need
721 to attach information to each element in the set, so they are more
724 Each "item" may have a 'look-ahead' or `LA` set associated with
725 it. Many of these will be the same as each other. To avoid memory
726 wastage, and to simplify some comparisons of sets, these sets will be
727 stored in a list of unique sets, each assigned a number.
729 Once we have the data structures in hand to manage these sets and
730 lists, we can start setting the 'nullable' flag, build the 'FIRST'
731 sets, and then create the item sets which define the various states.
735 Though we don't only store symbols in these sets, they are the main
736 things we store, so they are called symbol sets or "symsets".
738 A symset has a size, an array of shorts, and an optional array of data
739 values, which are also shorts. If the array of data is not present,
740 we store an impossible pointer, as `NULL` is used to indicate that no
741 memory has been allocated yet.
746 unsigned short *syms, *data;
748 #define NO_DATA ((unsigned short *)1) /* sentinel: this symset deliberately has no data array */
749 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA } /* empty set that carries no data values */;
750 const struct symset INIT_DATASET = { 0, NULL, NULL } /* empty set whose data array is not yet allocated */;
752 The arrays of shorts are allocated in blocks of 8 and are kept sorted
753 by using an insertion sort. We don't explicitly record the amount of
754 allocated space as it can be derived directly from the current `cnt` using
755 `((cnt - 1) | 7) + 1`.
758 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
761 int current = ((s->cnt-1) | 7) + 1;
762 if (current == s->cnt) {
764 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
765 if (s->data != NO_DATA)
766 s->data = realloc(s->data, sizeof(*s->data) * current);
769 while (i > 0 && s->syms[i-1] > key) {
770 s->syms[i] = s->syms[i-1];
771 if (s->data != NO_DATA)
772 s->data[i] = s->data[i-1];
776 if (s->data != NO_DATA)
781 Finding a symbol (or item) in a `symset` uses a simple binary search.
782 We return the index where the value was found (so data can be accessed),
783 or `-1` to indicate failure.
785 static int symset_find(struct symset *ss, unsigned short key)
792 while (lo + 1 < hi) {
793 int mid = (lo + hi) / 2;
794 if (ss->syms[mid] <= key)
799 if (ss->syms[lo] == key)
804 We will often want to form the union of two symsets. When we do, we
805 will often want to know if anything changed (as that might mean there
806 is more work to do). So `symset_union` reports whether anything was
807 added to the first set. We use a slow quadratic approach as these
808 sets don't really get very big. If profiling shows this to be a problem it
809 can be optimised later.
811 static int symset_union(struct symset *a, struct symset *b)
815 for (i = 0; i < b->cnt; i++)
816 if (symset_find(a, b->syms[i]) < 0) {
817 unsigned short data = 0;
818 if (b->data != NO_DATA)
820 symset_add(a, b->syms[i], data);
826 And of course we must be able to free a symset.
828 static void symset_free(struct symset ss)
831 if (ss.data != NO_DATA)
837 Some symsets are simply stored somewhere appropriate in the `grammar`
838 data structure, others need a bit of indirection. This applies
839 particularly to "LA" sets. These will be paired with "items" in an
840 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
841 stored in the `data` field, so we need a mapping from a small (short)
842 number to an LA `symset`.
844 As mentioned earlier this involves creating a list of unique symsets.
846 For now, we just use a linear list sorted by insertion. A skiplist
847 would be more efficient and may be added later.
854 struct setlist *next;
857 ###### grammar fields
858 struct setlist *sets;
863 static int ss_cmp(struct symset a, struct symset b)
866 int diff = a.cnt - b.cnt;
869 for (i = 0; i < a.cnt; i++) {
870 diff = (int)a.syms[i] - (int)b.syms[i];
877 static int save_set(struct grammar *g, struct symset ss)
879 struct setlist **sl = &g->sets;
883 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
890 s = malloc(sizeof(*s));
899 Finding a set by number is currently performed by a simple linear search.
900 If this turns out to hurt performance, we can store the sets in a dynamic
901 array like the productions.
903 static struct symset set_find(struct grammar *g, int num)
905 struct setlist *sl = g->sets;
906 while (sl && sl->num != num)
911 ### Setting `nullable`
913 We set `nullable` on the head symbol for any production for which all
914 the body symbols (if any) are nullable. As this is a recursive
915 definition, any change in the `nullable` setting means that we need to
916 re-evaluate where it needs to be set.
918 We simply loop around performing the same calculations until no more
925 static void set_nullable(struct grammar *g)
928 while (check_again) {
931 for (p = 0; p < g->production_count; p++) {
932 struct production *pr = g->productions[p];
935 if (pr->head->nullable)
937 for (s = 0; s < pr->body_size; s++)
938 if (! pr->body[s]->nullable)
940 if (s == pr->body_size) {
941 pr->head->nullable = 1;
948 ### Setting `line_like`
950 In order to be able to ignore newline tokens when not relevant, but
951 still include them in the parse when needed, we will need to know
952 which states can start a "line-like" section of code. We ignore
953 newlines when there is an indent since the most recent start of a
956 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
957 If a symbol cannot derive a NEWLINE, then it is only part of a line -
958 so is word-like. If it can derive a NEWLINE, then we consider it to
961 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
962 is the head of a production that contains a line_like symbol is also a
963 line-like symbol. We use a new field `line_like` to record this
964 attribute of symbols, and compute it in a repetitive manner similar to
971 static void set_line_like(struct grammar *g)
974 g->symtab[TK_newline]->line_like = 1;
975 while (check_again) {
978 for (p = 0; p < g->production_count; p++) {
979 struct production *pr = g->productions[p];
982 if (pr->head->line_like)
985 for (s = 0 ; s < pr->body_size; s++) {
986 if (pr->body[s]->line_like) {
987 pr->head->line_like = 1;
996 ### Building the `first` sets
998 When calculating what can follow a particular non-terminal, we will need to
999 know what the "first" terminal in any subsequent non-terminal might be. So
1000 we calculate the `first` set for every non-terminal and store them in an
1001 array. We don't bother recording the "first" set for terminals as they are
1004 As the "first" for one symbol might depend on the "first" of another,
1005 we repeat the calculations until no changes happen, just like with
1006 `set_nullable`. This makes use of the fact that `symset_union`
1007 reports if any change happens.
1009 The core of this, which finds the "first" of part of a production body,
1010 will be reused for computing the "follow" sets, so we split it out
1011 into a separate function.
1013 ###### grammar fields
1014 struct symset *first;
1018 static int add_first(struct production *pr, int start,
1019 struct symset *target, struct grammar *g,
1024 for (s = start; s < pr->body_size; s++) {
1025 struct symbol *bs = pr->body[s];
1026 if (bs->type == Terminal) {
1027 if (symset_find(target, bs->num) < 0) {
1028 symset_add(target, bs->num, 0);
1032 } else if (symset_union(target, &g->first[bs->num]))
1038 *to_end = (s == pr->body_size);
1042 static void build_first(struct grammar *g)
1044 int check_again = 1;
1046 g->first = calloc(g->num_syms, sizeof(g->first[0]));
1047 for (s = 0; s < g->num_syms; s++)
1048 g->first[s] = INIT_SYMSET;
1050 while (check_again) {
1053 for (p = 0; p < g->production_count; p++) {
1054 struct production *pr = g->productions[p];
1055 struct symset *head = &g->first[pr->head->num];
1057 if (add_first(pr, 0, head, g, NULL))
1063 ### Building the `follow` sets.
1065 There are two different situations when we will want to generate "follow"
1066 sets. If we are doing an SLR analysis, we want to generate a single
1067 "follow" set for each non-terminal in the grammar. That is what is
1068 happening here. If we are doing an LALR or LR analysis we will want
1069 to generate a separate "LA" set for each item. We do that later
1070 in state generation.
1072 There are two parts to generating a "follow" set. Firstly we look at
1073 every place that any non-terminal appears in the body of any
1074 production, and we find the set of possible "first" symbols after
1075 there. This is done using `add_first` above and only needs to be done
1076 once as the "first" sets are now stable and will not change.
1080 for (p = 0; p < g->production_count; p++) {
1081 struct production *pr = g->productions[p];
1084 for (b = 0; b < pr->body_size - 1; b++) {
1085 struct symbol *bs = pr->body[b];
1086 if (bs->type == Terminal)
1088 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1092 The second part is to add the "follow" set of the head of a production
1093 to the "follow" sets of the final symbol in the production, and any
1094 other symbol which is followed only by `nullable` symbols. As this
1095 depends on "follow" itself we need to repeatedly perform the process
1096 until no further changes happen.
1100 for (again = 0, p = 0;
1101 p < g->production_count;
1102 p < g->production_count-1
1103 ? p++ : again ? (again = 0, p = 0)
1105 struct production *pr = g->productions[p];
1108 for (b = pr->body_size - 1; b >= 0; b--) {
1109 struct symbol *bs = pr->body[b];
1110 if (bs->type == Terminal)
1112 if (symset_union(&g->follow[bs->num],
1113 &g->follow[pr->head->num]))
1120 We now just need to create and initialise the `follow` list to get a
1123 ###### grammar fields
1124 struct symset *follow;
1127 static void build_follow(struct grammar *g)
1130 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1131 for (s = 0; s < g->num_syms; s++)
1132 g->follow[s] = INIT_SYMSET;
1136 ### Building itemsets and states
1138 There are three different levels of detail that can be chosen for
1139 building the itemsets and states for the LR grammar. They are:
1141 1. LR(0) or SLR(1), where no look-ahead is considered.
1142 2. LALR(1) where we build look-ahead sets with each item and merge
1143 the LA sets when we find two paths to the same "kernel" set of items.
1144 3. LR(1) where different look-ahead for any item in the set means
1145 a different state must be created.
1147 ###### forward declarations
1148 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1150 We need to be able to look through existing states to see if a newly
1151 generated state already exists. For now we use a simple sorted linked
1154 An item is a pair of numbers: the production number and the position of
1155 "DOT", which is an index into the body of the production.
1156 As the numbers are not enormous we can combine them into a single "short"
1157 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1158 production number (so 4000 productions with maximum of 15 symbols in the
1161 Comparing the itemsets will be a little different to comparing symsets
1162 as we want to do the lookup after generating the "kernel" of an
1163 itemset, so we need to ignore the offset=zero items which are added during
1166 To facilitate this, we modify the "DOT" number so that "0" sorts to
1167 the end of the list in the symset, and then only compare items before
1171 static inline unsigned short item_num(int production, int index)
1173 return production | ((31-index) << 11);
1175 static inline int item_prod(unsigned short item)
1177 return item & 0x7ff;
1179 static inline int item_index(unsigned short item)
1181 return (31-(item >> 11)) & 0x1f;
1184 For LR(1) analysis we need to compare not just the itemset in a state
1185 but also the LA sets. As we assign each unique LA set a number, we
1186 can just compare the symset and the data values together.
1189 static int itemset_cmp(struct symset a, struct symset b,
1190 enum grammar_type type)
1196 i < a.cnt && i < b.cnt &&
1197 item_index(a.syms[i]) > 0 &&
1198 item_index(b.syms[i]) > 0;
1200 int diff = a.syms[i] - b.syms[i];
1204 diff = a.data[i] - b.data[i];
1209 if (i == a.cnt || item_index(a.syms[i]) == 0)
1213 if (i == b.cnt || item_index(b.syms[i]) == 0)
1219 if (type < LR1 || av == -1)
1222 a.data[i] - b.data[i];
1225 It will be helpful to know if an itemset has been "completed" or not,
1226 particularly for LALR where itemsets get merged, at which point they
1227 need to be considered for completion again. So a `completed` flag is needed.
1229 For correct handling of `TK_newline` when parsing, we will need to
1230 know which states (itemsets) can occur at the start of a line, so we
1231 will record a `starts_line` flag too whenever DOT is at the start of a
1234 Finally, for handling `TK_out` we need to know whether productions in the
1235 current state started *before* the most recent indent. A state
1236 doesn't usually keep details of individual productions, so we need to
1237 add one extra detail. `min_prefix` is the smallest non-zero number of
1238 symbols *before* DOT in any production in an itemset. This will allow
1239 us to determine if the most recent indent is sufficiently recent
1240 to cancel it against a `TK_out`. If it was seen longer ago than the
1241 `min_prefix`, and if the current state cannot be reduced, then the
1242 indented section must have ended in the middle of a syntactic unit, so
1243 an error must be signaled.
1245 And now we can build the list of itemsets. The lookup routine returns
1246 both a success flag and a pointer to where in the list an insert
1247 should happen, so we don't need to search a second time.
1251 struct itemset *next;
1253 struct symset items;
1254 struct symset go_to;
1256 unsigned short precedence;
1262 ###### grammar fields
1263 struct itemset *items;
1267 static int itemset_find(struct grammar *g, struct itemset ***where,
1268 struct symset kernel, enum grammar_type type)
1270 struct itemset **ip;
1272 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1273 struct itemset *i = *ip;
1275 diff = itemset_cmp(i->items, kernel, type);
1288 Adding an itemset may require merging the LA sets if LALR analysis is
1289 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1290 clear the `completed` flag so that the dependants of this itemset will be
1291 recalculated and their LA sets updated.
1293 `add_itemset` must consume the symsets it is passed, either by adding
1294 them to a data structure, or freeing them.
1296 static int add_itemset(struct grammar *g, struct symset ss,
1297 enum assoc assoc, unsigned short precedence,
1298 enum grammar_type type)
1300 struct itemset **where, *is;
1302 int found = itemset_find(g, &where, ss, type);
1304 is = calloc(1, sizeof(*is));
1305 is->state = g->states;
1309 is->precedence = precedence;
1311 is->go_to = INIT_DATASET;
1320 for (i = 0; i < ss.cnt; i++) {
1321 struct symset temp = INIT_SYMSET, s;
1322 if (ss.data[i] == is->items.data[i])
1324 s = set_find(g, is->items.data[i]);
1325 symset_union(&temp, &s);
1326 s = set_find(g, ss.data[i]);
1327 if (symset_union(&temp, &s)) {
1328 is->items.data[i] = save_set(g, temp);
1339 To build all the itemsets, we first insert the initial itemset made
1340 from production zero, complete each itemset, and then generate new
1341 itemsets from old until no new ones can be made.
1343 Completing an itemset means finding all the items where "DOT" is followed by
1344 a nonterminal and adding "DOT=0" items for every production from that
1345 non-terminal - providing each item hasn't already been added.
1347 If LA sets are needed, the LA set for each new item is found using
1348 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1349 be supplemented by the LA set for the item which produces the new item.
1351 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1352 is used in the next stage.
1353 If any of these symbols are flagged as `line_like`, then this
1354 state must be a `starts_line` state so now is a good time to record that.
1356 When itemsets are created we assign a precedence to the itemset from
1357 the complete item, if there is one. We ignore the possibility of
1358 there being two and don't (currently) handle precedence in such
1359 grammars. When completing a grammar we ignore any item where DOT is
1360 followed by a terminal with a precedence lower than that for the
1361 itemset. Unless the terminal has right associativity, we also ignore
1362 items where the terminal has the same precedence. The result is that
1363 unwanted items are still in the itemset, but the terminal doesn't get
1364 into the go to set, so the item is ineffective.
1366 ###### complete itemset
1367 for (i = 0; i < is->items.cnt; i++) {
1368 int p = item_prod(is->items.syms[i]);
1369 int bs = item_index(is->items.syms[i]);
1370 struct production *pr = g->productions[p];
1373 struct symset LA = INIT_SYMSET;
1374 unsigned short sn = 0;
1375 struct symset LAnl = INIT_SYMSET;
1376 unsigned short snnl = 0;
1378 if (is->min_prefix == 0 ||
1379 (bs > 0 && bs < is->min_prefix))
1380 is->min_prefix = bs;
1381 if (bs == pr->body_size)
1384 if (s->precedence && is->precedence &&
1385 is->precedence > s->precedence)
1386 /* This terminal has a low precedence and
1387 * shouldn't be shifted
1390 if (s->precedence && is->precedence &&
1391 is->precedence == s->precedence && s->assoc != Right)
1392 /* This terminal has a matching precedence and is
1393 * not Right-associative, so we mustn't shift it.
1396 if (symset_find(&done, s->num) < 0) {
1397 symset_add(&done, s->num, 0);
1399 if (s->type != Nonterminal)
1402 is->starts_line = 1;
1407 add_first(pr, bs+1, &LA, g, &to_end);
1409 struct symset ss = set_find(g, is->items.data[i]);
1410 symset_union(&LA, &ss);
1412 sn = save_set(g, LA);
1413 LA = set_find(g, sn);
1414 if (symset_find(&LA, TK_newline))
1415 symset_add(&LAnl, TK_newline, 0);
1416 snnl = save_set(g, LAnl);
1417 LAnl = set_find(g, snnl);
1420 /* Add productions for this symbol */
1421 for (p2 = s->first_production;
1422 p2 < g->production_count &&
1423 g->productions[p2]->head == s;
1425 int itm = item_num(p2, 0);
1426 int pos = symset_find(&is->items, itm);
1428 if (g->productions[p2]->line_like)
1429 symset_add(&is->items, itm, snnl);
1431 symset_add(&is->items, itm, sn);
1432 /* Will have re-ordered, so start
1433 * from beginning again */
1435 } else if (type >= LALR) {
1436 struct symset ss = set_find(g, is->items.data[pos]);
1437 struct symset tmp = INIT_SYMSET;
1438 struct symset *la = &LA;
1440 if (g->productions[p2]->line_like)
1442 symset_union(&tmp, &ss);
1443 if (symset_union(&tmp, la)) {
1444 is->items.data[pos] = save_set(g, tmp);
1452 For each symbol we found (and placed in `done`) we collect all the items for
1453 which this symbol is next, and create a new itemset with "DOT" advanced over
1454 the symbol. This is then added to the collection of itemsets (or merged
1455 with a pre-existing itemset).
1457 ###### derive itemsets
1458 // Now we have a completed itemset, so we need to
1459 // compute all the 'next' itemsets and create them
1460 // if they don't exist.
1461 for (i = 0; i < done.cnt; i++) {
1463 unsigned short state;
1464 struct symbol *sym = g->symtab[done.syms[i]];
1465 enum assoc assoc = Non;
1466 unsigned short precedence = 0;
1467 struct symset newitemset = INIT_SYMSET;
1469 newitemset = INIT_DATASET;
1471 for (j = 0; j < is->items.cnt; j++) {
1472 int itm = is->items.syms[j];
1473 int p = item_prod(itm);
1474 int bp = item_index(itm);
1475 struct production *pr = g->productions[p];
1476 unsigned short la = 0;
1479 if (bp == pr->body_size)
1481 if (pr->body[bp] != sym)
1484 la = is->items.data[j];
1485 pos = symset_find(&newitemset, pr->head->num);
1486 if (bp + 1 == pr->body_size &&
1487 pr->precedence > 0 &&
1488 pr->precedence > precedence) {
1489 // new itemset is reducible and has a precedence.
1490 precedence = pr->precedence;
1494 symset_add(&newitemset, item_num(p, bp+1), la);
1495 else if (type >= LALR) {
1496 // Need to merge la set.
1497 int la2 = newitemset.data[pos];
1499 struct symset ss = set_find(g, la2);
1500 struct symset LA = INIT_SYMSET;
1501 symset_union(&LA, &ss);
1502 ss = set_find(g, la);
1503 if (symset_union(&LA, &ss))
1504 newitemset.data[pos] = save_set(g, LA);
1510 state = add_itemset(g, newitemset, assoc, precedence, type);
1511 if (symset_find(&is->go_to, done.syms[i]) < 0)
1512 symset_add(&is->go_to, done.syms[i], state);
1515 All that is left is to create the initial itemset from production zero, and
1516 with `TK_eof` as the LA set.
1519 static void build_itemsets(struct grammar *g, enum grammar_type type)
1521 struct symset first = INIT_SYMSET;
1524 unsigned short la = 0;
1526 // LA set just has eof
1527 struct symset eof = INIT_SYMSET;
1528 symset_add(&eof, TK_eof, 0);
1529 la = save_set(g, eof);
1530 first = INIT_DATASET;
1532 // production 0, offset 0 (with no data)
1533 symset_add(&first, item_num(0, 0), la);
1534 add_itemset(g, first, Non, 0, type);
1535 for (again = 0, is = g->items;
1537 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1539 struct symset done = INIT_SYMSET;
1550 ### Completing the analysis.
1552 The exact process of analysis depends on which level was requested. For
1553 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1554 `LR1` we need first, but not follow. For `SLR` we need both.
1556 We don't build the "action" tables that you might expect as the parser
1557 doesn't use them. They will be calculated to some extent if needed for
1560 Once we have built everything we allocate arrays for the two lists:
1561 symbols and itemsets. This allows more efficient access during reporting.
1562 The symbols are grouped as terminals and non-terminals and we record the
1563 changeover point in `first_nonterm`.
1565 ###### grammar fields
1566 struct symbol **symtab;
1567 struct itemset **statetab;
1570 ###### grammar_analyse
1572 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1576 int snum = TK_reserved;
1577 for (s = g->syms; s; s = s->next)
1578 if (s->num < 0 && s->type == Terminal) {
1582 g->first_nonterm = snum;
1583 for (s = g->syms; s; s = s->next)
1584 if (s->num < 0 && s->type != Virtual) {
1588 for (s = g->syms; s; s = s->next)
1594 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1595 for (s = g->syms; s; s = s->next)
1596 g->symtab[s->num] = s;
1606 build_itemsets(g, type);
1608 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1609 for (is = g->items; is ; is = is->next)
1610 g->statetab[is->state] = is;
1613 ## Reporting on the Grammar
1615 The purpose of the report is to give the grammar developer insight into
1616 how the grammar parser will work. It is basically a structured dump of
1617 all the tables that have been generated, plus a description of any conflicts.
1619 ###### grammar_report
1620 static int grammar_report(struct grammar *g, enum grammar_type type)
1626 return report_conflicts(g, type) + report_left_recursive(g);
1629 Firstly we have the complete list of symbols, together with the
1630 "FIRST" set if that was generated. We add a mark to each symbol to
1631 show if it can end in a newline (`>`), if it is considered to be
1632 "line-like" (`<`), or if it is nullable (`.`).
1636 static void report_symbols(struct grammar *g)
1640 printf("SYMBOLS + FIRST:\n");
1642 printf("SYMBOLS:\n");
1644 for (n = 0; n < g->num_syms; n++) {
1645 struct symbol *s = g->symtab[n];
1649 printf(" %c%c%3d%c: ",
1650 s->nullable ? '.':' ',
1651 s->line_like ? '<':' ',
1652 s->num, symtypes[s->type]);
1655 printf(" (%d%s)", s->precedence,
1656 assoc_names[s->assoc]);
1658 if (g->first && s->type == Nonterminal) {
1661 for (j = 0; j < g->first[n].cnt; j++) {
1664 prtxt(g->symtab[g->first[n].syms[j]]->name);
1671 Then we have the follow sets if they were computed.
1673 static void report_follow(struct grammar *g)
1676 printf("FOLLOW:\n");
1677 for (n = 0; n < g->num_syms; n++)
1678 if (g->follow[n].cnt) {
1682 prtxt(g->symtab[n]->name);
1683 for (j = 0; j < g->follow[n].cnt; j++) {
1686 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1692 And finally the item sets. These include the GO TO tables and, for
1693 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1694 it up a bit. First the items, with production number and associativity.
1696 static void report_item(struct grammar *g, int itm)
1698 int p = item_prod(itm);
1699 int dot = item_index(itm);
1700 struct production *pr = g->productions[p];
1704 prtxt(pr->head->name);
1706 for (i = 0; i < pr->body_size; i++) {
1707 printf(" %s", (dot == i ? ". ": ""));
1708 prtxt(pr->body[i]->name);
1710 if (dot == pr->body_size)
1713 if (pr->precedence && dot == pr->body_size)
1714 printf(" (%d%s)", pr->precedence,
1715 assoc_names[pr->assoc]);
1716 if (dot < pr->body_size &&
1717 pr->body[dot]->precedence) {
1718 struct symbol *s = pr->body[dot];
1719 printf(" [%d%s]", s->precedence,
1720 assoc_names[s->assoc]);
1722 if (pr->line_like == 1)
1723 printf(" $$NEWLINE");
1724 else if (pr->line_like)
1729 The LA sets which are (possibly) reported with each item:
1731 static void report_la(struct grammar *g, int lanum)
1733 struct symset la = set_find(g, lanum);
1737 printf(" LOOK AHEAD(%d)", lanum);
1738 for (i = 0; i < la.cnt; i++) {
1741 prtxt(g->symtab[la.syms[i]]->name);
1746 Then the go to sets:
1748 static void report_goto(struct grammar *g, struct symset gt)
1753 for (i = 0; i < gt.cnt; i++) {
1755 prtxt(g->symtab[gt.syms[i]]->name);
1756 printf(" -> %d\n", gt.data[i]);
1760 Now we can report all the item sets complete with items, LA sets, and GO TO.
1762 static void report_itemsets(struct grammar *g)
1765 printf("ITEM SETS(%d)\n", g->states);
1766 for (s = 0; s < g->states; s++) {
1768 struct itemset *is = g->statetab[s];
1769 printf(" Itemset %d:%s min prefix=%d",
1770 s, is->starts_line?" (startsline)":"", is->min_prefix);
1772 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1774 for (j = 0; j < is->items.cnt; j++) {
1775 report_item(g, is->items.syms[j]);
1776 if (is->items.data != NO_DATA)
1777 report_la(g, is->items.data[j]);
1779 report_goto(g, is->go_to);
1783 ### Reporting conflicts
1785 Conflict detection varies a lot among different analysis levels. However
1786 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1787 LR1 are also similar as they have FOLLOW or LA sets.
1791 ## conflict functions
1793 static int report_conflicts(struct grammar *g, enum grammar_type type)
1796 printf("Conflicts:\n");
1798 cnt = conflicts_lr0(g, type);
1800 cnt = conflicts_slr(g, type);
1802 printf(" - no conflicts\n");
1806 LR0 conflicts are any state which has both a reducible item and
1807 a shiftable item, or two reducible items.
1809 LR05 conflicts only occur if two possible reductions exist,
1810 as shifts always over-ride reductions.
1812 ###### conflict functions
1813 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1817 for (i = 0; i < g->states; i++) {
1818 struct itemset *is = g->statetab[i];
1819 int last_reduce = -1;
1820 int prev_reduce = -1;
1821 int last_shift = -1;
1825 for (j = 0; j < is->items.cnt; j++) {
1826 int itm = is->items.syms[j];
1827 int p = item_prod(itm);
1828 int bp = item_index(itm);
1829 struct production *pr = g->productions[p];
1831 if (bp == pr->body_size) {
1832 prev_reduce = last_reduce;
1836 if (pr->body[bp]->type == Terminal)
1839 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1840 printf(" State %d has both SHIFT and REDUCE:\n", i);
1841 report_item(g, is->items.syms[last_shift]);
1842 report_item(g, is->items.syms[last_reduce]);
1845 if (prev_reduce >= 0) {
1846 printf(" State %d has 2 (or more) reducible items\n", i);
1847 report_item(g, is->items.syms[prev_reduce]);
1848 report_item(g, is->items.syms[last_reduce]);
1855 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1856 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1857 in the source of the look ahead set.
1859 We build two datasets to reflect the "action" table: one which maps
1860 terminals to items where that terminal could be shifted and another
1861 which maps terminals to items that could be reduced when the terminal
1862 is in look-ahead. We report when we get conflicts between the two.
1864 As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE
1865 terminal, we ignore it. NEWLINES are handled specially with their own
1866 rules for when to shift and when to reduce. Conflicts are expected,
1867 but handled internally.
1869 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1874 for (i = 0; i < g->states; i++) {
1875 struct itemset *is = g->statetab[i];
1876 struct symset shifts = INIT_DATASET;
1877 struct symset reduce = INIT_DATASET;
1881 /* First collect the shifts */
1882 for (j = 0; j < is->items.cnt; j++) {
1883 unsigned short itm = is->items.syms[j];
1884 int p = item_prod(itm);
1885 int bp = item_index(itm);
1886 struct production *pr = g->productions[p];
1889 if (bp >= pr->body_size ||
1890 pr->body[bp]->type != Terminal)
1895 if (s->precedence && is->precedence)
1896 /* Precedence resolves this, so no conflict */
1899 if (symset_find(&shifts, s->num) < 0)
1900 symset_add(&shifts, s->num, itm);
1902 /* Now look for reductions and conflicts */
1903 for (j = 0; j < is->items.cnt; j++) {
1904 unsigned short itm = is->items.syms[j];
1905 int p = item_prod(itm);
1906 int bp = item_index(itm);
1907 struct production *pr = g->productions[p];
1909 if (bp < pr->body_size)
1914 la = g->follow[pr->head->num];
1916 la = set_find(g, is->items.data[j]);
1918 for (k = 0; k < la.cnt; k++) {
1919 int pos = symset_find(&shifts, la.syms[k]);
1920 if (pos >= 0 && la.syms[k] != TK_newline) {
1921 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1923 prtxt(g->symtab[la.syms[k]]->name);
1925 report_item(g, shifts.data[pos]);
1926 report_item(g, itm);
1928 pos = symset_find(&reduce, la.syms[k]);
1930 symset_add(&reduce, la.syms[k], itm);
1933 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1934 prtxt(g->symtab[la.syms[k]]->name);
1936 report_item(g, itm);
1937 report_item(g, reduce.data[pos]);
1941 symset_free(shifts);
1942 symset_free(reduce);
1948 ### Reporting non-final left-recursive symbols.
1950 Left recursive symbols are a problem for parsers that honour indentation
1951 when they appear other than at the end of the production. So we need to
1952 report these when asked.
1956 static int report_left_recursive(struct grammar *g)
1959 int bad_left_recursive = 0;
1961 for (p = 0; p < g->production_count; p++) {
1962 struct production *pr = g->productions[p];
1965 for (sn = 0; sn < pr->body_size - 1; sn++) {
1966 struct symbol *s = pr->body[sn];
1968 if (s->left_recursive == 1 &&
1970 if (!bad_left_recursive) {
1971 bad_left_recursive = 1;
1972 printf("Misplaced left recursive symbols.\n");
1976 printf(" in production [%d]\n", p);
1977 s->left_recursive = 2;
1981 return bad_left_recursive;
1984 ## Generating the parser
1986 The exported part of the parser is the `parse_XX` function, where the name
1987 `XX` is based on the name of the parser files.
1989 This takes a `code_node`, a partially initialized `token_config`, and an
1990 optional `FILE` to send tracing to. The `token_config` gets the list of
1991 known words added and then is used with the `code_node` to initialize the
1994 `parse_XX` then calls the library function `parser_run` to actually complete
1995 the parse. This needs the `states` table and function to call the various
1996 pieces of code provided in the grammar file, so they are generated first.
1998 ###### parser_generate
2000 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
2001 struct code_node *pre_reduce)
2007 gen_reduce(f, g, file, pre_reduce);
2010 fprintf(f, "#line 0 \"gen_parser\"\n");
2011 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
2014 fprintf(f, "\tstruct token_state *tokens;\n");
2015 fprintf(f, "\tconfig->words_marks = known;\n");
2016 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
2017 fprintf(f, "\ttokens = token_open(code, config);\n");
2018 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
2019 fprintf(f, "\ttoken_close(tokens);\n");
2020 fprintf(f, "\treturn rv;\n");
2021 fprintf(f, "}\n\n");
2024 ### Known words table
2026 The known words table is simply an array of terminal symbols.
2027 The table of nonterminals used for tracing is a similar array. We
2028 include virtual symbols in the table of non_terminals to keep the
2033 static void gen_known(FILE *f, struct grammar *g)
2036 fprintf(f, "#line 0 \"gen_known\"\n");
2037 fprintf(f, "static const char *known[] = {\n");
2038 for (i = TK_reserved;
2039 i < g->num_syms && g->symtab[i]->type == Terminal;
2041 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2042 g->symtab[i]->name.txt);
2043 fprintf(f, "};\n\n");
2046 static void gen_non_term(FILE *f, struct grammar *g)
2049 fprintf(f, "#line 0 \"gen_non_term\"\n");
2050 fprintf(f, "static const char *non_term[] = {\n");
2051 for (i = TK_reserved;
2054 if (g->symtab[i]->type == Nonterminal)
2055 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2056 g->symtab[i]->name.txt);
2057 fprintf(f, "};\n\n");
2060 ### States and the goto tables.
2062 For each state we record the goto table, the reducible production if
2063 there is one, or a symbol to shift for error recovery.
2064 Some of the details of the reducible production are stored in the
2065 `do_reduce` function to come later. Here we store the production number,
2066 the body size (useful for stack management) and the resulting symbol (useful
2067 for knowing how to free data later).
2069 The go to table is stored in a simple array of `sym` and corresponding
2072 ###### exported types
2080 const struct lookup * go_to;
2091 static void gen_goto(FILE *f, struct grammar *g)
2094 fprintf(f, "#line 0 \"gen_goto\"\n");
2095 for (i = 0; i < g->states; i++) {
2097 fprintf(f, "static const struct lookup goto_%d[] = {\n",
2099 struct symset gt = g->statetab[i]->go_to;
2100 for (j = 0; j < gt.cnt; j++)
2101 fprintf(f, "\t{ %d, %d },\n",
2102 gt.syms[j], gt.data[j]);
2109 static void gen_states(FILE *f, struct grammar *g)
2112 fprintf(f, "#line 0 \"gen_states\"\n");
2113 fprintf(f, "static const struct state states[] = {\n");
2114 for (i = 0; i < g->states; i++) {
2115 struct itemset *is = g->statetab[i];
2116 int j, prod = -1, prod_len;
2118 for (j = 0; j < is->items.cnt; j++) {
2119 int itm = is->items.syms[j];
2120 int p = item_prod(itm);
2121 int bp = item_index(itm);
2122 struct production *pr = g->productions[p];
2124 if (bp < pr->body_size)
2126 /* This is what we reduce */
2127 if (prod < 0 || prod_len < pr->body_size) {
2129 prod_len = pr->body_size;
2134 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n",
2135 i, is->go_to.cnt, i, prod,
2136 g->productions[prod]->body_size,
2137 g->productions[prod]->head->num,
2139 g->productions[prod]->line_like,
2142 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n",
2143 i, is->go_to.cnt, i,
2144 is->starts_line, is->min_prefix);
2146 fprintf(f, "};\n\n");
2149 ### The `do_reduce` function and the code
2151 When the parser engine decides to reduce a production, it calls `do_reduce`.
2152 This has two functions.
2154 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2155 production being reduced. Secondly it runs the code that was included with
2156 the production if any.
2158 This code needs to be able to store data somewhere. Rather than requiring
2159 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2160 `do_reduce` return the size to be saved.
2162 In order for the code to access "global" context, we pass in the
2163 "config" pointer that was passed to parser function. If the `struct
2164 token_config` is embedded in some larger structure, the reducing code
2165 can access the larger structure using pointer manipulation.
2167 The code fragment requires translation when written out. Any `$N` needs to
2168 be converted to a reference either to that buffer (if `$0`) or to the
2169 structure returned by a previous reduction. These pointers need to be cast
2170 to the appropriate type for each access. All this is handled in
2173 `gen_code` also allows symbol references to contain a '`<`' as in
2174 '`$<2`'. This is particularly useful for references (or pointers), but
2175 can be used with structures too. The `<` implies that the value is
2176 being moved out, so the object will not be automatically freed. It is
2177 equivalent to assigning `NULL` to the pointer or filling a structure
2180 Instead of a number `N`, the `$` or `$<` can be followed by some letters
2181 and possibly a number. A number by itself (other than zero) selects a
2182 symbol from the body of the production. A sequence of letters selects
2183 the shortest symbol in the body which contains those letters in the given
2184 order. If a number follows the letters, then a later occurrence of
2185 that symbol is chosen. So "`$AB2`" will refer to the structure attached
2186 to the second occurrence of the shortest symbol which contains an `A`
2187 followed by a `B`. If there is no unique shortest symbol, or if the
2188 number given is too large, then the symbol reference is not transformed,
2189 and will cause an error when the code is compiled.
2193 static int textchr(struct text t, char c, int s)
2196 for (i = s; i < t.len; i++)
2202 static int subseq_match(char *seq, int slen, struct text name)
2206 st = textchr(name, *seq, st);
2216 static int choose_sym(char **namep, int len, struct production *p)
2218 char *name = *namep;
2227 ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
2233 while (len > 0 && (c >= '0' && c <= '9')) {
2236 n = n * 10 + (c - '0');
2246 for (i = 0; i < p->body_size; i++) {
2247 if (!subseq_match(nam, namlen, p->body[i]->name))
2249 if (slen == 0 || p->body[i]->name.len < slen)
2251 if (s >= 0 && p->body[i] != p->body[s] &&
2252 p->body[i]->name.len == p->body[s]->name.len)
2253 /* not unique, so s cannot be used */
2260 for (i = 0; i < p->body_size; i++)
2261 if (p->body[i] == p->body[s]) {
2272 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2275 char *used = calloc(1, p->body_size);
2278 fprintf(f, "\t\t\t");
2279 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2293 n = choose_sym(&c, p->code.txt + p->code.len - c, p);
2302 fprintf(f, "(*(struct %.*s*%s)ret)",
2303 p->head->struct_name.len,
2304 p->head->struct_name.txt,
2305 p->head->isref ? "*":"");
2306 else if (p->body[n-1]->type == Terminal)
2307 fprintf(f, "(*(struct token *)body[%d])",
2309 else if (p->body[n-1]->struct_name.txt == NULL)
2310 fprintf(f, "$%d", n);
2312 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2313 p->body[n-1]->struct_name.len,
2314 p->body[n-1]->struct_name.txt,
2315 p->body[n-1]->isref ? "*":"", n-1);
2321 for (i = 0; i < p->body_size; i++) {
2322 if (p->body[i]->struct_name.txt &&
2324 // assume this has been copied out
2325 if (p->body[i]->isref)
2326 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2328 fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
2336 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2337 struct code_node *code)
2340 fprintf(f, "#line 1 \"gen_reduce\"\n");
2341 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2343 fprintf(f, "\tint ret_size = 0;\n");
2345 code_node_print(f, code, file);
2347 fprintf(f, "#line 4 \"gen_reduce\"\n");
2348 fprintf(f, "\tswitch(prod) {\n");
2349 for (i = 0; i < g->production_count; i++) {
2350 struct production *p = g->productions[i];
2351 fprintf(f, "\tcase %d:\n", i);
2354 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2358 if (p->head->struct_name.txt)
2359 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2360 p->head->struct_name.len,
2361 p->head->struct_name.txt,
2362 p->head->isref ? "*":"");
2364 fprintf(f, "\t\tbreak;\n");
2366 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2371 As each non-terminal can potentially cause a different type of data
2372 structure to be allocated and filled in, we need to be able to free it when
2375 It is particularly important to have fine control over freeing during error
2376 recovery where individual stack frames might need to be freed.
2378 For this, the grammar author is required to define a `free_XX` function for
2379 each structure that is used by a non-terminal. `do_free` will call whichever
2380 is appropriate given a symbol number, and will call `free` (as is
2381 appropriate for tokens) on any terminal symbol.
2385 static void gen_free(FILE *f, struct grammar *g)
2389 fprintf(f, "#line 0 \"gen_free\"\n");
2390 fprintf(f, "static void do_free(short sym, void *asn)\n");
2392 fprintf(f, "\tif (!asn) return;\n");
2393 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2394 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2395 fprintf(f, "\tswitch(sym) {\n");
2397 for (i = 0; i < g->num_syms; i++) {
2398 struct symbol *s = g->symtab[i];
2400 s->type != Nonterminal ||
2401 s->struct_name.txt == NULL)
2404 fprintf(f, "\tcase %d:\n", s->num);
2406 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2408 s->struct_name.txt);
2409 fprintf(f, "\t\tfree(asn);\n");
2411 fprintf(f, "\t\tfree_%.*s(asn);\n",
2413 s->struct_name.txt);
2414 fprintf(f, "\t\tbreak;\n");
2416 fprintf(f, "\t}\n}\n\n");
2419 ## The main routine.
2421 There are three key parts to the "main" function of parsergen: processing
2422 the arguments, loading the grammar file, and dealing with the grammar.
2424 ### Argument processing.
2426 Command line options allow the selection of analysis mode, name of output
2427 file, and whether or not a report should be generated. By default we create
2428 a report only if no code output was requested.
2430 The `parse_XX` function name uses the basename of the output file which
2431 should not have a suffix (`.c`). `.c` is added to the given name for the
2432 code, and `.h` is added for the header (if header text is specified in the
2439 static const struct option long_options[] = {
2440 { "LR0", 0, NULL, '0' },
2441 { "LR05", 0, NULL, '5' },
2442 { "SLR", 0, NULL, 'S' },
2443 { "LALR", 0, NULL, 'L' },
2444 { "LR1", 0, NULL, '1' },
2445 { "tag", 1, NULL, 't' },
2446 { "report", 0, NULL, 'R' },
2447 { "output", 1, NULL, 'o' },
2448 { NULL, 0, NULL, 0 }
2450 const char *options = "05SL1t:Ro:";
2452 ###### process arguments
2454 char *outfile = NULL;
2459 enum grammar_type type = LR05;
2460 while ((opt = getopt_long(argc, argv, options,
2461 long_options, NULL)) != -1) {
2476 outfile = optarg; break;
2478 tag = optarg; break;
2480 fprintf(stderr, "Usage: parsergen ...\n");
2485 infile = argv[optind++];
2487 fprintf(stderr, "No input file given\n");
2490 if (outfile && report == 1)
2493 if (name && strchr(name, '/'))
2494 name = strrchr(name, '/')+1;
2496 if (optind < argc) {
2497 fprintf(stderr, "Excess command line arguments\n");
2501 ### Loading the grammar file
2503 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2506 Once we have extracted the code (with `mdcode`) we expect to find three
2507 sections: header, code, and grammar. Anything else that is not
2508 excluded by the `--tag` option is an error.
2510 "header" and "code" are optional, though it is hard to build a working
2511 parser with neither. "grammar" must be provided.
2515 #include <sys/mman.h>
2520 static void pr_err(char *msg)
2523 fprintf(stderr, "%s\n", msg);
2527 struct section *table;
2531 fd = open(infile, O_RDONLY);
2533 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2534 infile, strerror(errno));
2537 len = lseek(fd, 0, 2);
2538 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2539 table = code_extract(file, file+len, pr_err);
2541 struct code_node *hdr = NULL;
2542 struct code_node *code = NULL;
2543 struct code_node *gram = NULL;
2544 struct code_node *pre_reduce = NULL;
2545 for (s = table; s; s = s->next) {
2546 struct text sec = s->section;
2547 if (tag && !strip_tag(&sec, tag))
2549 if (text_is(sec, "header"))
2551 else if (text_is(sec, "code"))
2553 else if (text_is(sec, "grammar"))
2555 else if (text_is(sec, "reduce"))
2556 pre_reduce = s->code;
2558 fprintf(stderr, "Unknown content section: %.*s\n",
2559 s->section.len, s->section.txt);
2564 ### Processing the input
2566 As we need to append an extension to a filename and then open it for
2567 writing, and we need to do this twice, it helps to have a separate function.
2571 static FILE *open_ext(char *base, char *ext)
2573 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2575 strcat(strcpy(fn, base), ext);
2581 If we can read the grammar, then we analyse and optionally report on it. If
2582 the report finds conflicts we will exit with an error status.
2584 ###### process input
2585 struct grammar *g = NULL;
2587 fprintf(stderr, "No grammar section provided\n");
2590 g = grammar_read(gram);
2592 fprintf(stderr, "Failure to parse grammar\n");
2597 grammar_analyse(g, type);
2599 if (grammar_report(g, type))
2603 If a "headers" section is defined, we write it out and include a declaration
2604 for the `parse_XX` function so it can be used from separate code.
/* Write the ".h" file: the literal header section plus a prototype
 * for the generated parse_XX() entry point. */
2606 if (rv == 0 && hdr && outfile) {
2607 FILE *f = open_ext(outfile, ".h");
2609 code_node_print(f, hdr, infile);
2610 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2614 fprintf(stderr, "Cannot create %s.h\n",
2620 And if all goes well and an output file was provided, we create the `.c`
2621 file with the code section (if any) and the parser tables and function.
/* Write the ".c" file: the literal code section (if any) followed by
 * the generated tables and parser function. */
2623 if (rv == 0 && outfile) {
2624 FILE *f = open_ext(outfile, ".c");
2627 code_node_print(f, code, infile);
2628 gen_parser(f, g, infile, name, pre_reduce);
2631 fprintf(stderr, "Cannot create %s.c\n",
2637 And that about wraps it up. We need to set the locale so that UTF-8 is
2638 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2640 ###### File: parsergen.mk
# Link the generator; -licuuc because libmdcode depends on ICU.
2641 parsergen : parsergen.o libscanner.o libmdcode.o
2642 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
/* Entry point: set a locale so the scanner handles UTF-8 correctly,
 * then process command-line arguments (expanded by mdcode). */
2649 int main(int argc, char *argv[])
2654 setlocale(LC_ALL,"");
2656 ## process arguments
2663 ## The SHIFT/REDUCE parser
2665 Having analysed the grammar and generated all the tables, we only need the
2666 shift/reduce engine to bring it all together.
2668 ### Goto table lookup
2670 The parser generator has nicely provided us with goto tables sorted by
2671 symbol number. We need a binary search function to find a symbol in the
2674 ###### parser functions
/* Binary search the state's go_to table (sorted by symbol number)
 * for "sym".  Returns the target state on a match; the failure
 * return is in the elided lines - presumably -1, per callers
 * that test "search(...) >= 0". */
2676 static int search(const struct state *l, int sym)
2679 int hi = l->go_to_cnt;
2683 while (lo + 1 < hi) {
2684 int mid = (lo + hi) / 2;
2685 if (l->go_to[mid].sym <= sym)
2690 if (l->go_to[lo].sym == sym)
2691 return l->go_to[lo].state;
2696 ### The state stack.
2698 The core data structure for the parser is the stack. This tracks all the
2699 symbols that have been recognised or partially recognised.
2701 The stack usually won't grow very large - maybe a few tens of entries. So
2702 we dynamically resize an array as required but never bother to shrink it
2705 We keep the stack as two separate allocations. One, `asn_stack` stores the
2706 "abstract syntax nodes" which are created by each reduction. When we call
2707 `do_reduce` we need to pass an array of the `asn`s of the body of the
2708 production, and by keeping a separate `asn` stack, we can just pass a
2709 pointer into this stack.
2711 The other allocation stores all other stack fields of which there are six.
2712 The `state` is the most important one and guides the parsing process. The
2713 `sym` is nearly unnecessary. However when we want to free entries from the
2714 `asn_stack`, it helps to know what type they are so we can call the right
2715 freeing function. The symbol leads us to the right free function through
2718 The `indents` count tracks the line indents within the symbol or
2719 immediately following it. These are used to allow indent information to
2720 guide parsing and error recovery.
2722 `since_newline` tracks how many stack frames since the last
2723 start-of-line (whether indented or not). So if `since_newline` is
2724 zero, then this symbol is at the start of a line. Similarly
2725 `since_indent` counts the number of states since an indent, it is zero
2726 precisely when `indents` is not zero.
2728 `newline_permitted` keeps track of whether newlines should be ignored
2731 The stack is most properly seen as alternating states and symbols -
2732 states, like the 'DOT' in items, are between symbols. Each frame in
2733 our stack holds a state and the symbol that was before it. The
2734 bottom of stack holds the start state but no symbol, as nothing came
2735 before the beginning.
2737 ###### parser functions
/* May a NEWLINE token be shifted while this frame is on top? */
2742 short newline_permitted;
/* Frames since the last start-of-line; 0 means at start of line. */
2746 short since_newline;
2756 Two operations are needed on the stack - shift (which is like push) and pop.
2758 Shift applies not only to terminals but also to non-terminals. When
2759 we reduce a production we will pop off entries corresponding to the
2760 body symbols, then push on an item for the head of the production.
2761 This last is exactly the same process as shifting in a terminal so we
2762 use the same function for both. In both cases we provide the symbol,
2763 the number of indents the symbol contains (which will be zero for a
2764 terminal symbol) and a flag indicating that the symbol was at (or was
2765 reduced from a symbol which was at) the start of a line. The state is
2766 deduced from the current top-of-stack state and the new symbol.
2768 To simplify other code we arrange for `shift` to fail if there is no `goto`
2769 state for the symbol. This is useful in basic parsing due to our design
2770 that we shift when we can, and reduce when we cannot. So the `shift`
2771 function reports if it could.
2773 `shift` is also used to push state zero onto the stack, so if the
2774 stack is empty, it always chooses zero as the next state.
2776 So `shift` finds the next state. If that succeeds it extends the
2777 allocations if needed and pushes all the information onto the stacks.
2779 Newlines are permitted after a `starts_line` state until an internal
2780 indent. If the new frame has neither a `starts_line` state nor an
2781 indent, newlines are permitted if the previous stack frame permitted
2784 ###### parser functions
/* Push one frame: look up the goto state for "sym" from the current
 * top-of-stack (state zero when the stack is empty), growing both
 * stacks in lockstep if needed, and derive the newline/indent
 * tracking fields for the new frame.  Fails (presumably returning 0)
 * when there is no goto state - the failure path is elided here. */
2786 static int shift(struct parser *p,
2787 short sym, short indents, short start_of_line,
2789 const struct state states[])
2791 // Push an entry onto the stack
2792 struct frame next = {0};
2793 int newstate = p->tos
2794 ? search(&states[p->stack[p->tos-1].state],
/* Grow both allocations together so indices stay in sync.
 * NOTE(review): realloc results are assigned back unchecked. */
2799 if (p->tos >= p->stack_size) {
2800 p->stack_size += 10;
2801 p->stack = realloc(p->stack, p->stack_size
2802 * sizeof(p->stack[0]));
2803 p->asn_stack = realloc(p->asn_stack, p->stack_size
2804 * sizeof(p->asn_stack[0]));
/* newline_permitted: set after a starts_line state, cleared by an
 * internal indent, otherwise inherited from the previous frame
 * (the guarding conditionals are in the elided lines). */
2807 next.indents = indents;
2808 next.state = newstate;
2809 if (states[newstate].starts_line)
2810 next.newline_permitted = 1;
2812 next.newline_permitted = 0;
2814 next.newline_permitted =
2815 p->stack[p->tos-1].newline_permitted;
2817 next.newline_permitted = 0;
/* since_newline counts frames since start-of-line; since_indent
 * counts frames since an indent (zero when this frame has one). */
2819 if (!start_of_line) {
2821 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2823 next.since_newline = 1;
2826 next.since_indent = 0;
2828 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2830 next.since_indent = 1;
2832 p->stack[p->tos] = next;
2833 p->asn_stack[p->tos] = asn;
2838 `pop` primarily moves the top of stack (`tos`) back down the required
2839 amount and frees any `asn` entries that need to be freed. It also
2840 collects a summary of the indents and line starts in the symbols that
2841 are being removed. It is called _after_ we reduce a production, just
2842 before we `shift` the nonterminal in.
2844 ###### parser functions
/* Drop "num" frames from the stack, freeing each asn via do_free,
 * while summarising what was removed: accumulated indents (the
 * return value, per callers) and whether any removed frame was at
 * start-of-line (reported through *start_of_line). */
2846 static int pop(struct parser *p, int num,
2847 short *start_of_line,
2848 void(*do_free)(short sym, void *asn))
2855 for (i = 0; i < num; i++) {
2856 sol |= !p->stack[p->tos+i].since_newline;
2857 indents += p->stack[p->tos+i].indents;
2858 do_free(p->stack[p->tos+i].sym,
2859 p->asn_stack[p->tos+i]);
2862 *start_of_line = sol;
2866 ### Memory allocation
2868 The `scanner` returns tokens in a local variable - we want them in allocated
2869 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2870 a reduce is in a large buffer. Both of these require some allocation and
2871 copying, hence `memdup` and `tokcopy`.
2873 ###### parser includes
2876 ###### parser functions
/* Copy "len" bytes of "m" into freshly allocated memory so reduce
 * results can live on the asn_stack. */
2878 void *memdup(void *m, int len)
2884 memcpy(ret, m, len);
/* Copy a by-value token from the scanner into allocated memory.
 * NOTE(review): malloc result appears to be used unchecked. */
2888 static struct token *tok_copy(struct token tk)
2890 struct token *new = malloc(sizeof(*new));
2895 ### The heart of the parser.
2897 Now we have the parser. If we can shift we do, though newlines and
2898 reducing indenting may block that. If not and we can reduce we do
2899 that. If the production we reduced was production zero, then we have
2900 accepted the input and can finish.
2902 We return whatever `asn` was returned by reducing production zero.
2904 If we can neither shift nor reduce we have an error to handle. We pop
2905 single entries off the stack until we can shift the `TK_error` symbol,
2906 then drop input tokens until we find one we can shift into the new error
2907 state. We need to ensure that something is dropped or shifted after an
2908 error, or we could get into an infinite loop, only shifting in
2909 `TK_error`, then reducing. So we track if there has been a shift since
2910 the last error, and if not the next error always discards one token.
2912 When we find `TK_in` and `TK_out` tokens which report indents we need
2913 to handle them directly as the grammar cannot express what we want to
2916 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2917 record how many indents there are following the previous token.
2919 `TK_out` tokens must be canceled against an indent count
2920 within the stack. If we can reduce some symbols that are all since
2921 the most recent indent, then we do that first. If the minimum prefix
2922 of the current state then extends back before the most recent indent,
2923 that indent can be cancelled. If the minimum prefix is shorter than
2924 the indent had ended prematurely and we must start error handling, which
2925 is still a work-in-progress.
2927 `TK_newline` tokens are ignored unless the top stack frame records
2928 that they are permitted. In that case they will not be considered for
2929 shifting if it is possible to reduce some symbols that are all since
2930 the most recent start of line. This is how a newline forcibly
2931 terminates any line-like structure - we try to reduce down to at most
2932 one symbol for each line where newlines are allowed.
2933 A consequence of this is that a rule like
2935 ###### Example: newlines - broken
2939 IfStatement -> Newlines if ....
2941 cannot work, as the NEWLINE will never be shifted as the empty string
2942 will be reduced first. Optional sets of newlines need to be included
2943 in the thing that precedes:
2945 ###### Example: newlines - works
2949 IfStatement -> If ....
2951 Here the NEWLINE will be shifted because nothing can be reduced until
2954 When during error handling we discard tokens read in, we want to keep
2955 discarding until we see one that is recognised. If we had a full set
2956 of LR(1) grammar states, this would mean looking in the look-ahead set,
2957 but we don't keep a full look-ahead set. We only record the subset
2958 that leads to SHIFT. We can, however, deduce the look-ahead set by
2959 looking at the SHIFT subsets for all states that we can get to by
2960 reducing zero or more times. So we need a little function which
2961 checks if a given token is in any of these look-ahead sets.
2963 ###### parser includes
/* Deduce whether token "tk" is in the look-ahead set of "state":
 * follow zero-or-more default reductions, checking the SHIFT (goto)
 * table of each state reached.  Used while discarding tokens during
 * error recovery. */
2968 static int in_lookahead(struct token *tk, const struct state *states, int state)
2970 while (state >= 0) {
2971 if (search(&states[state], tk->num) >= 0)
2973 if (states[state].reduce_prod < 0)
2975 state = search(&states[state], states[state].reduce_sym);
/* The shift/reduce engine: shift when possible, otherwise reduce;
 * accepting production zero ends the parse and its asn is returned.
 * TK_in/TK_out/TK_newline get special handling as described in the
 * prose above.  NOTE(review): this excerpt elides many lines, so
 * control-flow comments below are keyed to the visible fragments. */
2980 void *parser_run(struct token_state *tokens,
2981 const struct state states[],
2982 int (*do_reduce)(int, void**, struct token_config*, void*),
2983 void (*do_free)(short, void*),
2984 FILE *trace, const char *non_term[],
2985 struct token_config *config)
2987 struct parser p = { 0 };
2988 struct token *tk = NULL;
2990 int shift_since_err = 1;
/* Seed the stack with state zero (shift with an empty stack). */
2993 shift(&p, TK_eof, 0, 1, NULL, states);
2995 struct token *err_tk;
2996 struct frame *tos = &p.stack[p.tos-1];
2998 tk = tok_copy(token_next(tokens));
2999 parser_trace(trace, &p,
3000 tk, states, non_term, config->known_count);
/* TK_in: record the indent in the top frame; it now starts a line
 * and (unless the state starts_line) forbids newlines. */
3002 if (tk->num == TK_in) {
3004 tos->since_newline = 0;
3005 tos->since_indent = 0;
3006 if (!states[tos->state].starts_line)
3007 tos->newline_permitted = 0;
3010 parser_trace_action(trace, "Record");
/* TK_out: reduce anything wholly since the last indent, then try
 * to cancel the indent recorded in frame "in". */
3013 if (tk->num == TK_out) {
3014 if (states[tos->state].reduce_size >= 0 &&
3015 states[tos->state].reduce_size <= tos->since_indent)
3017 if (states[tos->state].min_prefix >= tos->since_indent) {
3019 struct frame *in = tos - tos->since_indent;
3021 if (in->indents == 0) {
3022 /* Reassess since_indent and newline_permitted */
3024 in->since_indent = in[-1].since_indent + 1;
3025 in->newline_permitted = in[-1].newline_permitted;
3027 in->since_indent = 0;
3028 in->newline_permitted = 0;
3030 if (states[in->state].starts_line)
3031 in->newline_permitted = 1;
3034 in->since_indent = in[-1].since_indent + 1;
3035 if (states[in->state].starts_line)
3036 in->newline_permitted = 1;
3038 in->newline_permitted = in[-1].newline_permitted;
3043 parser_trace_action(trace, "Cancel");
3046 // fall through to error handling as both SHIFT and REDUCE
/* TK_newline: ignore when not permitted; otherwise reduce first
 * while everything since start-of-line can be reduced. */
3049 if (tk->num == TK_newline) {
3050 if (!tos->newline_permitted) {
3053 parser_trace_action(trace, "Discard");
3056 if (tos->since_newline > 1 &&
3057 states[tos->state].reduce_size >= 0 &&
3058 states[tos->state].reduce_size <= tos->since_newline)
/* Ordinary shift; remember it for error-recovery accounting. */
3061 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
3062 shift_since_err = 1;
3064 parser_trace_action(trace, "Shift");
/* A newline_only production must end at EOL, so other tokens are
 * errors unless we are already at start of line. */
3068 if (states[tos->state].reduce_prod >= 0 &&
3069 states[tos->state].newline_only &&
3070 !(tk->num == TK_newline ||
3071 tk->num == TK_eof ||
3072 tk->num == TK_out ||
3073 (tos->indents == 0 && tos->since_newline == 0))) {
3074 /* Anything other than newline or out or eof
3075 * is an error unless we are already at start
3076 * of line, as this production must end at EOL.
3078 } else if (states[tos->state].reduce_prod >= 0) {
/* Reduce: run the user action into a static buffer, pop the body,
 * copy the result with memdup, and shift the head nonterminal. */
3081 const struct state *nextstate = &states[tos->state];
3082 int prod = nextstate->reduce_prod;
3083 int size = nextstate->reduce_size;
3085 static char buf[16*1024];
3086 short indents, start_of_line;
3088 body = p.asn_stack + (p.tos - size);
3090 bufsize = do_reduce(prod, body, config, buf);
3092 indents = pop(&p, size, &start_of_line,
3094 res = memdup(buf, bufsize);
3095 memset(buf, 0, bufsize);
3096 if (!shift(&p, nextstate->reduce_sym,
3097 indents, start_of_line,
/* Shifting the head can only fail for production zero (accept). */
3099 if (prod != 0) abort();
3103 parser_trace_action(trace, "Reduce");
3106 /* Error. We walk up the stack until we
3107 * find a state which will accept TK_error.
3108 * We then shift in TK_error and see what state
3109 * that takes us to.
3110 * Then we discard input tokens until
3111 * we find one that is acceptable.
3113 parser_trace_action(trace, "ERROR");
3114 short indents = 0, start_of_line;
3116 err_tk = tok_copy(*tk);
3118 shift(&p, TK_error, 0, 0,
3119 err_tk, states) == 0)
3120 // discard this state
3121 indents += pop(&p, 1, &start_of_line, do_free);
3124 // no state accepted TK_error
/* If nothing was shifted since the last error, force progress by
 * discarding at least one token (avoids an infinite loop). */
3127 if (!shift_since_err) {
3128 /* must discard at least one token to avoid
3131 if (tk->num == TK_eof)
3134 tk = tok_copy(token_next(tokens));
3136 shift_since_err = 0;
3137 tos = &p.stack[p.tos-1];
/* Discard tokens until one is in the deduced look-ahead set. */
3138 while (!in_lookahead(tk, states, tos->state) &&
3139 tk->num != TK_eof) {
3141 tk = tok_copy(token_next(tokens));
3142 shift_since_err = 1;
3143 if (tk->num == TK_in)
3145 if (tk->num == TK_out) {
3149 // FIXME update since_indent here
3152 tos->indents += indents;
/* Final cleanup: free everything still on the stack. */
3155 pop(&p, p.tos, NULL, do_free);
3161 ###### exported functions
/* Public prototype for the table-driven parser driver above. */
3162 void *parser_run(struct token_state *tokens,
3163 const struct state states[],
3164 int (*do_reduce)(int, void**, struct token_config*, void*),
3165 void (*do_free)(short, void*),
3166 FILE *trace, const char *non_term[],
3167 struct token_config *config);
3171 Being able to visualize the parser in action can be invaluable when
3172 debugging the parser code, or trying to understand how the parse of a
3173 particular grammar progresses. The stack contains all the important
3174 state, so just printing out the stack every time around the parse loop
3175 can make it possible to see exactly what is happening.
3177 This doesn't explicitly show each SHIFT and REDUCE action. However they
3178 are easily deduced from the change between consecutive lines, and the
3179 details of each state can be found by cross referencing the states list
3180 in the stack with the "report" that parsergen can generate.
3182 For terminal symbols, we just dump the token. For non-terminals we
3183 print the name of the symbol. The look ahead token is reported at the
3184 end inside square brackets.
3186 ###### parser functions
/* Printable names for the reserved token numbers used in traces. */
3188 static char *reserved_words[] = {
3189 [TK_error] = "ERROR",
3192 [TK_newline] = "NEWLINE",
/* Print one frame's state as "(Nsnk) ": 's' if the state starts a
 * line, 'n<k>' (frames since newline) if newlines are permitted. */
3195 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
3197 fprintf(trace, "(%d", f->state);
3198 if (states[f->state].starts_line)
3199 fprintf(trace, "s");
3200 if (f->newline_permitted)
3201 fprintf(trace, "n%d", f->since_newline);
3202 fprintf(trace, ") ");
/* Dump the whole stack - symbol, indent count and state per frame -
 * followed by the look-ahead token in square brackets.  Terminals
 * are shown as token text, non-terminals by name. */
3205 void parser_trace(FILE *trace, struct parser *p,
3206 struct token *tk, const struct state states[],
3207 const char *non_term[], int knowns)
3212 for (i = 0; i < p->tos; i++) {
3213 struct frame *f = &p->stack[i];
/* Reserved tokens print their fixed name; known terminals print
 * their token text; anything above that is a non-terminal. */
3216 if (sym < TK_reserved &&
3217 reserved_words[sym] != NULL)
3218 fputs(reserved_words[sym], trace);
3219 else if (sym < TK_reserved + knowns) {
3220 struct token *t = p->asn_stack[i];
3221 text_dump(trace, t->txt, 20);
3223 fputs(non_term[sym - TK_reserved - knowns],
3226 fprintf(trace, ".%d", f->indents);
3227 if (f->since_newline == 0)
3231 parser_trace_state(trace, f, states);
/* Finally the look-ahead token with its line:column. */
3233 fprintf(trace, "[");
3234 if (tk->num < TK_reserved &&
3235 reserved_words[tk->num] != NULL)
3236 fputs(reserved_words[tk->num], trace);
3238 text_dump(trace, tk->txt, 20);
3239 fprintf(trace, ":%d:%d]", tk->line, tk->col);
/* Append the parser action taken (Shift/Reduce/...) to the trace line. */
3242 void parser_trace_action(FILE *trace, char *action)
3245 fprintf(trace, " - %s\n", action);
3250 The obvious example for a parser is a calculator.
3252 As `scanner` provides a number parsing function using `libgmp`, it is not much
3253 work to perform arbitrary rational number calculations.
3255 This calculator takes one expression, or an equality test, per line. The
3256 results are printed and if any equality test fails, the program exits with
3259 ###### File: parsergen.mk
# Extract the "calc" grammar from this very file, then build and run it.
3260 calc.c calc.h : parsergen parsergen.mdc
3261 ./parsergen --tag calc -o calc parsergen.mdc
3262 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3263 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3265 ./calc parsergen.mdc
3270 #include "parse_number.h"
3271 // what do we use for a demo-grammar? A calculator of course.
3283 #include <sys/mman.h>
3289 #include "scanner.h"
/* Free the gmp rational held in an asn produced by the calc grammar. */
3294 static void free_number(struct number *n)
/* Compare a struct text against a C string for exact equality. */
3300 static int text_is(struct text t, char *s)
3302 return (strlen(s) == t.len &&
3303 strncmp(s, t.txt, t.len) == 0);
/* Demo driver: mmap the given file, extract its sections, and run
 * the generated parse_calc() on each "example: input" section.
 * NOTE(review): argv[1]/open/mmap results are used unchecked -
 * acceptable for a demo, not for production code. */
3306 int main(int argc, char *argv[])
3308 int fd = open(argv[1], O_RDONLY);
3309 int len = lseek(fd, 0, 2);
3310 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3311 struct section *table = code_extract(file, file+len, NULL);
3313 struct token_config config = {
3314 .ignored = (1 << TK_line_comment)
3317 .number_chars = ".,_+-",
/* A second command-line argument turns on parser tracing. */
3321 for (s = table; s; s = s->next)
3322 if (text_is(s->section, "example: input"))
3323 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3325 struct section *t = table->next;
3326 code_free(table->code);
3338 Session -> Session Line
3341 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3342 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3343 gmp_printf(" or as a decimal: %Fg\n", fl);
3347 | Expression = Expression NEWLINE ${
3348 if (mpq_equal($1.val, $3.val))
3349 gmp_printf("Both equal %Qd\n", $1.val);
3351 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3356 | NEWLINE ${ printf("Blank line\n"); }$
3357 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3360 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3361 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3362 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3363 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3364 | Expression // Expression ${ {
3367 mpz_init(z0); mpz_init(z1); mpz_init(z2);
3368 mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
3369 mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
3370 mpz_tdiv_q(z0, z1, z2);
3371 mpq_set_z($0.val, z0);
3372 mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
3374 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3375 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3380 3.1415926535 - 355/113
3382 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3384 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1