1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
24 ###### File: parsergen.c
29 ## forward declarations
40 ###### File: libparser.c
47 ###### File: parsergen.mk
50 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
53 ## Reading the grammar
55 The grammar must be stored in a markdown literate code file as parsed
56 by "[mdcode][]". It must have three top level (i.e. unreferenced)
57 sections: `header`, `code`, and `grammar`. The first two will be
58 literally copied into the generated `.c` and `.h` files. The last
59 contains the grammar. This is tokenised with "[scanner][]".
61 If the `--tag` option is given, then any top level header that doesn't
62 start with the tag is ignored, and the tag is stripped from the rest. So
64 means that the three needed sections must be `Foo: header`, `Foo: code`,
65 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
110 The strings reported by `mdcode` and `scanner` are `struct text` which have
111 length rather than being null terminated. To help with printing and
112 comparing we define `text_is` and `prtxt`, which should possibly go in
113 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
114 which might contain control characters.
116 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
117 because that is what we need to detect tags.
/* text_is(): report whether the length-counted text `t` exactly equals
 * the NUL-terminated C string `s` (both length and bytes must match). */
120 static int text_is(struct text t, char *s)
122 return (strlen(s) == t.len &&
123 strncmp(s, t.txt, t.len) == 0);
/* prtxt(): print a `struct text` (which is not NUL-terminated) to stdout. */
125 static void prtxt(struct text t)
127 printf("%.*s", t.len, t.txt);
/* strip_tag(): like strncmp but tag-aware — test whether *t starts with
 * `tag` immediately followed by ':', and if so strip that prefix plus any
 * following spaces from *t (by advancing past `skip` characters).
 * NOTE(review): the return value and the actual text adjustment are in
 * elided lines — confirm against the full source. */
130 static int strip_tag(struct text *t, char *tag)
132 int skip = strlen(tag) + 1;
// reject when too short, tag text differs, or the ':' is missing
133 if (skip >= t->len ||
134 strncmp(t->txt, tag, skip-1) != 0 ||
135 t->txt[skip-1] != ':')
// also consume spaces after the colon
137 while (skip < t->len && t->txt[skip] == ' ')
146 Productions are comprised primarily of symbols - terminal and
147 non-terminal. We do not make a syntactic distinction between the two,
148 though non-terminals must be identifiers. Non-terminal symbols are
149 those which appear in the head of a production, terminal symbols are
150 those which don't. There are also "virtual" symbols used for precedence
151 marking discussed later, and sometimes we won't know what type a symbol
154 ###### forward declarations
155 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
156 char *symtypes = "UVTN";
160 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
161 table of known symbols and the resulting parser will report them as
162 `TK_reserved + N`. A small set of identifiers are reserved for the
163 different token types that `scanner` can report.
166 static char *reserved_words[] = {
167 [TK_error] = "ERROR",
168 [TK_number] = "NUMBER",
169 [TK_ident] = "IDENTIFIER",
171 [TK_string] = "STRING",
172 [TK_multi_string] = "MULTI_STRING",
175 [TK_newline] = "NEWLINE",
181 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
182 recognised. The former is automatically expected at the end of the text
183 being parsed. The latter are always ignored by the parser.
185 All of these symbols are stored in a simple symbol table. We use the
186 `struct text` from `mdcode` to store the name and link them together
187 into a sorted list using an insertion sort.
189 We don't have separate `find` and `insert` functions as any symbol we
190 find needs to be remembered. We simply expect `find` to always return a
191 symbol, but its type might be `Unknown`.
200 ###### grammar fields
/* sym_find(): look `s` up in the grammar's name-sorted symbol list,
 * inserting a freshly calloc'd symbol when absent.  Always returns a
 * symbol; a new one has type Unknown (see surrounding text: find and
 * insert are deliberately combined). */
205 static struct symbol *sym_find(struct grammar *g, struct text s)
207 struct symbol **l = &g->syms;
// walk to the insertion point in the sorted list
212 (cmp = text_cmp((*l)->name, s)) < 0)
216 n = calloc(1, sizeof(*n));
/* symbols_init(): pre-load the symbol table with the reserved token
 * names (ERROR, NUMBER, IDENTIFIER, ...) so the generated parser can
 * report them as `TK_reserved + N`. */
225 static void symbols_init(struct grammar *g)
227 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
229 for (i = 0; i < entries; i++) {
// wrap each C string as a struct text for sym_find()
232 t.txt = reserved_words[i];
235 t.len = strlen(t.txt);
242 ### Data types and precedence.
244 Data type specification and precedence specification are both
245 introduced by a dollar sign at the start of the line. If the next
246 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
247 precedence, otherwise it specifies a data type.
249 The data type name is simply stored and applied to the head of all
250 subsequent productions. It must be the name of a structure optionally
251 preceded by an asterisk which means a reference or "pointer". So
252 `$expression` maps to `struct expression` and `$*statement` maps to
253 `struct statement *`.
255 Any productions given before the first data type declaration will have
256 no data type associated with them and can carry no information. In
257 order to allow other non-terminals to have no type, the data type
258 `$void` can be given. This does *not* mean that `struct void` will be
259 used, but rather that no type will be associated with future
262 The precedence line must contain a list of symbols - typically
263 terminal symbols, but not necessarily. It can only contain symbols
264 that have not been seen yet, so precedence declaration must precede
265 the productions that they affect.
267 A precedence line may also contain a "virtual" symbol which is an
268 identifier preceded by `$$`. Virtual terminals carry precedence
269 information but are not included in the grammar. A production can
270 declare that it inherits the precedence of a given virtual symbol.
272 This use for `$$` precludes it from being used as a symbol in the
273 described language. Two other symbols: `${` and `}$` are also
276 Each new precedence line introduces a new precedence level and
277 declares how it associates. This level is stored in each symbol
278 listed and may be inherited by any production which uses the symbol. A
279 production inherits from the last symbol which has a precedence.
281 The symbols on the first precedence line have the lowest precedence.
282 Subsequent lines introduce symbols with higher precedence.
284 ###### grammar fields
285 struct text current_type;
290 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
291 static const char *known[] = { "$$", "${", "}$" };
/* dollar_line(): parse a `$` (or, when isref, `$*`) directive line.
 * The word after the dollar selects the meaning: LEFT/RIGHT/NON starts
 * a new precedence level; any other identifier sets the current data
 * type for subsequent production heads (`void` clears it).
 * Returns NULL on success or a static error message.
 * NOTE(review): several control-flow lines are elided in this view. */
294 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
296 struct token t = token_next(ts);
301 if (t.num != TK_ident) {
302 err = "type or assoc expected after '$'";
// LEFT/RIGHT/NON select the associativity for a new precedence level
305 if (text_is(t.txt, "LEFT"))
307 else if (text_is(t.txt, "RIGHT"))
309 else if (text_is(t.txt, "NON"))
// otherwise this is a data-type declaration for following productions
312 g->current_type = t.txt;
313 g->type_isref = isref;
314 if (text_is(t.txt, "void"))
315 g->current_type.txt = NULL;
317 if (t.num != TK_newline) {
318 err = "Extra tokens after type name";
325 err = "$* cannot be followed by a precedence";
329 // This is a precedence line, need some symbols.
333 while (t.num != TK_newline) {
334 enum symtype type = Terminal;
// `$$name` introduces a Virtual symbol carrying only precedence
336 if (t.num == TK_virtual) {
339 if (t.num != TK_ident) {
340 err = "$$ must be followed by a word";
343 } else if (t.num != TK_ident &&
345 err = "Illegal token in precedence line";
// precedence can only be given to not-yet-seen symbols
348 s = sym_find(g, t.txt);
349 if (s->type != Unknown) {
350 err = "Symbols in precedence line must not already be known.";
354 s->precedence = g->prec_levels;
360 err = "No symbols given on precedence line";
// on error, skip the remainder of the line
364 while (t.num != TK_newline && t.num != TK_eof)
371 A production either starts with an identifier which is the head
372 non-terminal, or a vertical bar (`|`) in which case this production
373 uses the same head as the previous one. The identifier must be
374 followed by a `->` mark. All productions for a given non-terminal must
375 be grouped together with the `nonterminal ->` given only once.
377 After this a (possibly empty) sequence of identifiers and marks form
378 the body of the production. A virtual symbol may be given after the
379 body by preceding it with `$$`. If a virtual symbol is given, the
380 precedence of the production is that for the virtual symbol. If none
381 is given, the precedence is inherited from the last symbol in the
382 production which has a precedence specified.
384 After the optional precedence may come the `${` mark. This indicates
385 the start of a code fragment. If present, this must be on the same
386 line as the start of the production.
388 All of the text from the `${` through to the matching `}$` is
389 collected and forms the code-fragment for the production. It must all
390 be in one `code_node` of the literate code. The `}$` must be
391 at the end of a line.
393 Text in the code fragment will undergo substitutions where `$N` or
394 `$<N`, for some numeric `N`, will be replaced with a variable holding
395 the parse information for the particular symbol in the production.
396 `$0` is the head of the production, `$1` is the first symbol of the
397 body, etc. The type of `$N` for a terminal symbol is `struct token`.
398 For a non-terminal, it is whatever has been declared for that symbol.
399 The `<` may be included for symbols declared as storing a reference
400 (not a structure) and means that the reference is being moved out, so
401 it will not automatically be freed.
403 Symbols that are left-recursive are a little special. These are symbols
404 that are both the head of a production and the first body symbol of the same
405 production. They are problematic when they appear in other productions
406 elsewhere than at the end, and when indenting is used to describe
407 structure. In this case there is no way for a smaller indent to ensure
408 the left-recursive symbol cannot be extended. When it appears at the
409 end of a production, that production can be reduced to ensure the symbol
410 isn't extended. So we record left-recursive symbols while reading the
411 grammar, and produce a warning when reporting the grammar if they are
412 found in an unsuitable place.
414 A symbol that is only left recursive in a production where it is
415 followed by newline does not cause these problems because the newline
416 will effectively terminate it.
418 While building productions we will need to add to an array which needs to
/* array_add(): append `new` to a growable array of pointers.  `varray`
 * is really the address of the array pointer (void ***) and `*cnt` the
 * current count.  The allocated size is never stored; it is derived by
 * rounding the count up to the next multiple of `step` (the formula
 * requires step to be a power of two), so realloc only happens when the
 * count reaches that boundary.
 * NOTE(review): `step` is defined in elided lines — confirm its value. */
422 static void array_add(void *varray, int *cnt, void *new)
424 void ***array = varray;
// round *cnt up to the next step boundary to get current capacity
427 current = ((*cnt-1) | (step-1))+1;
428 if (*cnt == current) {
// array is full — grow it
431 *array = realloc(*array, current * sizeof(void*));
433 (*array)[*cnt] = new;
437 Collecting the code fragment simply involves reading tokens until we
438 hit the end token `}$`, and noting the character position of the start and
/* collect_code(): collect the text of a `${ ... }$` code fragment.
 * `start` is the `${` token; the fragment begins immediately after it
 * and must stay within the same code_node.  Tokens are consumed until
 * the matching `}$` (TK_close); the returned text spans from just past
 * `${` up to (not including) the `}$`.
 * NOTE(review): the unclosed-fragment error path (caller checks for a
 * NULL .txt) is in elided lines — confirm against the full source. */
442 static struct text collect_code(struct token_state *state,
// fragment text starts right after the `${` token
447 code.txt = start.txt.txt + start.txt.len;
449 t = token_next(state);
// scan until close/error or until we leave the starting code_node
450 while (t.node == start.node &&
451 t.num != TK_close && t.num != TK_error &&
453 if (t.num == TK_close && t.node == start.node)
454 code.len = t.txt.txt - code.txt;
460 Now we have all the bits we need to parse a full production.
465 ###### grammar fields
466 struct production **productions;
467 int production_count;
469 ###### production fields
471 struct symbol **body;
477 int first_production;
/* parse_production(): parse one production body for the already-parsed
 * head: a sequence of identifiers/marks, an optional `$$virtual`
 * precedence override, and an optional `${ ... }$` code fragment.
 * The completed production is appended to g->productions.  Returns NULL
 * on success or a static error message; on error the rest of the line
 * is consumed.  NOTE(review): some lines are elided in this view. */
481 static char *parse_production(struct grammar *g,
483 struct token_state *state)
485 /* Head has already been parsed. */
488 struct production p, *pp;
490 memset(&p, 0, sizeof(p));
// gather body symbols until something other than ident/mark appears
492 tk = token_next(state);
493 while (tk.num == TK_ident || tk.num == TK_mark) {
494 struct symbol *bs = sym_find(g, tk.txt);
495 if (bs->type == Unknown)
497 if (bs->type == Virtual) {
498 err = "Virtual symbol not permitted in production";
// precedence is inherited from the last body symbol that has one
501 if (bs->precedence) {
502 p.precedence = bs->precedence;
505 array_add(&p.body, &p.body_size, bs);
506 tk = token_next(state);
// optional `$$word`: inherit precedence from a virtual symbol
508 if (tk.num == TK_virtual) {
510 tk = token_next(state);
511 if (tk.num != TK_ident) {
512 err = "word required after $$";
515 vs = sym_find(g, tk.txt);
516 if (vs->num == TK_newline)
518 else if (vs->num == TK_out)
520 else if (vs->precedence == 0) {
521 err = "symbol after $$ must have precedence";
524 p.precedence = vs->precedence;
527 tk = token_next(state);
// optional `${` starts the code fragment, on the same line
529 if (tk.num == TK_open) {
530 p.code_line = tk.line;
531 p.code = collect_code(state, tk);
532 if (p.code.txt == NULL) {
533 err = "code fragment not closed properly";
536 tk = token_next(state);
// record left recursion unless a following NEWLINE makes it harmless
538 if (p.body_size >= 2 &&
539 p.body[0] == p.head &&
540 p.body[1]->num != TK_newline)
541 p.head->left_recursive = 1;
543 if (tk.num != TK_newline && tk.num != TK_eof) {
544 err = "stray tokens at end of line";
547 pp = malloc(sizeof(*pp));
549 array_add(&g->productions, &g->production_count, pp);
// error path: discard the remainder of the line
552 while (tk.num != TK_newline && tk.num != TK_eof)
553 tk = token_next(state);
557 With the ability to parse production and dollar-lines, we have nearly all
558 that we need to parse a grammar from a `code_node`.
560 The head of the first production will effectively be the `start` symbol of
561 the grammar. However it won't _actually_ be so. Processing the grammar is
562 greatly simplified if the real start symbol only has a single production,
563 and expects `$eof` as the final terminal. So when we find the first
564 explicit production we insert an extra production as production zero which
567 ###### Example: production 0
570 where `START` is the first non-terminal given.
572 ###### create production zero
/* Production zero: `$start -> HEAD $eof` with code `$0 = $<1;`.
 * Inserted when the first explicit production is seen so the real
 * start symbol has exactly one production ending in $eof (see the
 * surrounding text).  `head` is the first user non-terminal. */
573 struct production *p = calloc(1,sizeof(*p));
574 struct text start = {"$start",6};
575 struct text eof = {"$eof",4};
576 struct text code = {"$0 = $<1;", 9};
577 p->head = sym_find(g, start);
578 p->head->type = Nonterminal;
// $start carries the same data type as the first real head
579 p->head->struct_name = g->current_type;
580 p->head->isref = g->type_isref;
581 if (g->current_type.txt)
583 array_add(&p->body, &p->body_size, head);
584 array_add(&p->body, &p->body_size, sym_find(g, eof));
585 p->head->first_production = g->production_count;
586 array_add(&g->productions, &g->production_count, p);
588 Now we are ready to read in the grammar. We ignore comments
589 and strings so that the marks which introduce them can be
590 used as terminals in the grammar. We don't ignore numbers
591 even though we don't allow them as that causes the scanner
592 to produce errors that the parser is better positioned to handle.
/* grammar_read(): tokenise a `grammar` code_node and build the whole
 * struct grammar.  Each line starts a production head (`ident ->`),
 * continues the previous head (`|`), declares type/precedence (`$`,
 * `$*`), or is a `//` comment.  Comments/strings are ignored by the
 * scanner config so their marks can serve as grammar terminals.
 * NOTE(review): error cleanup and the return path are elided here. */
595 static struct grammar *grammar_read(struct code_node *code)
597 struct token_config conf = {
600 .words_marks = known,
601 .known_count = sizeof(known)/sizeof(known[0]),
603 .ignored = (1 << TK_line_comment)
604 | (1 << TK_block_comment)
607 | (1 << TK_multi_string)
612 struct token_state *state = token_open(code, &conf);
614 struct symbol *head = NULL;
618 g = calloc(1, sizeof(*g));
// dispatch on the first token of each line
621 for (tk = token_next(state); tk.num != TK_eof;
622 tk = token_next(state)) {
623 if (tk.num == TK_newline)
625 if (tk.num == TK_ident) {
// a new head: all productions for a head must be grouped together
627 head = sym_find(g, tk.txt);
628 if (head->type == Nonterminal)
629 err = "This non-terminal has already be used.";
630 else if (head->type == Virtual)
631 err = "Virtual symbol not permitted in head of production";
633 head->type = Nonterminal;
634 head->struct_name = g->current_type;
635 head->isref = g->type_isref;
// the very first head triggers the synthetic production zero
636 if (g->production_count == 0) {
637 ## create production zero
639 head->first_production = g->production_count;
640 tk = token_next(state);
641 if (tk.num == TK_mark &&
642 text_is(tk.txt, "->"))
643 err = parse_production(g, head, state);
645 err = "'->' missing in production";
647 } else if (tk.num == TK_mark
648 && text_is(tk.txt, "|")) {
649 // another production for same non-term
651 err = parse_production(g, head, state);
653 err = "First production must have a head";
654 } else if (tk.num == TK_mark
655 && text_is(tk.txt, "$")) {
656 err = dollar_line(state, g, 0);
657 } else if (tk.num == TK_mark
658 && text_is(tk.txt, "$*")) {
659 err = dollar_line(state, g, 1);
660 } else if (tk.num == TK_mark
661 && text_is(tk.txt, "//")) {
// comment: swallow the rest of the line
662 while (tk.num != TK_newline &&
664 tk = token_next(state);
666 err = "Unrecognised token at start of line.";
674 fprintf(stderr, "Error at line %d: %s\n",
681 ## Analysing the grammar
683 The central task in analysing the grammar is to determine a set of
684 states to drive the parsing process. This will require finding
685 various sets of symbols and of "items". Some of these sets will need
686 to attach information to each element in the set, so they are more
689 Each "item" may have a 'look-ahead' or `LA` set associated with
690 it. Many of these will be the same as each other. To avoid memory
691 wastage, and to simplify some comparisons of sets, these sets will be
692 stored in a list of unique sets, each assigned a number.
694 Once we have the data structures in hand to manage these sets and
695 lists, we can start setting the 'nullable' flag, build the 'FIRST'
696 sets, and then create the item sets which define the various states.
700 Though we don't only store symbols in these sets, they are the main
701 things we store, so they are called symbol sets or "symsets".
703 A symset has a size, an array of shorts, and an optional array of data
704 values, which are also shorts. If the array of data is not present,
705 we store an impossible pointer, as `NULL` is used to indicate that no
706 memory has been allocated yet.
711 unsigned short *syms, *data;
713 #define NO_DATA ((unsigned short *)1)
714 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
715 const struct symset INIT_DATASET = { 0, NULL, NULL };
717 The arrays of shorts are allocated in blocks of 8 and are kept sorted
718 by using an insertion sort. We don't explicitly record the amount of
719 allocated space as it can be derived directly from the current `cnt` using
720 `((cnt - 1) | 7) + 1`.
/* symset_add(): insert `key` (with optional parallel `val`) into the
 * symset, keeping `syms` sorted via insertion.  Capacity is never
 * stored: it is derived from cnt as ((cnt-1)|7)+1, so arrays grow in
 * blocks of 8 exactly when cnt reaches a multiple of 8. */
723 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
// current allocated capacity, derived from cnt
726 int current = ((s->cnt-1) | 7) + 1;
727 if (current == s->cnt) {
729 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
// the data array is only present when not the NO_DATA sentinel
730 if (s->data != NO_DATA)
731 s->data = realloc(s->data, sizeof(*s->data) * current);
// shift larger keys up to make room (insertion sort step)
734 while (i > 0 && s->syms[i-1] > key) {
735 s->syms[i] = s->syms[i-1];
736 if (s->data != NO_DATA)
737 s->data[i] = s->data[i-1];
741 if (s->data != NO_DATA)
746 Finding a symbol (or item) in a `symset` uses a simple binary search.
747 We return the index where the value was found (so data can be accessed),
748 or `-1` to indicate failure.
/* symset_find(): binary-search the sorted `syms` array for `key`.
 * Returns the index where it was found (so parallel data can be
 * accessed) or -1 on failure. */
750 static int symset_find(struct symset *ss, unsigned short key)
757 while (lo + 1 < hi) {
758 int mid = (lo + hi) / 2;
759 if (ss->syms[mid] <= key)
764 if (ss->syms[lo] == key)
769 We will often want to form the union of two symsets. When we do, we
770 will often want to know if anything changed (as that might mean there
771 is more work to do). So `symset_union` reports whether anything was
772 added to the first set. We use a slow quadratic approach as these
773 sets don't really get very big. If profiling shows this to be a problem it
774 can be optimised later.
/* symset_union(): add to `a` every symbol of `b` not already present,
 * carrying b's data value when b has one (0 otherwise).  Reports
 * whether anything was added to `a` — callers use this to detect when
 * a fixed-point iteration has converged.  Quadratic, deliberately:
 * these sets stay small. */
776 static int symset_union(struct symset *a, struct symset *b)
780 for (i = 0; i < b->cnt; i++)
781 if (symset_find(a, b->syms[i]) < 0) {
782 unsigned short data = 0;
783 if (b->data != NO_DATA)
785 symset_add(a, b->syms[i], data);
791 And of course we must be able to free a symset.
/* symset_free(): release a symset's arrays (data only when present,
 * i.e. not the NO_DATA sentinel). */
793 static void symset_free(struct symset ss)
796 if (ss.data != NO_DATA)
802 Some symsets are simply stored somewhere appropriate in the `grammar`
803 data structure, others need a bit of indirection. This applies
804 particularly to "LA" sets. These will be paired with "items" in an
805 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
806 stored in the `data` field, so we need a mapping from a small (short)
807 number to an LA `symset`.
809 As mentioned earlier this involves creating a list of unique symsets.
811 For now, we just use a linear list sorted by insertion. A skiplist
812 would be more efficient and may be added later.
819 struct setlist *next;
822 ###### grammar fields
823 struct setlist *sets;
/* ss_cmp(): total order on symsets for the sorted `sets` list —
 * shorter sets first, then element-wise by symbol value. */
828 static int ss_cmp(struct symset a, struct symset b)
831 int diff = a.cnt - b.cnt;
834 for (i = 0; i < a.cnt; i++) {
835 diff = (int)a.syms[i] - (int)b.syms[i];
/* save_set(): intern `ss` in the grammar's ss_cmp-sorted list of
 * unique symsets and return its assigned number (used as the `data`
 * value pairing LA sets with items).  A matching existing set is
 * reused; otherwise a new setlist entry is inserted in order. */
842 static int save_set(struct grammar *g, struct symset ss)
844 struct setlist **sl = &g->sets;
// find the insertion/match point in the sorted list
848 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
855 s = malloc(sizeof(*s));
864 Finding a set by number is currently performed by a simple linear search.
865 If this turns out to hurt performance, we can store the sets in a dynamic
866 array like the productions.
/* set_find(): retrieve an interned symset by its save_set() number.
 * Simple linear walk; fine unless profiling says otherwise. */
868 static struct symset set_find(struct grammar *g, int num)
870 struct setlist *sl = g->sets;
871 while (sl && sl->num != num)
876 ### Setting `nullable`
878 We set `nullable` on the head symbol for any production for which all
879 the body symbols (if any) are nullable. As this is a recursive
880 definition, any change in the `nullable` setting means that we need to
881 re-evaluate where it needs to be set.
883 We simply loop around performing the same calculations until no more
/* set_nullable(): fixed-point computation — mark a head `nullable`
 * when every symbol in some production body is nullable (trivially
 * true for an empty body).  Repeats full passes until a pass makes no
 * change, since nullability is recursively defined. */
890 static void set_nullable(struct grammar *g)
893 while (check_again) {
896 for (p = 0; p < g->production_count; p++) {
897 struct production *pr = g->productions[p];
// already known nullable — nothing more to learn from this production
900 if (pr->head->nullable)
902 for (s = 0; s < pr->body_size; s++)
903 if (! pr->body[s]->nullable)
// loop ran to completion: every body symbol is nullable
905 if (s == pr->body_size) {
906 pr->head->nullable = 1;
913 ### Setting `line_like`
915 In order to be able to ignore newline tokens when not relevant, but
916 still include them in the parse when needed, we will need to know
917 which states can start a "line-like" section of code. We ignore
918 newlines when there is an indent since the most recent start of a
921 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
922 If a symbol cannot derive a NEWLINE, then it is only part of a line -
923 so is word-like. If it can derive a NEWLINE, then we consider it to
926 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
927 is the head of a production that contains a line_like symbol is also a
928 line-like symbol. We use a new field `line_like` to record this
929 attribute of symbols, and compute it in a repetitive manner similar to
/* set_line_like(): fixed-point computation of the `line_like` flag —
 * a symbol that can derive a NEWLINE.  Seeded with TK_newline itself;
 * any head whose production body contains a line_like symbol becomes
 * line_like, repeated until stable (same scheme as set_nullable). */
936 static void set_line_like(struct grammar *g)
939 g->symtab[TK_newline]->line_like = 1;
940 while (check_again) {
943 for (p = 0; p < g->production_count; p++) {
944 struct production *pr = g->productions[p];
// already marked — skip
947 if (pr->head->line_like)
950 for (s = 0 ; s < pr->body_size; s++) {
951 if (pr->body[s]->line_like) {
952 pr->head->line_like = 1;
961 ### Building the `first` sets
963 When calculating what can follow a particular non-terminal, we will need to
964 know what the "first" terminal in any subsequent non-terminal might be. So
965 we calculate the `first` set for every non-terminal and store them in an
966 array. We don't bother recording the "first" set for terminals as they are
969 As the "first" for one symbol might depend on the "first" of another,
970 we repeat the calculations until no changes happen, just like with
971 `set_nullable`. This makes use of the fact that `symset_union`
972 reports if any change happens.
974 The core of this, which finds the "first" of part of a production body,
975 will be reused for computing the "follow" sets, so we split it out
976 into a separate function.
978 ###### grammar fields
979 struct symset *first;
/* add_first(): add to `target` the FIRST set of the production body
 * from position `start` onward, stopping at the first non-nullable
 * symbol.  Returns whether `target` changed (for fixed-point loops);
 * when `to_end` is non-NULL it reports whether the scan reached the
 * end of the body (i.e. everything from `start` on is nullable).
 * Shared between FIRST, FOLLOW and LA-set construction. */
983 static int add_first(struct production *pr, int start,
984 struct symset *target, struct grammar *g,
989 for (s = start; s < pr->body_size; s++) {
990 struct symbol *bs = pr->body[s];
// a terminal contributes itself, and stops the scan (not nullable)
991 if (bs->type == Terminal) {
992 if (symset_find(target, bs->num) < 0) {
993 symset_add(target, bs->num, 0);
// a non-terminal contributes its whole FIRST set
997 } else if (symset_union(target, &g->first[bs->num]))
1003 *to_end = (s == pr->body_size);
/* build_first(): allocate one FIRST symset per symbol, then repeat
 * passes over all productions, folding each body's FIRST into its
 * head's set via add_first(), until no pass changes anything. */
1007 static void build_first(struct grammar *g)
1009 int check_again = 1;
1011 g->first = calloc(g->num_syms, sizeof(g->first[0]));
1012 for (s = 0; s < g->num_syms; s++)
1013 g->first[s] = INIT_SYMSET;
1015 while (check_again) {
1018 for (p = 0; p < g->production_count; p++) {
1019 struct production *pr = g->productions[p];
1020 struct symset *head = &g->first[pr->head->num];
// add_first() returns non-zero when the head's set grew
1022 if (add_first(pr, 0, head, g, NULL))
1028 ### Building the `follow` sets.
1030 There are two different situations when we will want to generate "follow"
1031 sets. If we are doing an SLR analysis, we want to generate a single
1032 "follow" set for each non-terminal in the grammar. That is what is
1033 happening here. If we are doing an LALR or LR analysis we will want
1034 to generate a separate "LA" set for each item. We do that later
1035 in state generation.
1037 There are two parts to generating a "follow" set. Firstly we look at
1038 every place that any non-terminal appears in the body of any
1039 production, and we find the set of possible "first" symbols after
1040 there. This is done using `add_first` above and only needs to be done
1041 once as the "first" sets are now stable and will not change.
1045 for (p = 0; p < g->production_count; p++) {
1046 struct production *pr = g->productions[p];
1049 for (b = 0; b < pr->body_size - 1; b++) {
1050 struct symbol *bs = pr->body[b];
1051 if (bs->type == Terminal)
1053 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1057 The second part is to add the "follow" set of the head of a production
1058 to the "follow" sets of the final symbol in the production, and any
1059 other symbol which is followed only by `nullable` symbols. As this
1060 depends on "follow" itself we need to repeatedly perform the process
1061 until no further changes happen.
1065 for (again = 0, p = 0;
1066 p < g->production_count;
1067 p < g->production_count-1
1068 ? p++ : again ? (again = 0, p = 0)
1070 struct production *pr = g->productions[p];
1073 for (b = pr->body_size - 1; b >= 0; b--) {
1074 struct symbol *bs = pr->body[b];
1075 if (bs->type == Terminal)
1077 if (symset_union(&g->follow[bs->num],
1078 &g->follow[pr->head->num]))
1085 We now just need to create and initialise the `follow` list to get a
1088 ###### grammar fields
1089 struct symset *follow;
/* build_follow(): allocate and zero-init one FOLLOW symset per symbol,
 * then (in elided section references) run the two passes described
 * above: seed from FIRST of trailing body segments, then propagate
 * head FOLLOW sets to final/nullable-tail body symbols to a fixed
 * point. */
1092 static void build_follow(struct grammar *g)
1095 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1096 for (s = 0; s < g->num_syms; s++)
1097 g->follow[s] = INIT_SYMSET;
1101 ### Building itemsets and states
1103 There are three different levels of detail that can be chosen for
1104 building the itemsets and states for the LR grammar. They are:
1106 1. LR(0) or SLR(1), where no look-ahead is considered.
1107 2. LALR(1) where we build look-ahead sets with each item and merge
1108 the LA sets when we find two paths to the same "kernel" set of items.
1109 3. LR(1) where different look-ahead for any item in the set means
1110 a different state must be created.
1112 ###### forward declarations
1113 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1115 We need to be able to look through existing states to see if a newly
1116 generated state already exists. For now we use a simple sorted linked
1119 An item is a pair of numbers: the production number and the position of
1120 "DOT", which is an index into the body of the production.
1121 As the numbers are not enormous we can combine them into a single "short"
1122 and store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1123 production number (so 2000 productions with maximum of 31 symbols in the
1126 Comparing the itemsets will be a little different to comparing symsets
1127 as we want to do the lookup after generating the "kernel" of an
1128 itemset, so we need to ignore the offset=zero items which are added during
1131 To facilitate this, we modify the "DOT" number so that "0" sorts to
1132 the end of the list in the symset, and then only compare items before
/* Item encoding: production number in the low 11 bits, DOT index in
 * the high 5 bits stored as (31-index) so that DOT==0 items sort to
 * the END of a symset — kernel items (DOT>0) then compare first. */
1136 static inline unsigned short item_num(int production, int index)
1138 return production | ((31-index) << 11);
/* item_prod(): recover the production number (low 11 bits). */
1140 static inline int item_prod(unsigned short item)
1142 return item & 0x7ff;
/* item_index(): recover the DOT position, undoing the 31-index flip. */
1144 static inline int item_index(unsigned short item)
1146 return (31-(item >> 11)) & 0x1f;
1149 For LR(1) analysis we need to compare not just the itemset in a state
1150 but also the LA sets. As we assign each unique LA set a number, we
1151 can just compare the symset and the data values together.
/* itemset_cmp(): order two itemsets by their KERNELS only — items with
 * DOT==0 (added during completion) sort last thanks to item_num()'s
 * encoding and are excluded from the comparison.  For LR1 the interned
 * LA-set numbers in `data` are compared as well, so states differing
 * only in look-ahead compare unequal. */
1154 static int itemset_cmp(struct symset a, struct symset b,
1155 enum grammar_type type)
// walk both kernels in parallel while both still have DOT>0 items
1161 i < a.cnt && i < b.cnt &&
1162 item_index(a.syms[i]) > 0 &&
1163 item_index(b.syms[i]) > 0;
1165 int diff = a.syms[i] - b.syms[i];
// LR1 only: LA-set numbers must match too
1169 diff = a.data[i] - b.data[i];
// determine which kernel ended first (shorter kernel sorts first)
1174 if (i == a.cnt || item_index(a.syms[i]) == 0)
1178 if (i == b.cnt || item_index(b.syms[i]) == 0)
1184 if (type < LR1 || av == -1)
1187 a.data[i] - b.data[i];
1190 It will be helpful to know if an itemset has been "completed" or not,
1191 particularly for LALR where itemsets get merged, at which point they
1192 need to be considered for completion again. So a `completed` flag is needed.
1194 For correct handling of `TK_newline` when parsing, we will need to
1195 know which states (itemsets) can occur at the start of a line, so we
1196 will record a `starts_line` flag too whenever DOT is at the start of a
1199 Finally, for handling `TK_out` we need to know whether productions in the
1200 current state started *before* the most recent indent. A state
1201 doesn't usually keep details of individual productions, so we need to
1202 add one extra detail. `min_prefix` is the smallest non-zero number of
1203 symbols *before* DOT in any production in an itemset. This will allow
1204 us to determine if the most recent indent is sufficiently recent
1205 to cancel it against a `TK_out`. If it was seen longer ago than the
1206 `min_prefix`, and if the current state cannot be reduced, then the
1207 indented section must have ended in the middle of a syntactic unit, so
1208 an error must be signaled.
1210 And now we can build the list of itemsets. The lookup routine returns
1211 both a success flag and a pointer to where in the list an insert
1212 should happen, so we don't need to search a second time.
1216 struct itemset *next;
1218 struct symset items;
1219 struct symset go_to;
1221 unsigned short precedence;
1227 ###### grammar fields
1228 struct itemset *items;
/* itemset_find(): search the sorted linked list of itemsets for one
 * whose kernel matches `kernel` (per itemset_cmp, honouring `type`).
 * Returns a found flag and, via *where, the list position where a
 * match sits or an insert should happen — so callers never search
 * twice. */
1232 static int itemset_find(struct grammar *g, struct itemset ***where,
1233 struct symset kernel, enum grammar_type type)
1235 struct itemset **ip;
1237 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1238 struct itemset *i = *ip;
1240 diff = itemset_cmp(i->items, kernel, type);
1253 Adding an itemset may require merging the LA sets if LALR analysis is
1254 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1255 clear the `completed` flag so that the dependants of this itemset will be
1256 recalculated and their LA sets updated.
1258 `add_itemset` must consume the symsets it is passed, either by adding
1259 them to a data structure, or freeing them.
1261 static int add_itemset(struct grammar *g, struct symset ss,
1262 enum assoc assoc, unsigned short precedence,
1263 enum grammar_type type)
1265 struct itemset **where, *is;
1267 int found = itemset_find(g, &where, ss, type);
1269 is = calloc(1, sizeof(*is));
1270 is->state = g->states;
1274 is->precedence = precedence;
1276 is->go_to = INIT_DATASET;
1285 for (i = 0; i < ss.cnt; i++) {
1286 struct symset temp = INIT_SYMSET, s;
1287 if (ss.data[i] == is->items.data[i])
1289 s = set_find(g, is->items.data[i]);
1290 symset_union(&temp, &s);
1291 s = set_find(g, ss.data[i]);
1292 if (symset_union(&temp, &s)) {
1293 is->items.data[i] = save_set(g, temp);
1304 To build all the itemsets, we first insert the initial itemset made
1305 from production zero, complete each itemset, and then generate new
1306 itemsets from old until no new ones can be made.
1308 Completing an itemset means finding all the items where "DOT" is followed by
1309 a nonterminal and adding "DOT=0" items for every production from that
1310 non-terminal - providing each item hasn't already been added.
1312 If LA sets are needed, the LA set for each new item is found using
1313 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1314 be supplemented by the LA set for the item which produces the new item.
1316 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1317 is used in the next stage.
1318 If any of these symbols are flagged as `line_like`, then this
1319 state must be a `starts_line` state so now is a good time to record that.
1321 When itemsets are created we assign a precedence to the itemset from
1322 the complete item, if there is one. We ignore the possibility of
1323 there being two and don't (currently) handle precedence in such
1324 grammars. When completing a grammar we ignore any item where DOT is
1325 followed by a terminal with a precedence lower than that for the
1326 itemset. Unless the terminal has right associativity, we also ignore
1327 items where the terminal has the same precedence. The result is that
1328 unwanted items are still in the itemset, but the terminal doesn't get
1329 into the go to set, so the item is ineffective.
1331 ###### complete itemset
1332 for (i = 0; i < is->items.cnt; i++) {
1333 int p = item_prod(is->items.syms[i]);
1334 int bs = item_index(is->items.syms[i]);
1335 struct production *pr = g->productions[p];
1338 struct symset LA = INIT_SYMSET;
1339 unsigned short sn = 0;
1340 struct symset LAnl = INIT_SYMSET;
1341 unsigned short snnl = 0;
1343 if (is->min_prefix == 0 ||
1344 (bs > 0 && bs < is->min_prefix))
1345 is->min_prefix = bs;
1346 if (bs == pr->body_size)
1349 if (s->precedence && is->precedence &&
1350 is->precedence > s->precedence)
1351 /* This terminal has a low precedence and
1352 * shouldn't be shifted
1355 if (s->precedence && is->precedence &&
1356 is->precedence == s->precedence && s->assoc != Right)
1357 /* This terminal has a matching precedence and is
1358 * not Right-associative, so we mustn't shift it.
1361 if (symset_find(&done, s->num) < 0) {
1362 symset_add(&done, s->num, 0);
1364 if (s->type != Nonterminal)
1367 is->starts_line = 1;
1372 add_first(pr, bs+1, &LA, g, &to_end);
1374 struct symset ss = set_find(g, is->items.data[i]);
1375 symset_union(&LA, &ss);
1377 sn = save_set(g, LA);
1378 LA = set_find(g, sn);
1379 if (symset_find(&LA, TK_newline))
1380 symset_add(&LAnl, TK_newline, 0);
1381 snnl = save_set(g, LAnl);
1382 LAnl = set_find(g, snnl);
1385 /* Add productions for this symbol */
1386 for (p2 = s->first_production;
1387 p2 < g->production_count &&
1388 g->productions[p2]->head == s;
1390 int itm = item_num(p2, 0);
1391 int pos = symset_find(&is->items, itm);
1393 if (g->productions[p2]->line_like)
1394 symset_add(&is->items, itm, snnl);
1396 symset_add(&is->items, itm, sn);
1397 /* Will have re-ordered, so start
1398 * from beginning again */
1400 } else if (type >= LALR) {
1401 struct symset ss = set_find(g, is->items.data[pos]);
1402 struct symset tmp = INIT_SYMSET;
1403 struct symset *la = &LA;
1405 if (g->productions[p2]->line_like)
1407 symset_union(&tmp, &ss);
1408 if (symset_union(&tmp, la)) {
1409 is->items.data[pos] = save_set(g, tmp);
1417 For each symbol we found (and placed in `done`) we collect all the items for
1418 which this symbol is next, and create a new itemset with "DOT" advanced over
1419 the symbol. This is then added to the collection of itemsets (or merged
1420 with a pre-existing itemset).
1422 ###### derive itemsets
1423 // Now we have a completed itemset, so we need to
1424 // compute all the 'next' itemsets and create them
1425 // if they don't exist.
1426 for (i = 0; i < done.cnt; i++) {
1428 unsigned short state;
1429 struct symbol *sym = g->symtab[done.syms[i]];
1430 enum assoc assoc = Non;
1431 unsigned short precedence = 0;
1432 struct symset newitemset = INIT_SYMSET;
1434 newitemset = INIT_DATASET;
1436 for (j = 0; j < is->items.cnt; j++) {
1437 int itm = is->items.syms[j];
1438 int p = item_prod(itm);
1439 int bp = item_index(itm);
1440 struct production *pr = g->productions[p];
1441 unsigned short la = 0;
1444 if (bp == pr->body_size)
1446 if (pr->body[bp] != sym)
1449 la = is->items.data[j];
1450 pos = symset_find(&newitemset, pr->head->num);
1451 if (bp + 1 == pr->body_size &&
1452 pr->precedence > 0 &&
1453 pr->precedence > precedence) {
1454 // new itemset is reducible and has a precedence.
1455 precedence = pr->precedence;
1459 symset_add(&newitemset, item_num(p, bp+1), la);
1460 else if (type >= LALR) {
1461 // Need to merge la set.
1462 int la2 = newitemset.data[pos];
1464 struct symset ss = set_find(g, la2);
1465 struct symset LA = INIT_SYMSET;
1466 symset_union(&LA, &ss);
1467 ss = set_find(g, la);
1468 if (symset_union(&LA, &ss))
1469 newitemset.data[pos] = save_set(g, LA);
1475 state = add_itemset(g, newitemset, assoc, precedence, type);
1476 if (symset_find(&is->go_to, done.syms[i]) < 0)
1477 symset_add(&is->go_to, done.syms[i], state);
1480 All that is left is to create the initial itemset from production zero, and
1481 with `TK_eof` as the LA set.
1484 static void build_itemsets(struct grammar *g, enum grammar_type type)
1486 struct symset first = INIT_SYMSET;
1489 unsigned short la = 0;
1491 // LA set just has eof
1492 struct symset eof = INIT_SYMSET;
1493 symset_add(&eof, TK_eof, 0);
1494 la = save_set(g, eof);
1495 first = INIT_DATASET;
1497 // production 0, offset 0 (with no data)
1498 symset_add(&first, item_num(0, 0), la);
1499 add_itemset(g, first, Non, 0, type);
1500 for (again = 0, is = g->items;
1502 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1504 struct symset done = INIT_SYMSET;
1515 ### Completing the analysis.
1517 The exact process of analysis depends on which level was requested. For
1518 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1519 `LR1` we need first, but not follow. For `SLR` we need both.
1521 We don't build the "action" tables that you might expect as the parser
1522 doesn't use them. They will be calculated to some extent if needed for
1525 Once we have built everything we allocate arrays for the two lists:
1526 symbols and itemsets. This allows more efficient access during reporting.
1527 The symbols are grouped as terminals and non-terminals and we record the
1528 changeover point in `first_nonterm`.
1530 ###### grammar fields
1531 struct symbol **symtab;
1532 struct itemset **statetab;
1535 ###### grammar_analyse
1537 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1541 int snum = TK_reserved;
1542 for (s = g->syms; s; s = s->next)
1543 if (s->num < 0 && s->type == Terminal) {
1547 g->first_nonterm = snum;
1548 for (s = g->syms; s; s = s->next)
1549 if (s->num < 0 && s->type != Virtual) {
1553 for (s = g->syms; s; s = s->next)
1559 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1560 for (s = g->syms; s; s = s->next)
1561 g->symtab[s->num] = s;
1571 build_itemsets(g, type);
1573 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1574 for (is = g->items; is ; is = is->next)
1575 g->statetab[is->state] = is;
1578 ## Reporting on the Grammar
1580 The purpose of the report is to give the grammar developer insight into
1581 how the grammar parser will work. It is basically a structured dump of
1582 all the tables that have been generated, plus a description of any conflicts.
1584 ###### grammar_report
1585 static int grammar_report(struct grammar *g, enum grammar_type type)
1591 return report_conflicts(g, type) + report_left_recursive(g);
1594 Firstly we have the complete list of symbols, together with the
1595 "FIRST" set if that was generated. We add a mark to each symbol to
1596 show if it can end in a newline (`>`), if it is considered to be
1597 "line-like" (`<`), or if it is nullable (`.`).
1601 static void report_symbols(struct grammar *g)
1605 printf("SYMBOLS + FIRST:\n");
1607 printf("SYMBOLS:\n");
1609 for (n = 0; n < g->num_syms; n++) {
1610 struct symbol *s = g->symtab[n];
1614 printf(" %c%c%3d%c: ",
1615 s->nullable ? '.':' ',
1616 s->line_like ? '<':' ',
1617 s->num, symtypes[s->type]);
1620 printf(" (%d%s)", s->precedence,
1621 assoc_names[s->assoc]);
1623 if (g->first && s->type == Nonterminal) {
1626 for (j = 0; j < g->first[n].cnt; j++) {
1629 prtxt(g->symtab[g->first[n].syms[j]]->name);
1636 Then we have the follow sets if they were computed.
1638 static void report_follow(struct grammar *g)
1641 printf("FOLLOW:\n");
1642 for (n = 0; n < g->num_syms; n++)
1643 if (g->follow[n].cnt) {
1647 prtxt(g->symtab[n]->name);
1648 for (j = 0; j < g->follow[n].cnt; j++) {
1651 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1657 And finally the item sets. These include the GO TO tables and, for
1658 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1659 it up a bit. First the items, with production number and associativity.
1661 static void report_item(struct grammar *g, int itm)
1663 int p = item_prod(itm);
1664 int dot = item_index(itm);
1665 struct production *pr = g->productions[p];
1669 prtxt(pr->head->name);
1671 for (i = 0; i < pr->body_size; i++) {
1672 printf(" %s", (dot == i ? ". ": ""));
1673 prtxt(pr->body[i]->name);
1675 if (dot == pr->body_size)
1678 if (pr->precedence && dot == pr->body_size)
1679 printf(" (%d%s)", pr->precedence,
1680 assoc_names[pr->assoc]);
1681 if (dot < pr->body_size &&
1682 pr->body[dot]->precedence) {
1683 struct symbol *s = pr->body[dot];
1684 printf(" [%d%s]", s->precedence,
1685 assoc_names[s->assoc]);
1687 if (pr->line_like == 1)
1688 printf(" $$NEWLINE");
1689 else if (pr->line_like)
1694 The LA sets which are (possibly) reported with each item:
1696 static void report_la(struct grammar *g, int lanum)
1698 struct symset la = set_find(g, lanum);
1702 printf(" LOOK AHEAD(%d)", lanum);
1703 for (i = 0; i < la.cnt; i++) {
1706 prtxt(g->symtab[la.syms[i]]->name);
1711 Then the go to sets:
1713 static void report_goto(struct grammar *g, struct symset gt)
1718 for (i = 0; i < gt.cnt; i++) {
1720 prtxt(g->symtab[gt.syms[i]]->name);
1721 printf(" -> %d\n", gt.data[i]);
1725 Now we can report all the item sets complete with items, LA sets, and GO TO.
1727 static void report_itemsets(struct grammar *g)
1730 printf("ITEM SETS(%d)\n", g->states);
1731 for (s = 0; s < g->states; s++) {
1733 struct itemset *is = g->statetab[s];
1734 printf(" Itemset %d:%s min prefix=%d",
1735 s, is->starts_line?" (startsline)":"", is->min_prefix);
1737 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1739 for (j = 0; j < is->items.cnt; j++) {
1740 report_item(g, is->items.syms[j]);
1741 if (is->items.data != NO_DATA)
1742 report_la(g, is->items.data[j]);
1744 report_goto(g, is->go_to);
1748 ### Reporting conflicts
1750 Conflict detection varies a lot among different analysis levels. However
1751 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1752 LR1 are also similar as they have FOLLOW or LA sets.
1756 ## conflict functions
1758 static int report_conflicts(struct grammar *g, enum grammar_type type)
1761 printf("Conflicts:\n");
1763 cnt = conflicts_lr0(g, type);
1765 cnt = conflicts_slr(g, type);
1767 printf(" - no conflicts\n");
1771 LR0 conflicts are any state which has both a reducible item and
1772 a shiftable item, or two reducible items.
1774 LR05 conflicts only occur if two possible reductions exist,
1775 as shifts always over-ride reductions.
1777 ###### conflict functions
1778 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1782 for (i = 0; i < g->states; i++) {
1783 struct itemset *is = g->statetab[i];
1784 int last_reduce = -1;
1785 int prev_reduce = -1;
1786 int last_shift = -1;
1790 for (j = 0; j < is->items.cnt; j++) {
1791 int itm = is->items.syms[j];
1792 int p = item_prod(itm);
1793 int bp = item_index(itm);
1794 struct production *pr = g->productions[p];
1796 if (bp == pr->body_size) {
1797 prev_reduce = last_reduce;
1801 if (pr->body[bp]->type == Terminal)
1804 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1805 printf(" State %d has both SHIFT and REDUCE:\n", i);
1806 report_item(g, is->items.syms[last_shift]);
1807 report_item(g, is->items.syms[last_reduce]);
1810 if (prev_reduce >= 0) {
1811 printf(" State %d has 2 (or more) reducible items\n", i);
1812 report_item(g, is->items.syms[prev_reduce]);
1813 report_item(g, is->items.syms[last_reduce]);
1820 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1821 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1822 in the source of the look ahead set.
1824 We build two datasets to reflect the "action" table: one which maps
1825 terminals to items where that terminal could be shifted and another
1826 which maps terminals to items that could be reduced when the terminal
1827 is in look-ahead. We report when we get conflicts between the two.
1829 As a special case, if we find a SHIFT/REDUCE conflict on the NEWLINE
1830 terminal, we ignore it. NEWLINEs are handled specially with their own
1831 rules for when to shift and when to reduce. Conflicts are expected,
1832 but handled internally.
1834 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1839 for (i = 0; i < g->states; i++) {
1840 struct itemset *is = g->statetab[i];
1841 struct symset shifts = INIT_DATASET;
1842 struct symset reduce = INIT_DATASET;
1846 /* First collect the shifts */
1847 for (j = 0; j < is->items.cnt; j++) {
1848 unsigned short itm = is->items.syms[j];
1849 int p = item_prod(itm);
1850 int bp = item_index(itm);
1851 struct production *pr = g->productions[p];
1854 if (bp >= pr->body_size ||
1855 pr->body[bp]->type != Terminal)
1860 if (s->precedence && is->precedence)
1861 /* Precedence resolves this, so no conflict */
1864 if (symset_find(&shifts, s->num) < 0)
1865 symset_add(&shifts, s->num, itm);
1867 /* Now look for reductions and conflicts */
1868 for (j = 0; j < is->items.cnt; j++) {
1869 unsigned short itm = is->items.syms[j];
1870 int p = item_prod(itm);
1871 int bp = item_index(itm);
1872 struct production *pr = g->productions[p];
1874 if (bp < pr->body_size)
1879 la = g->follow[pr->head->num];
1881 la = set_find(g, is->items.data[j]);
1883 for (k = 0; k < la.cnt; k++) {
1884 int pos = symset_find(&shifts, la.syms[k]);
1885 if (pos >= 0 && la.syms[k] != TK_newline) {
1886 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1888 prtxt(g->symtab[la.syms[k]]->name);
1890 report_item(g, shifts.data[pos]);
1891 report_item(g, itm);
1893 pos = symset_find(&reduce, la.syms[k]);
1895 symset_add(&reduce, la.syms[k], itm);
1898 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1899 prtxt(g->symtab[la.syms[k]]->name);
1901 report_item(g, itm);
1902 report_item(g, reduce.data[pos]);
1906 symset_free(shifts);
1907 symset_free(reduce);
1913 ### Reporting non-final left-recursive symbols.
1915 Left recursive symbols are a problem for parsers that honour indentation
1916 when they appear other than at the end of the production. So we need to
1917 report these when asked.
1921 static int report_left_recursive(struct grammar *g)
1924 int bad_left_recursive = 0;
1926 for (p = 0; p < g->production_count; p++) {
1927 struct production *pr = g->productions[p];
1930 for (sn = 0; sn < pr->body_size - 1; sn++) {
1931 struct symbol *s = pr->body[sn];
1933 if (s->left_recursive == 1 &&
1935 if (!bad_left_recursive) {
1936 bad_left_recursive = 1;
1937 printf("Misplaced left recursive symbols.\n");
1941 printf(" in production [%d]\n", p);
1942 s->left_recursive = 2;
1946 return bad_left_recursive;
1949 ## Generating the parser
1951 The exported part of the parser is the `parse_XX` function, where the name
1952 `XX` is based on the name of the parser files.
1954 This takes a `code_node`, a partially initialized `token_config`, and an
1955 optional `FILE` to send tracing to. The `token_config` gets the list of
1956 known words added and then is used with the `code_node` to initialize the
1959 `parse_XX` then calls the library function `parser_run` to actually complete
1960 the parse. This needs the `states` table and function to call the various
1961 pieces of code provided in the grammar file, so they are generated first.
1963 ###### parser_generate
1965 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
1966 struct code_node *pre_reduce)
1972 gen_reduce(f, g, file, pre_reduce);
1975 fprintf(f, "#line 0 \"gen_parser\"\n");
1976 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1979 fprintf(f, "\tstruct token_state *tokens;\n");
1980 fprintf(f, "\tconfig->words_marks = known;\n");
1981 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1982 fprintf(f, "\ttokens = token_open(code, config);\n");
1983 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1984 fprintf(f, "\ttoken_close(tokens);\n");
1985 fprintf(f, "\treturn rv;\n");
1986 fprintf(f, "}\n\n");
1989 ### Known words table
1991 The known words table is simply an array of terminal symbols.
1992 The table of nonterminals used for tracing is a similar array. We
1993 include virtual symbols in the table of non_terminals to keep the
1998 static void gen_known(FILE *f, struct grammar *g)
2001 fprintf(f, "#line 0 \"gen_known\"\n");
2002 fprintf(f, "static const char *known[] = {\n");
2003 for (i = TK_reserved;
2004 i < g->num_syms && g->symtab[i]->type == Terminal;
2006 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2007 g->symtab[i]->name.txt);
2008 fprintf(f, "};\n\n");
2011 static void gen_non_term(FILE *f, struct grammar *g)
2014 fprintf(f, "#line 0 \"gen_non_term\"\n");
2015 fprintf(f, "static const char *non_term[] = {\n");
2016 for (i = TK_reserved;
2019 if (g->symtab[i]->type == Nonterminal)
2020 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
2021 g->symtab[i]->name.txt);
2022 fprintf(f, "};\n\n");
2025 ### States and the goto tables.
2027 For each state we record the goto table, the reducible production if
2028 there is one, or a symbol to shift for error recovery.
2029 Some of the details of the reducible production are stored in the
2030 `do_reduce` function to come later. Here we store the production number,
2031 the body size (useful for stack management) and the resulting symbol (useful
2032 for knowing how to free data later).
2034 The go to table is stored in a simple array of `sym` and corresponding
2037 ###### exported types
2045 const struct lookup * go_to;
2056 static void gen_goto(FILE *f, struct grammar *g)
2059 fprintf(f, "#line 0 \"gen_goto\"\n");
2060 for (i = 0; i < g->states; i++) {
2062 fprintf(f, "static const struct lookup goto_%d[] = {\n",
2064 struct symset gt = g->statetab[i]->go_to;
2065 for (j = 0; j < gt.cnt; j++)
2066 fprintf(f, "\t{ %d, %d },\n",
2067 gt.syms[j], gt.data[j]);
2074 static void gen_states(FILE *f, struct grammar *g)
2077 fprintf(f, "#line 0 \"gen_states\"\n");
2078 fprintf(f, "static const struct state states[] = {\n");
2079 for (i = 0; i < g->states; i++) {
2080 struct itemset *is = g->statetab[i];
2081 int j, prod = -1, prod_len;
2083 for (j = 0; j < is->items.cnt; j++) {
2084 int itm = is->items.syms[j];
2085 int p = item_prod(itm);
2086 int bp = item_index(itm);
2087 struct production *pr = g->productions[p];
2089 if (bp < pr->body_size)
2091 /* This is what we reduce */
2092 if (prod < 0 || prod_len < pr->body_size) {
2094 prod_len = pr->body_size;
2099 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n",
2100 i, is->go_to.cnt, i, prod,
2101 g->productions[prod]->body_size,
2102 g->productions[prod]->head->num,
2104 g->productions[prod]->line_like,
2107 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n",
2108 i, is->go_to.cnt, i,
2109 is->starts_line, is->min_prefix);
2111 fprintf(f, "};\n\n");
2114 ### The `do_reduce` function and the code
2116 When the parser engine decides to reduce a production, it calls `do_reduce`.
2117 This has two functions.
2119 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2120 production being reduced. Secondly it runs the code that was included with
2121 the production if any.
2123 This code needs to be able to store data somewhere. Rather than requiring
2124 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2125 `do_reduce` return the size to be saved.
2127 In order for the code to access "global" context, we pass in the
2128 "config" pointer that was passed to parser function. If the `struct
2129 token_config` is embedded in some larger structure, the reducing code
2130 can access the larger structure using pointer manipulation.
2132 The code fragment requires translation when written out. Any `$N` needs to
2133 be converted to a reference either to that buffer (if `$0`) or to the
2134 structure returned by a previous reduction. These pointers need to be cast
2135 to the appropriate type for each access. All this is handled in
2138 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
2139 This applies only to symbols with references (or pointers), not those with structures.
2140 The `<` implies that the reference is being moved out, so the object will not be
2141 automatically freed. This is equivalent to assigning `NULL` to the pointer.
2145 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2148 char *used = calloc(1, p->body_size);
2151 fprintf(f, "\t\t\t");
2152 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2166 if (*c < '0' || *c > '9') {
2173 while (c[1] >= '0' && c[1] <= '9') {
2175 n = n * 10 + *c - '0';
2178 fprintf(f, "(*(struct %.*s*%s)ret)",
2179 p->head->struct_name.len,
2180 p->head->struct_name.txt,
2181 p->head->isref ? "*":"");
2182 else if (n > p->body_size)
2183 fprintf(f, "$%d", n);
2184 else if (p->body[n-1]->type == Terminal)
2185 fprintf(f, "(*(struct token *)body[%d])",
2187 else if (p->body[n-1]->struct_name.txt == NULL)
2188 fprintf(f, "$%d", n);
2190 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2191 p->body[n-1]->struct_name.len,
2192 p->body[n-1]->struct_name.txt,
2193 p->body[n-1]->isref ? "*":"", n-1);
2198 for (i = 0; i < p->body_size; i++) {
2199 if (p->body[i]->struct_name.txt &&
2201 // assume this has been copied out
2202 if (p->body[i]->isref)
2203 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2205 fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
2213 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2214 struct code_node *code)
2217 fprintf(f, "#line 1 \"gen_reduce\"\n");
2218 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2220 fprintf(f, "\tint ret_size = 0;\n");
2222 code_node_print(f, code, file);
2224 fprintf(f, "#line 4 \"gen_reduce\"\n");
2225 fprintf(f, "\tswitch(prod) {\n");
2226 for (i = 0; i < g->production_count; i++) {
2227 struct production *p = g->productions[i];
2228 fprintf(f, "\tcase %d:\n", i);
2231 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2235 if (p->head->struct_name.txt)
2236 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2237 p->head->struct_name.len,
2238 p->head->struct_name.txt,
2239 p->head->isref ? "*":"");
2241 fprintf(f, "\t\tbreak;\n");
2243 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2248 As each non-terminal can potentially cause a different type of data
2249 structure to be allocated and filled in, we need to be able to free it when
2252 It is particularly important to have fine control over freeing during error
2253 recovery where individual stack frames might need to be freed.
2255 For this, the grammar author is required to define a `free_XX` function for
2256 each structure that is used by a non-terminal. `do_free` will call whichever
2257 is appropriate given a symbol number, and will call `free` (as is
2258 appropriate for tokens) on any terminal symbol.
2262 static void gen_free(FILE *f, struct grammar *g)
2266 fprintf(f, "#line 0 \"gen_free\"\n");
2267 fprintf(f, "static void do_free(short sym, void *asn)\n");
2269 fprintf(f, "\tif (!asn) return;\n");
2270 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2271 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2272 fprintf(f, "\tswitch(sym) {\n");
2274 for (i = 0; i < g->num_syms; i++) {
2275 struct symbol *s = g->symtab[i];
2277 s->type != Nonterminal ||
2278 s->struct_name.txt == NULL)
2281 fprintf(f, "\tcase %d:\n", s->num);
2283 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2285 s->struct_name.txt);
2286 fprintf(f, "\t\tfree(asn);\n");
2288 fprintf(f, "\t\tfree_%.*s(asn);\n",
2290 s->struct_name.txt);
2291 fprintf(f, "\t\tbreak;\n");
2293 fprintf(f, "\t}\n}\n\n");
2296 ## The main routine.
2298 There are three key parts to the "main" function of parsergen: processing
2299 the arguments, loading the grammar file, and dealing with the grammar.
2301 ### Argument processing.
2303 Command line options allow the selection of analysis mode, name of output
2304 file, and whether or not a report should be generated. By default we create
2305 a report only if no code output was requested.
2307 The `parse_XX` function name uses the basename of the output file which
2308 should not have a suffix (`.c`). `.c` is added to the given name for the
2309 code, and `.h` is added for the header (if header text is specified in the
2316 static const struct option long_options[] = {
2317 { "LR0", 0, NULL, '0' },
2318 { "LR05", 0, NULL, '5' },
2319 { "SLR", 0, NULL, 'S' },
2320 { "LALR", 0, NULL, 'L' },
2321 { "LR1", 0, NULL, '1' },
2322 { "tag", 1, NULL, 't' },
2323 { "report", 0, NULL, 'R' },
2324 { "output", 1, NULL, 'o' },
2325 { NULL, 0, NULL, 0 }
2327 const char *options = "05SL1t:Ro:";
2329 ###### process arguments
2331 char *outfile = NULL;
2336 enum grammar_type type = LR05;
2337 while ((opt = getopt_long(argc, argv, options,
2338 long_options, NULL)) != -1) {
2353 outfile = optarg; break;
2355 tag = optarg; break;
2357 fprintf(stderr, "Usage: parsergen ...\n");
2362 infile = argv[optind++];
2364 fprintf(stderr, "No input file given\n");
2367 if (outfile && report == 1)
2370 if (name && strchr(name, '/'))
2371 name = strrchr(name, '/')+1;
2373 if (optind < argc) {
2374 fprintf(stderr, "Excess command line arguments\n");
2378 ### Loading the grammar file
2380 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2383 Once we have extracted the code (with `mdcode`) we expect to find three
2384 sections: header, code, and grammar. Anything else that is not
2385 excluded by the `--tag` option is an error.
2387 "header" and "code" are optional, though it is hard to build a working
2388 parser with neither. "grammar" must be provided.
2392 #include <sys/mman.h>
2397 static void pr_err(char *msg)
2400 fprintf(stderr, "%s\n", msg);
2404 struct section *table;
2408 fd = open(infile, O_RDONLY);
2410 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2411 infile, strerror(errno));
2414 len = lseek(fd, 0, 2);
2415 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2416 table = code_extract(file, file+len, pr_err);
2418 struct code_node *hdr = NULL;
2419 struct code_node *code = NULL;
2420 struct code_node *gram = NULL;
2421 struct code_node *pre_reduce = NULL;
2422 for (s = table; s; s = s->next) {
2423 struct text sec = s->section;
2424 if (tag && !strip_tag(&sec, tag))
2426 if (text_is(sec, "header"))
2428 else if (text_is(sec, "code"))
2430 else if (text_is(sec, "grammar"))
2432 else if (text_is(sec, "reduce"))
2433 pre_reduce = s->code;
2435 fprintf(stderr, "Unknown content section: %.*s\n",
2436 s->section.len, s->section.txt);
2441 ### Processing the input
2443 As we need to append an extension to a filename and then open it for
2444 writing, and we need to do this twice, it helps to have a separate function.
2448 static FILE *open_ext(char *base, char *ext)
2450 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2452 strcat(strcpy(fn, base), ext);
2458 If we can read the grammar, then we analyse and optionally report on it. If
2459 the report finds conflicts we will exit with an error status.
2461 ###### process input
2462 struct grammar *g = NULL;
2464 fprintf(stderr, "No grammar section provided\n");
2467 g = grammar_read(gram);
2469 fprintf(stderr, "Failure to parse grammar\n");
2474 grammar_analyse(g, type);
2476 if (grammar_report(g, type))
2480 If a "headers" section is defined, we write it out and include a declaration
2481 for the `parse_XX` function so it can be used from separate code.
2483 if (rv == 0 && hdr && outfile) {
2484 FILE *f = open_ext(outfile, ".h");
2486 code_node_print(f, hdr, infile);
2487 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2491 fprintf(stderr, "Cannot create %s.h\n",
2497 And if all goes well and an output file was provided, we create the `.c`
2498 file with the code section (if any) and the parser tables and function.
2500 if (rv == 0 && outfile) {
2501 FILE *f = open_ext(outfile, ".c");
2504 code_node_print(f, code, infile);
2505 gen_parser(f, g, infile, name, pre_reduce);
2508 fprintf(stderr, "Cannot create %s.c\n",
2514 And that about wraps it up. We need to set the locale so that UTF-8 is
2515 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2517 ###### File: parsergen.mk
2518 parsergen : parsergen.o libscanner.o libmdcode.o
2519 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2526 int main(int argc, char *argv[])
2531 setlocale(LC_ALL,"");
2533 ## process arguments
2540 ## The SHIFT/REDUCE parser
2542 Having analysed the grammar and generated all the tables, we only need the
2543 shift/reduce engine to bring it all together.
2545 ### Goto table lookup
2547 The parser generator has nicely provided us with goto tables sorted by
2548 symbol number. We need a binary search function to find a symbol in the
2551 ###### parser functions
2553 static int search(const struct state *l, int sym)
2556 int hi = l->go_to_cnt;
2560 while (lo + 1 < hi) {
2561 int mid = (lo + hi) / 2;
2562 if (l->go_to[mid].sym <= sym)
2567 if (l->go_to[lo].sym == sym)
2568 return l->go_to[lo].state;
2573 ### The state stack.
2575 The core data structure for the parser is the stack. This tracks all the
2576 symbols that have been recognised or partially recognised.
2578 The stack usually won't grow very large - maybe a few tens of entries. So
2579 we dynamically resize an array as required but never bother to shrink it
2582 We keep the stack as two separate allocations. One, `asn_stack` stores the
2583 "abstract syntax nodes" which are created by each reduction. When we call
2584 `do_reduce` we need to pass an array of the `asn`s of the body of the
2585 production, and by keeping a separate `asn` stack, we can just pass a
2586 pointer into this stack.
2588 The other allocation stores all other stack fields of which there are six.
2589 The `state` is the most important one and guides the parsing process. The
2590 `sym` is nearly unnecessary. However when we want to free entries from the
2591 `asn_stack`, it helps to know what type they are so we can call the right
2592 freeing function. The symbol leads us to the right free function through
2595 The `indents` count tracks the line indents within the symbol or
2596 immediately following it. These are used to allow indent information to
2597 guide parsing and error recovery.
2599 `since_newline` tracks how many stack frames since the last
2600 start-of-line (whether indented or not). So if `since_newline` is
2601 zero, then this symbol is at the start of a line. Similarly
2602 `since_indent` counts the number of states since an indent, it is zero
2603 precisely when `indents` is not zero.
2605 `newline_permitted` keeps track of whether newlines should be ignored
2608 The stack is most properly seen as alternating states and symbols -
2609 states, like the 'DOT' in items, are between symbols. Each frame in
2610 our stack holds a state and the symbol that was before it. The
2611 bottom of stack holds the start state but no symbol, as nothing came
2612 before the beginning.
2614 ###### parser functions
2619 short newline_permitted;
2623 short since_newline;
2633 Two operations are needed on the stack - shift (which is like push) and pop.
2635 Shift applies not only to terminals but also to non-terminals. When
2636 we reduce a production we will pop off entries corresponding to the
2637 body symbols, then push on an item for the head of the production.
2638 This last is exactly the same process as shifting in a terminal so we
2639 use the same function for both. In both cases we provide the symbol,
2640 the number of indents the symbol contains (which will be zero for a
2641 terminal symbol) and a flag indicating that the symbol was at (or was
2642 reduced from a symbol which was at) the start of a line. The state is
2643 deduced from the current top-of-stack state and the new symbol.
2645 To simplify other code we arrange for `shift` to fail if there is no `goto`
2646 state for the symbol. This is useful in basic parsing due to our design
2647 that we shift when we can, and reduce when we cannot. So the `shift`
2648 function reports if it could.
2650 `shift` is also used to push state zero onto the stack, so if the
2651 stack is empty, it always chooses zero as the next state.
2653 So `shift` finds the next state. If that succeeds it extends the
2654 allocations if needed and pushes all the information onto the stacks.
2656 Newlines are permitted after a `starts_line` state until an internal
2657 indent. If the new frame has neither a `starts_line` state nor an
2658 indent, newlines are permitted if the previous stack frame permitted
2661 ###### parser functions
// Push one frame onto the parse stacks.  The next state is found by a
// goto-table lookup from the current top-of-stack state (state zero when
// the stack is empty).  Grows both parallel stacks on demand and fills in
// the indent/newline bookkeeping for the new frame.  Reports whether a
// goto state existed, so callers can "shift if possible, else reduce".
// NOTE(review): this excerpt elides a number of lines — the `asn`
// parameter, the empty-stack branch of the `newstate` lookup, the
// failure check on `newstate`, the conditions selecting between the
// alternative assignments below, and the final tos increment / return.
2663 static int shift(struct parser *p,
2664 short sym, short indents, short start_of_line,
2666 const struct state states[])
2668 // Push an entry onto the stack
2669 struct frame next = {0};
2670 int newstate = p->tos
2671 ? search(&states[p->stack[p->tos-1].state],
2676 if (p->tos >= p->stack_size) {
2677 p->stack_size += 10;
// NOTE(review): realloc result assigned straight back — on allocation
// failure the old block is leaked and the next access dereferences NULL.
// The project appears to accept crash-on-OOM; flagging rather than fixing.
2678 p->stack = realloc(p->stack, p->stack_size
2679 * sizeof(p->stack[0]));
2680 p->asn_stack = realloc(p->asn_stack, p->stack_size
2681 * sizeof(p->asn_stack[0]));
2684 next.indents = indents;
2685 next.state = newstate;
// Newlines are permitted after a starts_line state until an internal
// indent; otherwise the permission is inherited from the previous frame
// (the guarding conditions are elided in this excerpt).
2686 if (states[newstate].starts_line)
2687 next.newline_permitted = 1;
2689 next.newline_permitted = 0;
2691 next.newline_permitted =
2692 p->stack[p->tos-1].newline_permitted;
2694 next.newline_permitted = 0;
// since_newline counts frames since start-of-line; since_indent counts
// frames since the last indent (zero exactly when this frame has indents).
2696 if (!start_of_line) {
2698 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2700 next.since_newline = 1;
2703 next.since_indent = 0;
2705 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2707 next.since_indent = 1;
2709 p->stack[p->tos] = next;
2710 p->asn_stack[p->tos] = asn;
2715 `pop` primarily moves the top of stack (`tos`) back down the required
2716 amount and frees any `asn` entries that need to be freed. It also
2717 collects a summary of the indents and line starts in the symbols that
2718 are being removed. It is called _after_ we reduce a production, just
2719 before we `shift` the nonterminal in.
2721 ###### parser functions
// Remove `num` frames from the top of the stack, freeing each abstract
// syntax node via `do_free` and collecting a summary of the removed
// symbols: the accumulated indent count (returned) and whether any
// removed frame was at start-of-line (written through *start_of_line).
// Called after a reduce, just before the nonterminal is shifted in.
// NOTE(review): this excerpt elides local declarations, the tos
// adjustment and the return of `indents`.  One caller passes
// start_of_line == NULL, so the store below is presumably guarded by a
// NULL check on an elided line — confirm in full source.
2723 static int pop(struct parser *p, int num,
2724 short *start_of_line,
2725 void(*do_free)(short sym, void *asn))
2732 for (i = 0; i < num; i++) {
2733 sol |= !p->stack[p->tos+i].since_newline;
2734 indents += p->stack[p->tos+i].indents;
2735 do_free(p->stack[p->tos+i].sym,
2736 p->asn_stack[p->tos+i]);
2739 *start_of_line = sol;
2743 ### Memory allocation
2745 The `scanner` returns tokens in a local variable - we want them in allocated
2746 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2747 a reduce is in a large buffer. Both of these require some allocation and
2748 copying, hence `memdup` and `tokcopy`.
2750 ###### parser includes
2753 ###### parser functions
// Copy `len` bytes of `m` into freshly allocated memory, so a reduce
// result can move off the shared local buffer onto the asn stack.
// NOTE(review): the allocation of `ret` and the return are elided here.
2755 void *memdup(void *m, int len)
2761 memcpy(ret, m, len);
// Duplicate a by-value scanner token into heap memory so it can live on
// the asn stack.  NOTE(review): malloc result is used unchecked and the
// assignment/return lines are elided in this excerpt.
2765 static struct token *tok_copy(struct token tk)
2767 struct token *new = malloc(sizeof(*new));
2772 ### The heart of the parser.
2774 Now we have the parser. If we can shift we do, though newlines and
2775 reducing indenting may block that. If not and we can reduce we do
2776 that. If the production we reduced was production zero, then we have
2777 accepted the input and can finish.
2779 We return whatever `asn` was returned by reducing production zero.
2781 If we can neither shift nor reduce we have an error to handle. We pop
2782 single entries off the stack until we can shift the `TK_error` symbol, then
2783 drop input tokens until we find one we can shift into the new error state.
2785 When we find `TK_in` and `TK_out` tokens which report indents we need
2786 to handle them directly as the grammar cannot express what we want to
2789 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2790 record how many indents there are following the previous token.
2792 `TK_out` tokens must be canceled against an indent count
2793 within the stack. If we can reduce some symbols that are all since
2794 the most recent indent, then we do that first. If the minimum prefix
2795 of the current state then extends back before the most recent indent,
2796 that indent can be cancelled. If the minimum prefix is shorter then
2797 the indent had ended prematurely and we must start error handling, which
2798 is still a work-in-progress.
2800 `TK_newline` tokens are ignored unless the top stack frame records
2801 that they are permitted. In that case they will not be considered for
2802 shifting if it is possible to reduce some symbols that are all since
2803 the most recent start of line. This is how a newline forcibly
2804 terminates any line-like structure - we try to reduce down to at most
2805 one symbol for each line where newlines are allowed.
2806 A consequence of this is that a rule like
2808 ###### Example: newlines - broken
2812 IfStatement -> Newlines if ....
2814 cannot work, as the NEWLINE will never be shifted as the empty string
2815 will be reduced first. Optional sets of newlines need to be included
2816 in the thing that precedes:
2818 ###### Example: newlines - works
2822 IfStatement -> If ....
2824 Here the NEWLINE will be shifted because nothing can be reduced until
2827 When, during error handling, we discard tokens read in, we want to keep
2828 discarding until we see one that is recognised. If we had a full set
2829 of LR(1) grammar states, this will mean looking in the look-ahead set,
2830 but we don't keep a full look-ahead set. We only record the subset
2831 that leads to SHIFT. We can, however, deduce the look-ahead set by
2832 looking at the SHIFT subsets for all states that we can get to by
2833 reducing zero or more times. So we need a little function which
2834 checks if a given token is in any of these look-ahead sets.
2836 ###### parser includes
// Decide whether token `tk` could eventually be shifted from `state`:
// test the SHIFT goto of every state reachable by zero or more default
// reductions.  Used during error recovery to choose which input tokens
// to keep.  NOTE(review): the two return statements and the fallback
// return after the loop are elided in this excerpt.
2841 static int in_lookahead(struct token *tk, const struct state *states, int state)
2843 while (state >= 0) {
2844 if (search(&states[state], tk->num) >= 0)
2846 if (states[state].reduce_prod < 0)
2848 state = search(&states[state], states[state].reduce_sym);
// The shift/reduce engine.  Shift when possible, otherwise reduce; when
// neither works, recover via TK_error.  TK_in records an indent on the
// top frame; TK_out cancels an indent somewhere in the stack (reducing
// first if possible); TK_newline is only considered when the top frame
// permits newlines.  Returns the asn produced by reducing production 0.
// NOTE(review): this excerpt elides many lines (the main loop header,
// braces, several conditions and assignments); comments below describe
// only the code that is visible.
2853 void *parser_run(struct token_state *tokens,
2854 const struct state states[],
2855 int (*do_reduce)(int, void**, struct token_config*, void*),
2856 void (*do_free)(short, void*),
2857 FILE *trace, const char *non_term[],
2858 struct token_config *config)
2860 struct parser p = { 0 };
2861 struct token *tk = NULL;
2865 shift(&p, TK_eof, 0, 1, NULL, states);
2867 struct token *err_tk;
2868 struct frame *tos = &p.stack[p.tos-1];
2870 tk = tok_copy(token_next(tokens));
2871 parser_trace(trace, &p,
2872 tk, states, non_term, config->known_count);
// TK_in: record the indent in the top frame and reset the counters.
2874 if (tk->num == TK_in) {
2876 tos->since_newline = 0;
2877 tos->since_indent = 0;
2878 if (!states[tos->state].starts_line)
2879 tos->newline_permitted = 0;
2882 parser_trace_action(trace, "Record");
// TK_out: reduce first if everything since the indent can go, then
// cancel the indent if the state's minimum prefix reaches back past it.
2885 if (tk->num == TK_out) {
2886 if (states[tos->state].reduce_size >= 0 &&
2887 states[tos->state].reduce_size <= tos->since_indent)
2889 if (states[tos->state].min_prefix >= tos->since_indent) {
2891 struct frame *in = tos - tos->since_indent;
2893 if (in->indents == 0) {
2894 /* Reassess since_indent and newline_permitted */
2896 in->since_indent = in[-1].since_indent + 1;
2897 in->newline_permitted = in[-1].newline_permitted;
2899 in->since_indent = 0;
2900 in->newline_permitted = 0;
2902 if (states[in->state].starts_line)
2903 in->newline_permitted = 1;
2906 in->since_indent = in[-1].since_indent + 1;
2907 if (states[in->state].starts_line)
2908 in->newline_permitted = 1;
2910 in->newline_permitted = in[-1].newline_permitted;
2915 parser_trace_action(trace, "Cancel");
2918 // fall through to error handling as both SHIFT and REDUCE
// TK_newline: discard when not permitted here; otherwise prefer to
// reduce everything since the last start-of-line before shifting it.
2921 if (tk->num == TK_newline) {
2922 if (!tos->newline_permitted) {
2925 parser_trace_action(trace, "Discard");
2928 if (tos->since_newline > 1 &&
2929 states[tos->state].reduce_size >= 0 &&
2930 states[tos->state].reduce_size <= tos->since_newline)
2933 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2935 parser_trace_action(trace, "Shift");
2939 if (states[tos->state].reduce_prod >= 0 &&
2940 states[tos->state].newline_only &&
2941 !(tk->num == TK_newline ||
2942 tk->num == TK_eof ||
2943 tk->num == TK_out ||
2944 (tos->indents == 0 && tos->since_newline == 0))) {
2945 /* Anything other than newline or out or eof
2946 * is an error unless we are already at start
2947 * of line, as this production must end at EOL.
2949 } else if (states[tos->state].reduce_prod >= 0) {
2952 const struct state *nextstate = &states[tos->state];
2953 int prod = nextstate->reduce_prod;
2954 int size = nextstate->reduce_size;
2956 static char buf[16*1024];
2957 short indents, start_of_line;
2959 body = p.asn_stack + (p.tos - size);
2961 bufsize = do_reduce(prod, body, config, buf);
2963 indents = pop(&p, size, &start_of_line,
2965 res = memdup(buf, bufsize);
2966 memset(buf, 0, bufsize);
2967 if (!shift(&p, nextstate->reduce_sym,
2968 indents, start_of_line,
// Only production zero (the accept production) may fail to shift.
2970 if (prod != 0) abort();
2974 parser_trace_action(trace, "Reduce");
2977 /* Error. We walk up the stack until we
2978 * find a state which will accept TK_error.
2979 * We then shift in TK_error and see what state
2980 * that takes us to.
2981 * Then we discard input tokens until
2982 * we find one that is acceptable.
2984 parser_trace_action(trace, "ERROR");
2985 short indents = 0, start_of_line;
2987 err_tk = tok_copy(*tk);
2989 shift(&p, TK_error, 0, 0,
2990 err_tk, states) == 0)
2991 // discard this state
2992 indents += pop(&p, 1, &start_of_line, do_free);
2995 // no state accepted TK_error
// Discard input until a token appears in some reachable look-ahead set.
2998 tos = &p.stack[p.tos-1];
2999 while (!in_lookahead(tk, states, tos->state) &&
3000 tk->num != TK_eof) {
3002 tk = tok_copy(token_next(tokens));
3003 if (tk->num == TK_in)
3005 if (tk->num == TK_out) {
3009 // FIXME update since_indent here
3012 tos->indents += indents;
3015 pop(&p, p.tos, NULL, do_free);
3021 ###### exported functions
3022 void *parser_run(struct token_state *tokens,
3023 const struct state states[],
3024 int (*do_reduce)(int, void**, struct token_config*, void*),
3025 void (*do_free)(short, void*),
3026 FILE *trace, const char *non_term[],
3027 struct token_config *config);
3031 Being able to visualize the parser in action can be invaluable when
3032 debugging the parser code, or trying to understand how the parse of a
3033 particular grammar progresses. The stack contains all the important
3034 state, so just printing out the stack every time around the parse loop
3035 can make it possible to see exactly what is happening.
3037 This doesn't explicitly show each SHIFT and REDUCE action. However they
3038 are easily deduced from the change between consecutive lines, and the
3039 details of each state can be found by cross referencing the states list
3040 in the stack with the "report" that parsergen can generate.
3042 For terminal symbols, we just dump the token. For non-terminals we
3043 print the name of the symbol. The look ahead token is reported at the
3044 end inside square brackets.
3046 ###### parser functions
// Printable names for the reserved token numbers, used by the tracing
// code below.  NOTE(review): further entries (and the closing brace)
// are elided in this excerpt.
3048 static char *reserved_words[] = {
3049 [TK_error] = "ERROR",
3052 [TK_newline] = "NEWLINE",
// Print one frame's state for tracing as "(Ns nK) ": `s` marks a
// starts_line state, and `nK` shows since_newline when this frame
// permits newlines.
3055 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
3057 fprintf(trace, "(%d", f->state);
3058 if (states[f->state].starts_line)
3059 fprintf(trace, "s");
3060 if (f->newline_permitted)
3061 fprintf(trace, "n%d", f->since_newline);
3062 fprintf(trace, ") ");
// Dump the whole parse stack, one entry per frame: reserved tokens by
// name, known terminals by their text, non-terminals by name — each
// followed by its indent count and state — then the look-ahead token in
// square brackets with its line and column.
// NOTE(review): this excerpt elides several lines (the NULL-trace
// guard, the `sym` assignment, and some separators/braces).
3065 void parser_trace(FILE *trace, struct parser *p,
3066 struct token *tk, const struct state states[],
3067 const char *non_term[], int knowns)
3072 for (i = 0; i < p->tos; i++) {
3073 struct frame *f = &p->stack[i];
3076 if (sym < TK_reserved &&
3077 reserved_words[sym] != NULL)
3078 fputs(reserved_words[sym], trace);
3079 else if (sym < TK_reserved + knowns) {
// Known terminal: the asn is the saved token; print (part of) its text.
3080 struct token *t = p->asn_stack[i];
3081 text_dump(trace, t->txt, 20);
3083 fputs(non_term[sym - TK_reserved - knowns],
3086 fprintf(trace, ".%d", f->indents);
3087 if (f->since_newline == 0)
3091 parser_trace_state(trace, f, states);
// Finally the look-ahead token, in brackets, with position info.
3093 fprintf(trace, "[");
3094 if (tk->num < TK_reserved &&
3095 reserved_words[tk->num] != NULL)
3096 fputs(reserved_words[tk->num], trace);
3098 text_dump(trace, tk->txt, 20);
3099 fprintf(trace, ":%d:%d]", tk->line, tk->col);
// Append the name of the action taken this iteration (Shift, Reduce,
// Record, Cancel, Discard, ERROR) to the current trace line.
// NOTE(review): the NULL-trace guard is elided in this excerpt.
3102 void parser_trace_action(FILE *trace, char *action)
3105 fprintf(trace, " - %s\n", action);
3110 The obvious example for a parser is a calculator.
3112 As `scanner` provides a number parsing function using `libgmp`, it is not much
3113 work to perform arbitrary rational number calculations.
3115 This calculator takes one expression, or an equality test, per line. The
3116 results are printed and if any equality test fails, the program exits with
3119 ###### File: parsergen.mk
3120 calc.c calc.h : parsergen parsergen.mdc
3121 ./parsergen --tag calc -o calc parsergen.mdc
3122 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3123 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3125 ./calc parsergen.mdc
3130 #include "parse_number.h"
3131 // what do we use for a demo-grammar? A calculator of course.
3143 #include <sys/mman.h>
3149 #include "scanner.h"
3154 static void free_number(struct number *n)
// Compare a length-delimited `struct text` with a NUL-terminated string
// for exact equality (same length, same bytes).
3160 static int text_is(struct text t, char *s)
3162 return (strlen(s) == t.len &&
3163 strncmp(s, t.txt, t.len) == 0);
// Calculator demo driver: mmap the file named in argv[1], extract its
// mdcode sections, and run the generated parser over each
// "example: input" section (tracing to stderr when extra args given).
// NOTE(review): argv[1], the open/lseek/mmap results and table are used
// unchecked — acceptable for a demo, not for production code.  Several
// lines (config fields, section-freeing loop tail) are elided here.
3166 int main(int argc, char *argv[])
3168 int fd = open(argv[1], O_RDONLY);
3169 int len = lseek(fd, 0, 2);
3170 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3171 struct section *table = code_extract(file, file+len, NULL);
3173 struct token_config config = {
3174 .ignored = (1 << TK_line_comment)
3177 .number_chars = ".,_+-",
3181 for (s = table; s; s = s->next)
3182 if (text_is(s->section, "example: input"))
3183 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3185 struct section *t = table->next;
3186 code_free(table->code);
3198 Session -> Session Line
3201 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3202 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3203 gmp_printf(" or as a decimal: %Fg\n", fl);
3207 | Expression = Expression NEWLINE ${
3208 if (mpq_equal($1.val, $3.val))
3209 gmp_printf("Both equal %Qd\n", $1.val);
3211 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3216 | NEWLINE ${ printf("Blank line\n"); }$
3217 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3220 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3221 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3222 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3223 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3224 | Expression // Expression ${ {
3227 mpz_init(z0); mpz_init(z1); mpz_init(z2);
3228 mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
3229 mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
3230 mpz_tdiv_q(z0, z1, z2);
3231 mpq_set_z($0.val, z0);
3232 mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
3234 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3235 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3240 3.1415926535 - 355/113
3242 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3244 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1