1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
24 ###### File: parsergen.c
29 ## forward declarations
40 ###### File: libparser.c
47 ###### File: parsergen.mk
50 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
53 ## Reading the grammar
55 The grammar must be stored in a markdown literate code file as parsed
56 by "[mdcode][]". It must have three top level (i.e. unreferenced)
57 sections: `header`, `code`, and `grammar`. The first two will be
58 literally copied into the generated `.c` and `.h`. files. The last
59 contains the grammar. This is tokenised with "[scanner][]".
61 If the `--tag` option is given, then any top level header that doesn't
62 start with the tag is ignored, and the tag is stripped from the rest. So
64 means that the three needed sections must be `Foo: header`, `Foo: code`,
65 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
109 The strings reported by `mdcode` and `scanner` are `struct text` which have
110 length rather than being null terminated. To help with printing and
111 comparing we define `text_is` and `prtxt`, which should possibly go in
112 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
113 which might contain control characters.
115 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
116 because that is what we need to detect tags.
119 static int text_is(struct text t, char *s)
121 return (strlen(s) == t.len &&
122 strncmp(s, t.txt, t.len) == 0);
124 static void prtxt(struct text t)
126 printf("%.*s", t.len, t.txt);
129 static int strip_tag(struct text *t, char *tag)
131 int skip = strlen(tag) + 1;
132 if (skip >= t->len ||
133 strncmp(t->txt, tag, skip-1) != 0 ||
134 t->txt[skip-1] != ':')
136 while (skip < t->len && t->txt[skip] == ' ')
145 Productions are comprised primarily of symbols - terminal and
146 non-terminal. We do not make a syntactic distinction between the two,
147 though non-terminals must be identifiers. Non-terminal symbols are
148 those which appear in the head of a production, terminal symbols are
149 those which don't. There are also "virtual" symbols used for precedence
150 marking discussed later, and sometimes we won't know what type a symbol
153 ###### forward declarations
154 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
155 char *symtypes = "UVTN";
159 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
160 table of known symbols and the resulting parser will report them as
161 `TK_reserved + N`. A small set of identifiers are reserved for the
162 different token types that `scanner` can report.
165 static char *reserved_words[] = {
166 [TK_error] = "ERROR",
167 [TK_number] = "NUMBER",
168 [TK_ident] = "IDENTIFIER",
170 [TK_string] = "STRING",
171 [TK_multi_string] = "MULTI_STRING",
174 [TK_newline] = "NEWLINE",
180 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
181 recognised. The former is automatically expected at the end of the text
182 being parsed. The latter are always ignored by the parser.
184 All of these symbols are stored in a simple symbol table. We use the
185 `struct text` from `mdcode` to store the name and link them together
186 into a sorted list using an insertion sort.
188 We don't have separate `find` and `insert` functions as any symbol we
189 find needs to be remembered. We simply expect `find` to always return a
190 symbol, but its type might be `Unknown`.
199 ###### grammar fields
204 static struct symbol *sym_find(struct grammar *g, struct text s)
206 struct symbol **l = &g->syms;
211 (cmp = text_cmp((*l)->name, s)) < 0)
215 n = calloc(1, sizeof(*n));
224 static void symbols_init(struct grammar *g)
226 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
228 for (i = 0; i < entries; i++) {
231 t.txt = reserved_words[i];
234 t.len = strlen(t.txt);
241 ### Data types and precedence.
243 Data type specification and precedence specification are both
244 introduced by a dollar sign at the start of the line. If the next
245 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
246 precedence, otherwise it specifies a data type.
248 The data type name is simply stored and applied to the head of all
249 subsequent productions. It must be the name of a structure optionally
250 preceded by an asterisk which means a reference or "pointer". So
251 `$expression` maps to `struct expression` and `$*statement` maps to
252 `struct statement *`.
254 Any productions given before the first data type declaration will have
255 no data type associated with them and can carry no information. In
256 order to allow other non-terminals to have no type, the data type
257 `$void` can be given. This does *not* mean that `struct void` will be
258 used, but rather that no type will be associated with future
261 The precedence line must contain a list of symbols - typically
262 terminal symbols, but not necessarily. It can only contain symbols
263 that have not been seen yet, so precedence declaration must precede
264 the productions that they affect.
266 A precedence line may also contain a "virtual" symbol which is an
267 identifier preceded by `$$`. Virtual terminals carry precedence
268 information but are not included in the grammar. A production can
269 declare that it inherits the precedence of a given virtual symbol.
271 This use for `$$` precludes it from being used as a symbol in the
272 described language. Two other symbols: `${` and `}$` are also
275 Each new precedence line introduces a new precedence level and
276 declares how it associates. This level is stored in each symbol
277 listed and may be inherited by any production which uses the symbol. A
278 production inherits from the last symbol which has a precedence.
280 ###### grammar fields
281 struct text current_type;
286 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
287 static const char *known[] = { "$$", "${", "}$" };
290 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
292 struct token t = token_next(ts);
297 if (t.num != TK_ident) {
298 err = "type or assoc expected after '$'";
301 if (text_is(t.txt, "LEFT"))
303 else if (text_is(t.txt, "RIGHT"))
305 else if (text_is(t.txt, "NON"))
308 g->current_type = t.txt;
309 g->type_isref = isref;
310 if (text_is(t.txt, "void"))
311 g->current_type.txt = NULL;
313 if (t.num != TK_newline) {
314 err = "Extra tokens after type name";
321 err = "$* cannot be followed by a precedence";
325 // This is a precedence line, need some symbols.
329 while (t.num != TK_newline) {
330 enum symtype type = Terminal;
332 if (t.num == TK_virtual) {
335 if (t.num != TK_ident) {
336 err = "$$ must be followed by a word";
339 } else if (t.num != TK_ident &&
341 err = "Illegal token in precedence line";
344 s = sym_find(g, t.txt);
345 if (s->type != Unknown) {
346 err = "Symbols in precedence line must not already be known.";
350 s->precedence = g->prec_levels;
356 err = "No symbols given on precedence line";
360 while (t.num != TK_newline && t.num != TK_eof)
367 A production either starts with an identifier which is the head
368 non-terminal, or a vertical bar (`|`) in which case this production
369 uses the same head as the previous one. The identifier must be
370 followed by a `->` mark. All productions for a given non-terminal must
371 be grouped together with the `nonterminal ->` given only once.
373 After this a (possibly empty) sequence of identifiers and marks form
374 the body of the production. A virtual symbol may be given after the
375 body by preceding it with `$$`. If a virtual symbol is given, the
376 precedence of the production is that for the virtual symbol. If none
377 is given, the precedence is inherited from the last symbol in the
378 production which has a precedence specified.
380 After the optional precedence may come the `${` mark. This indicates
381 the start of a code fragment. If present, this must be on the same
382 line as the start of the production.
384 All of the text from the `${` through to the matching `}$` is
385 collected and forms the code-fragment for the production. It must all
386 be in one `code_node` of the literate code. The `}$` must be
387 at the end of a line.
389 Text in the code fragment will undergo substitutions where `$N` or
390 `$<N`, for some numeric `N`, will be replaced with a variable holding
391 the parse information for the particular symbol in the production.
392 `$0` is the head of the production, `$1` is the first symbol of the
393 body, etc. The type of `$N` for a terminal symbol is `struct token`.
394 For a non-terminal, it is whatever has been declared for that symbol.
395 The `<` may be included for symbols declared as storing a reference
396 (not a structure) and means that the reference is being moved out, so
397 it will not automatically be freed.
399 While building productions we will need to add to an array which needs to
403 static void array_add(void *varray, int *cnt, void *new)
405 void ***array = varray;
408 current = ((*cnt-1) | (step-1))+1;
409 if (*cnt == current) {
412 *array = realloc(*array, current * sizeof(void*));
414 (*array)[*cnt] = new;
418 Collecting the code fragment simply involves reading tokens until we
419 hit the end token `}$`, and noting the character position of the start and
423 static struct text collect_code(struct token_state *state,
428 code.txt = start.txt.txt + start.txt.len;
430 t = token_next(state);
431 while (t.node == start.node &&
432 t.num != TK_close && t.num != TK_error &&
434 if (t.num == TK_close && t.node == start.node)
435 code.len = t.txt.txt - code.txt;
441 Now we have all the bits we need to parse a full production.
446 ###### grammar fields
447 struct production **productions;
448 int production_count;
450 ###### production fields
452 struct symbol **body;
458 int first_production;
461 static char *parse_production(struct grammar *g,
463 struct token_state *state)
465 /* Head has already been parsed. */
468 struct production p, *pp;
470 memset(&p, 0, sizeof(p));
472 tk = token_next(state);
473 while (tk.num == TK_ident || tk.num == TK_mark) {
474 struct symbol *bs = sym_find(g, tk.txt);
475 if (bs->type == Unknown)
477 if (bs->type == Virtual) {
478 err = "Virtual symbol not permitted in production";
481 if (bs->precedence) {
482 p.precedence = bs->precedence;
485 array_add(&p.body, &p.body_size, bs);
486 tk = token_next(state);
488 if (tk.num == TK_virtual) {
490 tk = token_next(state);
491 if (tk.num != TK_ident) {
492 err = "word required after $$";
495 vs = sym_find(g, tk.txt);
496 if (vs->type != Virtual) {
497 err = "symbol after $$ must be virtual";
500 p.precedence = vs->precedence;
502 tk = token_next(state);
504 if (tk.num == TK_open) {
505 p.code_line = tk.line;
506 p.code = collect_code(state, tk);
507 if (p.code.txt == NULL) {
508 err = "code fragment not closed properly";
511 tk = token_next(state);
513 if (tk.num != TK_newline && tk.num != TK_eof) {
514 err = "stray tokens at end of line";
517 pp = malloc(sizeof(*pp));
519 array_add(&g->productions, &g->production_count, pp);
522 while (tk.num != TK_newline && tk.num != TK_eof)
523 tk = token_next(state);
527 With the ability to parse production and dollar-lines, we have nearly all
528 that we need to parse a grammar from a `code_node`.
530 The head of the first production will effectively be the `start` symbol of
531 the grammar. However it won't _actually_ be so. Processing the grammar is
532 greatly simplified if the real start symbol only has a single production,
533 and expects `$eof` as the final terminal. So when we find the first
534 explicit production we insert an extra production as production zero which
537 ###### Example: production 0
540 where `START` is the first non-terminal given.
542 ###### create production zero
543 struct production *p = calloc(1,sizeof(*p));
544 struct text start = {"$start",6};
545 struct text eof = {"$eof",4};
546 struct text code = {"$0 = $<1;", 9};
547 p->head = sym_find(g, start);
548 p->head->type = Nonterminal;
549 p->head->struct_name = g->current_type;
550 p->head->isref = g->type_isref;
551 if (g->current_type.txt)
553 array_add(&p->body, &p->body_size, head);
554 array_add(&p->body, &p->body_size, sym_find(g, eof));
555 p->head->first_production = g->production_count;
556 array_add(&g->productions, &g->production_count, p);
558 Now we are ready to read in the grammar. We ignore comments
559 and strings so that the marks which introduce them can be
560 used as terminals in the grammar. We don't ignore numbers
561 even though we don't allow them as that causes the scanner
562 to produce errors that the parser is better positioned to handle.
565 static struct grammar *grammar_read(struct code_node *code)
567 struct token_config conf = {
570 .words_marks = known,
571 .known_count = sizeof(known)/sizeof(known[0]),
573 .ignored = (1 << TK_line_comment)
574 | (1 << TK_block_comment)
577 | (1 << TK_multi_string)
582 struct token_state *state = token_open(code, &conf);
584 struct symbol *head = NULL;
588 g = calloc(1, sizeof(*g));
591 for (tk = token_next(state); tk.num != TK_eof;
592 tk = token_next(state)) {
593 if (tk.num == TK_newline)
595 if (tk.num == TK_ident) {
597 head = sym_find(g, tk.txt);
598 if (head->type == Nonterminal)
599 err = "This non-terminal has already be used.";
600 else if (head->type == Virtual)
601 err = "Virtual symbol not permitted in head of production";
603 head->type = Nonterminal;
604 head->struct_name = g->current_type;
605 head->isref = g->type_isref;
606 if (g->production_count == 0) {
607 ## create production zero
609 head->first_production = g->production_count;
610 tk = token_next(state);
611 if (tk.num == TK_mark &&
612 text_is(tk.txt, "->"))
613 err = parse_production(g, head, state);
615 err = "'->' missing in production";
617 } else if (tk.num == TK_mark
618 && text_is(tk.txt, "|")) {
619 // another production for same non-term
621 err = parse_production(g, head, state);
623 err = "First production must have a head";
624 } else if (tk.num == TK_mark
625 && text_is(tk.txt, "$")) {
626 err = dollar_line(state, g, 0);
627 } else if (tk.num == TK_mark
628 && text_is(tk.txt, "$*")) {
629 err = dollar_line(state, g, 1);
631 err = "Unrecognised token at start of line.";
639 fprintf(stderr, "Error at line %d: %s\n",
646 ## Analysing the grammar
648 The central task in analysing the grammar is to determine a set of
649 states to drive the parsing process. This will require finding
650 various sets of symbols and of "items". Some of these sets will need
651 to attach information to each element in the set, so they are more
654 Each "item" may have a 'look-ahead' or `LA` set associated with
655 it. Many of these will be the same as each other. To avoid memory
656 wastage, and to simplify some comparisons of sets, these sets will be
657 stored in a list of unique sets, each assigned a number.
659 Once we have the data structures in hand to manage these sets and
660 lists, we can start setting the 'nullable' flag, build the 'FIRST'
661 sets, and then create the item sets which define the various states.
665 Though we don't only store symbols in these sets, they are the main
666 things we store, so they are called symbol sets or "symsets".
668 A symset has a size, an array of shorts, and an optional array of data
669 values, which are also shorts. If the array of data is not present,
670 we store an impossible pointer, as `NULL` is used to indicate that no
671 memory has been allocated yet.
676 unsigned short *syms, *data;
678 #define NO_DATA ((unsigned short *)1)
679 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
680 const struct symset INIT_DATASET = { 0, NULL, NULL };
682 The arrays of shorts are allocated in blocks of 8 and are kept sorted
683 by using an insertion sort. We don't explicitly record the amount of
684 allocated space as it can be derived directly from the current `cnt` using
685 `((cnt - 1) | 7) + 1`.
688 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
691 int current = ((s->cnt-1) | 7) + 1;
692 if (current == s->cnt) {
694 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
695 if (s->data != NO_DATA)
696 s->data = realloc(s->data, sizeof(*s->data) * current);
699 while (i > 0 && s->syms[i-1] > key) {
700 s->syms[i] = s->syms[i-1];
701 if (s->data != NO_DATA)
702 s->data[i] = s->data[i-1];
706 if (s->data != NO_DATA)
711 Finding a symbol (or item) in a `symset` uses a simple binary search.
712 We return the index where the value was found (so data can be accessed),
713 or `-1` to indicate failure.
715 static int symset_find(struct symset *ss, unsigned short key)
722 while (lo + 1 < hi) {
723 int mid = (lo + hi) / 2;
724 if (ss->syms[mid] <= key)
729 if (ss->syms[lo] == key)
734 We will often want to form the union of two symsets. When we do, we
735 will often want to know if anything changed (as that might mean there
736 is more work to do). So `symset_union` reports whether anything was
737 added to the first set. We use a slow quadratic approach as these
738 sets don't really get very big. If profiles shows this to be a problem it
739 can be optimised later.
741 static int symset_union(struct symset *a, struct symset *b)
745 for (i = 0; i < b->cnt; i++)
746 if (symset_find(a, b->syms[i]) < 0) {
747 unsigned short data = 0;
748 if (b->data != NO_DATA)
750 symset_add(a, b->syms[i], data);
756 And of course we must be able to free a symset.
758 static void symset_free(struct symset ss)
761 if (ss.data != NO_DATA)
767 Some symsets are simply stored somewhere appropriate in the `grammar`
768 data structure, others need a bit of indirection. This applies
769 particularly to "LA" sets. These will be paired with "items" in an
770 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
771 stored in the `data` field, so we need a mapping from a small (short)
772 number to an LA `symset`.
774 As mentioned earlier this involves creating a list of unique symsets.
776 For now, we just use a linear list sorted by insertion. A skiplist
777 would be more efficient and may be added later.
784 struct setlist *next;
787 ###### grammar fields
788 struct setlist *sets;
793 static int ss_cmp(struct symset a, struct symset b)
796 int diff = a.cnt - b.cnt;
799 for (i = 0; i < a.cnt; i++) {
800 diff = (int)a.syms[i] - (int)b.syms[i];
807 static int save_set(struct grammar *g, struct symset ss)
809 struct setlist **sl = &g->sets;
813 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
820 s = malloc(sizeof(*s));
829 Finding a set by number is currently performed by a simple linear search.
830 If this turns out to hurt performance, we can store the sets in a dynamic
831 array like the productions.
833 static struct symset set_find(struct grammar *g, int num)
835 struct setlist *sl = g->sets;
836 while (sl && sl->num != num)
841 ### Setting `nullable`
843 We set `nullable` on the head symbol for any production for which all
844 the body symbols (if any) are nullable. As this is a recursive
845 definition, any change in the `nullable` setting means that we need to
846 re-evaluate where it needs to be set.
848 We simply loop around performing the same calculations until no more
855 static void set_nullable(struct grammar *g)
858 while (check_again) {
861 for (p = 0; p < g->production_count; p++) {
862 struct production *pr = g->productions[p];
865 if (pr->head->nullable)
867 for (s = 0; s < pr->body_size; s++)
868 if (! pr->body[s]->nullable)
870 if (s == pr->body_size) {
871 pr->head->nullable = 1;
878 ### Setting `line_like`
880 In order to be able to ignore newline tokens when not relevant, but
881 still include them in the parse when needed, we will need to know
882 which states can start a "line-like" section of code. We ignore
883 newlines when there is an indent since the most recent start of a
886 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
887 If a symbol cannot derive a NEWLINE, then it is only part of a line -
888 so is word-like. If it can derive a NEWLINE, then we consider it to
891 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
892 is the head of a production that contains a line_like symbol is also a
893 line-like symbol. We use a new field `line_like` to record this
894 attribute of symbols, and compute it in a repetitive manner similar to
901 static void set_line_like(struct grammar *g)
904 g->symtab[TK_newline]->line_like = 1;
905 while (check_again) {
908 for (p = 0; p < g->production_count; p++) {
909 struct production *pr = g->productions[p];
912 if (pr->head->line_like)
915 for (s = 0 ; s < pr->body_size; s++) {
916 if (pr->body[s]->line_like) {
917 pr->head->line_like = 1;
926 ### Building the `first` sets
928 When calculating what can follow a particular non-terminal, we will need to
929 know what the "first" terminal in any subsequent non-terminal might be. So
930 we calculate the `first` set for every non-terminal and store them in an
931 array. We don't bother recording the "first" set for terminals as they are
934 As the "first" for one symbol might depend on the "first" of another,
935 we repeat the calculations until no changes happen, just like with
936 `set_nullable`. This makes use of the fact that `symset_union`
937 reports if any change happens.
939 The core of this, which finds the "first" of part of a production body,
940 will be reused for computing the "follow" sets, so we split it out
941 into a separate function.
943 ###### grammar fields
944 struct symset *first;
948 static int add_first(struct production *pr, int start,
949 struct symset *target, struct grammar *g,
954 for (s = start; s < pr->body_size; s++) {
955 struct symbol *bs = pr->body[s];
956 if (bs->type == Terminal) {
957 if (symset_find(target, bs->num) < 0) {
958 symset_add(target, bs->num, 0);
962 } else if (symset_union(target, &g->first[bs->num]))
968 *to_end = (s == pr->body_size);
972 static void build_first(struct grammar *g)
976 g->first = calloc(g->num_syms, sizeof(g->first[0]));
977 for (s = 0; s < g->num_syms; s++)
978 g->first[s] = INIT_SYMSET;
980 while (check_again) {
983 for (p = 0; p < g->production_count; p++) {
984 struct production *pr = g->productions[p];
985 struct symset *head = &g->first[pr->head->num];
987 if (add_first(pr, 0, head, g, NULL))
993 ### Building the `follow` sets.
995 There are two different situations when we will want to generate "follow"
996 sets. If we are doing an SLR analysis, we want to generate a single
997 "follow" set for each non-terminal in the grammar. That is what is
998 happening here. If we are doing an LALR or LR analysis we will want
999 to generate a separate "LA" set for each item. We do that later
1000 in state generation.
1002 There are two parts to generating a "follow" set. Firstly we look at
1003 every place that any non-terminal appears in the body of any
1004 production, and we find the set of possible "first" symbols after
1005 there. This is done using `add_first` above and only needs to be done
1006 once as the "first" sets are now stable and will not change.
1010 for (p = 0; p < g->production_count; p++) {
1011 struct production *pr = g->productions[p];
1014 for (b = 0; b < pr->body_size - 1; b++) {
1015 struct symbol *bs = pr->body[b];
1016 if (bs->type == Terminal)
1018 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1022 The second part is to add the "follow" set of the head of a production
1023 to the "follow" sets of the final symbol in the production, and any
1024 other symbol which is followed only by `nullable` symbols. As this
1025 depends on "follow" itself we need to repeatedly perform the process
1026 until no further changes happen.
1030 for (again = 0, p = 0;
1031 p < g->production_count;
1032 p < g->production_count-1
1033 ? p++ : again ? (again = 0, p = 0)
1035 struct production *pr = g->productions[p];
1038 for (b = pr->body_size - 1; b >= 0; b--) {
1039 struct symbol *bs = pr->body[b];
1040 if (bs->type == Terminal)
1042 if (symset_union(&g->follow[bs->num],
1043 &g->follow[pr->head->num]))
1050 We now just need to create and initialise the `follow` list to get a
1053 ###### grammar fields
1054 struct symset *follow;
1057 static void build_follow(struct grammar *g)
1060 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1061 for (s = 0; s < g->num_syms; s++)
1062 g->follow[s] = INIT_SYMSET;
1066 ### Building itemsets and states
1068 There are three different levels of detail that can be chosen for
1069 building the itemsets and states for the LR grammar. They are:
1071 1. LR(0) or SLR(1), where no look-ahead is considered.
1072 2. LALR(1) where we build look-ahead sets with each item and merge
1073 the LA sets when we find two paths to the same "kernel" set of items.
1074 3. LR(1) where different look-ahead for any item in the set means
1075 a different state must be created.
1077 ###### forward declarations
1078 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1080 We need to be able to look through existing states to see if a newly
1081 generated state already exists. For now we use a simple sorted linked
1084 An item is a pair of numbers: the production number and the position of
1085 "DOT", which is an index into the body of the production.
1086 As the numbers are not enormous we can combine them into a single "short"
1087 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1088 production number (so 4000 productions with maximum of 15 symbols in the
1091 Comparing the itemsets will be a little different to comparing symsets
1092 as we want to do the lookup after generating the "kernel" of an
1093 itemset, so we need to ignore the offset=zero items which are added during
1096 To facilitate this, we modify the "DOT" number so that "0" sorts to
1097 the end of the list in the symset, and then only compare items before
1101 static inline unsigned short item_num(int production, int index)
1103 return production | ((31-index) << 11);
1105 static inline int item_prod(unsigned short item)
1107 return item & 0x7ff;
1109 static inline int item_index(unsigned short item)
1111 return (31-(item >> 11)) & 0x1f;
1114 For LR(1) analysis we need to compare not just the itemset in a state
1115 but also the LA sets. As we assign each unique LA set a number, we
1116 can just compare the symset and the data values together.
1119 static int itemset_cmp(struct symset a, struct symset b,
1120 enum grammar_type type)
1126 i < a.cnt && i < b.cnt &&
1127 item_index(a.syms[i]) > 0 &&
1128 item_index(b.syms[i]) > 0;
1130 int diff = a.syms[i] - b.syms[i];
1134 diff = a.data[i] - b.data[i];
1139 if (i == a.cnt || item_index(a.syms[i]) == 0)
1143 if (i == b.cnt || item_index(b.syms[i]) == 0)
1149 if (type < LR1 || av == -1)
1152 a.data[i] - b.data[i];
1155 It will be helpful to know if an itemset has been "completed" or not,
1156 particularly for LALR where itemsets get merged, at which point they
1157 need to be considered for completion again. So a `completed` flag is needed.
1159 For correct handling of `TK_newline` when parsing, we will need to
1160 know which states (itemsets) can occur at the start of a line, so we
1161 will record a `starts_line` flag too whenever DOT is at the start of a
1164 Finally, for handling `TK_out` we need to know whether productions in the
1165 current state started *before* the most recent indent. A state
1166 doesn't usually keep details of individual productions, so we need to
1167 add one extra detail. `min_prefix` is the smallest non-zero number of
1168 symbols *before* DOT in any production in an itemset. This will allow
1169 us to determine if the most recent indent is sufficiently recent
1170 to cancel it against a `TK_out`. If it was seen longer ago than the
1171 `min_prefix`, and if the current state cannot be reduced, then the
1172 indented section must have ended in the middle of a syntactic unit, so
1173 an error must be signaled.
1175 And now we can build the list of itemsets. The lookup routine returns
1176 both a success flag and a pointer to where in the list an insert
1177 should happen, so we don't need to search a second time.
1181 struct itemset *next;
1183 struct symset items;
1184 struct symset go_to;
1186 unsigned short precedence;
1192 ###### grammar fields
1193 struct itemset *items;
1197 static int itemset_find(struct grammar *g, struct itemset ***where,
1198 struct symset kernel, enum grammar_type type)
1200 struct itemset **ip;
1202 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1203 struct itemset *i = *ip;
1205 diff = itemset_cmp(i->items, kernel, type);
1218 Adding an itemset may require merging the LA sets if LALR analysis is
1219 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1220 clear the `completed` flag so that the dependants of this itemset will be
1221 recalculated and their LA sets updated.
1223 `add_itemset` must consume the symsets it is passed, either by adding
1224 them to a data structure, or freeing them.
1226 static int add_itemset(struct grammar *g, struct symset ss,
1227 enum assoc assoc, unsigned short precedence,
1228 enum grammar_type type)
1230 struct itemset **where, *is;
1232 int found = itemset_find(g, &where, ss, type);
1234 is = calloc(1, sizeof(*is));
1235 is->state = g->states;
1239 is->precedence = precedence;
1241 is->go_to = INIT_DATASET;
1250 for (i = 0; i < ss.cnt; i++) {
1251 struct symset temp = INIT_SYMSET, s;
1252 if (ss.data[i] == is->items.data[i])
1254 s = set_find(g, is->items.data[i]);
1255 symset_union(&temp, &s);
1256 s = set_find(g, ss.data[i]);
1257 if (symset_union(&temp, &s)) {
1258 is->items.data[i] = save_set(g, temp);
1269 To build all the itemsets, we first insert the initial itemset made
1270 from production zero, complete each itemset, and then generate new
1271 itemsets from old until no new ones can be made.
1273 Completing an itemset means finding all the items where "DOT" is followed by
1274 a nonterminal and adding "DOT=0" items for every production from that
1275 non-terminal - providing each item hasn't already been added.
1277 If LA sets are needed, the LA set for each new item is found using
1278 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1279 be supplemented by the LA set for the item which produces the new item.
1281 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1282 is used in the next stage.
1283 If any of these symbols are flagged as `line_like`, then this
1284 state must be a `starts_line` state so now is a good time to record that.
1286 When itemsets are created we assign a precedence to the itemset from
1287 the complete item, if there is one. We ignore the possibility of
1288 there being two and don't (currently) handle precedence in such
1289 grammars. When completing a grammar we ignore any item where DOT is
1290 followed by a terminal with a precedence lower (numerically higher)
1291 than that for the itemset. Unless the terminal has right
1292 associativity, we also ignore items where the terminal has the same
1293 precedence. The result is that unwanted items are still in the
1294 itemset, but the terminal doesn't get into the go to set, so the item
1297 ###### complete itemset
/* Complete the itemset `is`: for every item, track the minimum prefix,
 * collect the symbol after DOT into `done`, and for each nonterminal after
 * DOT add DOT=0 items for all of its productions, computing LA sets when
 * the analysis type requires them. */
1298 for (i = 0; i < is->items.cnt; i++) {
1299 int p = item_prod(is->items.syms[i]);
1300 int bs = item_index(is->items.syms[i]);
1301 struct production *pr = g->productions[p];
1304 struct symset LA = INIT_SYMSET;
1305 unsigned short sn = 0;
/* min_prefix is the shortest non-zero DOT position seen in this set. */
1307 if (is->min_prefix == 0 ||
1308 (bs > 0 && bs < is->min_prefix))
1309 is->min_prefix = bs;
/* DOT at end of body: item is complete, nothing follows it. */
1310 if (bs == pr->body_size)
/* Precedence filtering: numerically higher precedence value means
 * lower precedence, so skip terminals that must not be shifted. */
1313 if (s->precedence && is->precedence &&
1314 is->precedence < s->precedence)
1315 /* This terminal has a low precedence and
1316 * shouldn't be shifted
1319 if (s->precedence && is->precedence &&
1320 is->precedence == s->precedence && s->assoc != Right)
1321 /* This terminal has a matching precedence and is
1322 * not Right-associative, so we mustn't shift it.
/* Record the symbol following DOT (once) for the derive stage. */
1325 if (symset_find(&done, s->num) < 0) {
1326 symset_add(&done, s->num, 0);
1328 is->starts_line = 1;
1330 if (s->type != Nonterminal)
/* Nonterminal after DOT: LA = FIRST(rest of body), possibly
 * supplemented by this item's own LA set if the rest is nullable. */
1336 add_first(pr, bs+1, &LA, g, &to_end);
1338 struct symset ss = set_find(g, is->items.data[i]);
1339 symset_union(&LA, &ss);
1341 sn = save_set(g, LA);
1342 LA = set_find(g, sn);
1345 /* Add productions for this symbol */
1346 for (p2 = s->first_production;
1347 p2 < g->production_count &&
1348 g->productions[p2]->head == s;
1350 int itm = item_num(p2, 0);
1351 int pos = symset_find(&is->items, itm);
1353 symset_add(&is->items, itm, sn);
1354 /* Will have re-ordered, so start
1355 * from beginning again */
/* Item already present: for LALR and above, merge the new LA set
 * into the stored one, saving only if it grew. */
1357 } else if (type >= LALR) {
1358 struct symset ss = set_find(g, is->items.data[pos]);
1359 struct symset tmp = INIT_SYMSET;
1361 symset_union(&tmp, &ss);
1362 if (symset_union(&tmp, &LA)) {
1363 is->items.data[pos] = save_set(g, tmp);
1371 For each symbol we found (and placed in `done`) we collect all the items for
1372 which this symbol is next, and create a new itemset with "DOT" advanced over
1373 the symbol. This is then added to the collection of itemsets (or merged
1374 with a pre-existing itemset).
1376 ###### derive itemsets
1377 // Now we have a completed itemset, so we need to
1378 // compute all the 'next' itemsets and create them
1379 // if they don't exist.
/* For each symbol collected in `done`, build the successor itemset by
 * advancing DOT over that symbol in every applicable item, carrying LA
 * sets and any reduction precedence, then register it via add_itemset()
 * and record the transition in this state's go_to table. */
1380 for (i = 0; i < done.cnt; i++) {
1382 unsigned short state;
1383 struct symbol *sym = g->symtab[done.syms[i]];
1384 enum assoc assoc = Non;
1385 unsigned short precedence = 0;
1386 struct symset newitemset = INIT_SYMSET;
1388 newitemset = INIT_DATASET;
1390 for (j = 0; j < is->items.cnt; j++) {
1391 int itm = is->items.syms[j];
1392 int p = item_prod(itm);
1393 int bp = item_index(itm);
1394 struct production *pr = g->productions[p];
1395 unsigned short la = 0;
/* Skip complete items and items whose next symbol isn't `sym`. */
1398 if (bp == pr->body_size)
1400 if (pr->body[bp] != sym)
1403 la = is->items.data[j];
1404 pos = symset_find(&newitemset, pr->head->num);
/* Advancing to the end of a production with precedence: the new
 * itemset is reducible — adopt that production's precedence. */
1405 if (bp + 1 == pr->body_size &&
1406 pr->precedence > 0 &&
1408 pr->precedence < precedence)) {
1409 // new itemset is reducible and has a precedence.
1410 precedence = pr->precedence;
1414 symset_add(&newitemset, item_num(p, bp+1), la);
1415 else if (type >= LALR) {
1416 // Need to merge la set.
1417 int la2 = newitemset.data[pos];
1419 struct symset ss = set_find(g, la2);
1420 struct symset LA = INIT_SYMSET;
1421 symset_union(&LA, &ss);
1422 ss = set_find(g, la);
1423 if (symset_union(&LA, &ss))
1424 newitemset.data[pos] = save_set(g, LA);
/* add_itemset consumes newitemset; record sym -> state transition. */
1430 state = add_itemset(g, newitemset, assoc, precedence, type);
1431 if (symset_find(&is->go_to, done.syms[i]) < 0)
1432 symset_add(&is->go_to, done.syms[i], state);
1435 All that is left is to create the initial itemset from production zero, and
1436 with `TK_eof` as the LA set.
/* Build all itemsets: seed with the item for production 0 at offset 0
 * (LA = {eof}), then repeatedly complete and derive itemsets, restarting
 * from the head of the list whenever `again` was set (an earlier itemset
 * had its `completed` flag cleared), until no new work remains. */
1439 static void build_itemsets(struct grammar *g, enum grammar_type type)
1441 struct symset first = INIT_SYMSET;
1444 unsigned short la = 0;
1446 // LA set just has eof
1447 struct symset eof = INIT_SYMSET;
1448 symset_add(&eof, TK_eof, 0);
1449 la = save_set(g, eof);
1450 first = INIT_DATASET;
1452 // production 0, offset 0 (with no data)
1453 symset_add(&first, item_num(0, 0), la);
1454 add_itemset(g, first, Non, 0, type);
/* Walk the list; when the end is reached with `again` set, wrap back
 * to the start (the ?: chain), otherwise terminate with NULL. */
1455 for (again = 0, is = g->items;
1457 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1459 struct symset done = INIT_SYMSET;
1470 ### Completing the analysis.
1472 The exact process of analysis depends on which level was requested. For
1473 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1474 `LR1` we need first, but not follow. For `SLR` we need both.
1476 We don't build the "action" tables that you might expect as the parser
1477 doesn't use them. They will be calculated to some extent if needed for
1480 Once we have built everything we allocate arrays for the two lists:
1481 symbols and itemsets. This allows more efficient access during reporting.
1482 The symbols are grouped as terminals and non-terminals and we record the
1483 changeover point in `first_nonterm`.
1485 ###### grammar fields
1486 struct symbol **symtab;
1487 struct itemset **statetab;
1490 ###### grammar_analyse
/* Run the requested level of analysis on `g`: assign numbers to
 * terminals (starting at TK_reserved) then nonterminals, build the
 * symbol lookup table, build the itemsets, and finally index all
 * itemsets by state number in `statetab` for efficient reporting. */
1492 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1496 int snum = TK_reserved;
1497 for (s = g->syms; s; s = s->next)
1498 if (s->num < 0 && s->type == Terminal) {
/* Terminals are numbered first; the changeover point is recorded. */
1502 g->first_nonterm = snum;
1503 for (s = g->syms; s; s = s->next)
/* symtab maps symbol number -> symbol for O(1) lookup. */
1509 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1510 for (s = g->syms; s; s = s->next)
1511 g->symtab[s->num] = s;
1521 build_itemsets(g, type);
/* statetab maps state number -> itemset. */
1523 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1524 for (is = g->items; is ; is = is->next)
1525 g->statetab[is->state] = is;
1528 ## Reporting on the Grammar
1530 The purpose of the report is to give the grammar developer insight into
1531 how the grammar parser will work. It is basically a structured dump of
1532 all the tables that have been generated, plus a description of any conflicts.
1534 ###### grammar_report
1535 static int grammar_report(struct grammar *g, enum grammar_type type)
1541 return report_conflicts(g, type);
1544 Firstly we have the complete list of symbols, together with the
1545 "FIRST" set if that was generated. We add a mark to each symbol to
1546 show if it can end in a newline (`>`), if it is considered to be
1547 "line-like" (`<`), or if it is nullable (`.`).
/* Print every symbol with its flags: '.' nullable, '<' line-like,
 * followed by number and type character, any precedence/associativity,
 * and (when computed) the FIRST set for nonterminals. */
1551 static void report_symbols(struct grammar *g)
1555 printf("SYMBOLS + FIRST:\n");
1557 printf("SYMBOLS:\n");
1559 for (n = 0; n < g->num_syms; n++) {
1560 struct symbol *s = g->symtab[n];
1564 printf(" %c%c%3d%c: ",
1565 s->nullable ? '.':' ',
1566 s->line_like ? '<':' ',
1567 s->num, symtypes[s->type]);
1570 printf(" (%d%s)", s->precedence,
1571 assoc_names[s->assoc]);
1573 if (g->first && s->type == Nonterminal) {
1576 for (j = 0; j < g->first[n].cnt; j++) {
1579 prtxt(g->symtab[g->first[n].syms[j]]->name);
1586 Then we have the follow sets if they were computed.
/* Print the FOLLOW set of every symbol that has a non-empty one. */
1588 static void report_follow(struct grammar *g)
1591 printf("FOLLOW:\n");
1592 for (n = 0; n < g->num_syms; n++)
1593 if (g->follow[n].cnt) {
1597 prtxt(g->symtab[n]->name);
1598 for (j = 0; j < g->follow[n].cnt; j++) {
1601 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1607 And finally the item sets. These include the GO TO tables and, for
1608 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1609 it up a bit. First the items, with production number and associativity.
/* Print one item: the head, the body with ". " marking the DOT position,
 * the production's precedence in "(...)" when the item is complete, and
 * the next terminal's precedence in "[...]" when DOT precedes one. */
1611 static void report_item(struct grammar *g, int itm)
1613 int p = item_prod(itm);
1614 int dot = item_index(itm);
1615 struct production *pr = g->productions[p];
1619 prtxt(pr->head->name);
1621 for (i = 0; i < pr->body_size; i++) {
1622 printf(" %s", (dot == i ? ". ": ""));
1623 prtxt(pr->body[i]->name);
1625 if (dot == pr->body_size)
1628 if (pr->precedence && dot == pr->body_size)
1629 printf(" (%d%s)", pr->precedence,
1630 assoc_names[pr->assoc]);
1631 if (dot < pr->body_size &&
1632 pr->body[dot]->precedence) {
1633 struct symbol *s = pr->body[dot];
1634 printf(" [%d%s]", s->precedence,
1635 assoc_names[s->assoc]);
1640 The LA sets which are (possibly) reported with each item:
/* Print the look-ahead set identified by `lanum` as a list of symbol
 * names. */
1642 static void report_la(struct grammar *g, int lanum)
1644 struct symset la = set_find(g, lanum);
1648 printf(" LOOK AHEAD(%d)", lanum);
1649 for (i = 0; i < la.cnt; i++) {
1652 prtxt(g->symtab[la.syms[i]]->name);
1657 Then the go to sets:
/* Print the GOTO table: one "symbol -> state" line per entry. */
1659 static void report_goto(struct grammar *g, struct symset gt)
1664 for (i = 0; i < gt.cnt; i++) {
1666 prtxt(g->symtab[gt.syms[i]]->name);
1667 printf(" -> %d\n", gt.data[i]);
1671 Now we can report all the item sets complete with items, LA sets, and GO TO.
/* Print every itemset: header with state number, starts_line flag and
 * min_prefix, then each item (with its LA set when LA data is present)
 * and finally the GOTO table for the state. */
1673 static void report_itemsets(struct grammar *g)
1676 printf("ITEM SETS(%d)\n", g->states);
1677 for (s = 0; s < g->states; s++) {
1679 struct itemset *is = g->statetab[s];
1680 printf(" Itemset %d:%s min prefix=%d",
1681 s, is->starts_line?" (startsline)":"", is->min_prefix);
1683 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1685 for (j = 0; j < is->items.cnt; j++) {
1686 report_item(g, is->items.syms[j]);
/* Only LALR/LR1 store per-item LA data; NO_DATA means none. */
1687 if (is->items.data != NO_DATA)
1688 report_la(g, is->items.data[j]);
1690 report_goto(g, is->go_to);
1694 ### Reporting conflicts
1696 Conflict detection varies a lot among different analysis levels. However
1697 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1698 LR1 are also similar as they have FOLLOW or LA sets.
1702 ## conflict functions
/* Report conflicts, dispatching to the LR0-style or SLR-style checker
 * depending on the analysis type; returns the conflict count (zero
 * prints "no conflicts"). */
1704 static int report_conflicts(struct grammar *g, enum grammar_type type)
1707 printf("Conflicts:\n");
1709 cnt = conflicts_lr0(g, type);
1711 cnt = conflicts_slr(g, type);
1713 printf(" - no conflicts\n");
1717 LR0 conflicts are any state which have both a reducible item and
1718 a shiftable item, or two reducible items.
1720 LR05 conflicts only occur if two possible reductions exist,
1721 as shifts always over-ride reductions.
1723 ###### conflict functions
/* Conflict detection without look-ahead: in each state, remember the
 * last two reducible items and the last shiftable item.  For LR0 a
 * state with both a shift and a reduce is a conflict; for both LR0 and
 * LR0.5 two or more reducible items is a conflict. */
1724 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1728 for (i = 0; i < g->states; i++) {
1729 struct itemset *is = g->statetab[i];
1730 int last_reduce = -1;
1731 int prev_reduce = -1;
1732 int last_shift = -1;
1736 for (j = 0; j < is->items.cnt; j++) {
1737 int itm = is->items.syms[j];
1738 int p = item_prod(itm);
1739 int bp = item_index(itm);
1740 struct production *pr = g->productions[p];
/* DOT at end of body => reducible item. */
1742 if (bp == pr->body_size) {
1743 prev_reduce = last_reduce;
1747 if (pr->body[bp]->type == Terminal)
1750 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1751 printf(" State %d has both SHIFT and REDUCE:\n", i);
1752 report_item(g, is->items.syms[last_shift]);
1753 report_item(g, is->items.syms[last_reduce]);
/* prev_reduce >= 0 means at least two reducible items were seen. */
1756 if (prev_reduce >= 0) {
1757 printf(" State %d has 2 (or more) reducible items\n", i);
1758 report_item(g, is->items.syms[prev_reduce]);
1759 report_item(g, is->items.syms[last_reduce]);
1766 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1767 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1768 in the source of the look ahead set.
1770 We build two datasets to reflect the "action" table: one which maps
1771 terminals to items where that terminal could be shifted and another
1772 which maps terminals to items that could be reduced when the terminal
1773 is in look-ahead. We report when we get conflicts between the two.
1775 As a special case, if we find a SHIFT/REDUCE conflict, where a
1776 terminal that could be shifted is in the lookahead set of some
1777 reducible item, then we check if the reducible item also has
1778 `TK_newline` in its lookahead set. If it does, then a newline will
1779 force the reduction, but anything else can reasonably be shifted, so
1780 that isn't really a conflict. Such apparent conflicts do not get
1781 counted, and are reported as non-critical. This will not affect a
1782 "traditional" grammar that does not include newlines as tokens.
/* Look-ahead based conflict detection for SLR/LALR/LR1.  Builds a
 * `shifts` dataset (terminal -> shiftable item) and a `reduce` dataset
 * (terminal -> reducible item), then reports any overlap.  A
 * SHIFT/REDUCE overlap is downgraded to "non-critical" when the
 * reducible item's LA set contains TK_newline, since a newline then
 * forces the reduction (see the prose above). */
1784 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1789 for (i = 0; i < g->states; i++) {
1790 struct itemset *is = g->statetab[i];
1791 struct symset shifts = INIT_DATASET;
1792 struct symset reduce = INIT_DATASET;
1796 /* First collect the shifts */
1797 for (j = 0; j < is->items.cnt; j++) {
1798 unsigned short itm = is->items.syms[j];
1799 int p = item_prod(itm);
1800 int bp = item_index(itm);
1801 struct production *pr = g->productions[p];
1803 if (bp < pr->body_size &&
1804 pr->body[bp]->type == Terminal) {
1806 int sym = pr->body[bp]->num;
1807 if (symset_find(&shifts, sym) < 0)
1808 symset_add(&shifts, sym, itm);
1811 /* Now look for reductions and conflicts */
1812 for (j = 0; j < is->items.cnt; j++) {
1813 unsigned short itm = is->items.syms[j];
1814 int p = item_prod(itm);
1815 int bp = item_index(itm);
1816 struct production *pr = g->productions[p];
1818 if (bp < pr->body_size)
/* SLR uses FOLLOW(head); LALR/LR1 use the item's stored LA set. */
1823 la = g->follow[pr->head->num];
1825 la = set_find(g, is->items.data[j]);
1827 for (k = 0; k < la.cnt; k++) {
1828 int pos = symset_find(&shifts, la.syms[k]);
1830 if (symset_find(&la, TK_newline) < 0) {
1831 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1834 printf(" State %d has non-critical SHIFT/REDUCE conflict on ", i);
1835 prtxt(g->symtab[la.syms[k]]->name);
1837 report_item(g, shifts.data[pos]);
1838 report_item(g, itm);
/* Two reducible items sharing a look-ahead terminal. */
1840 pos = symset_find(&reduce, la.syms[k]);
1842 symset_add(&reduce, la.syms[k], itm);
1845 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1846 prtxt(g->symtab[la.syms[k]]->name);
1848 report_item(g, itm);
1849 report_item(g, reduce.data[pos]);
1853 symset_free(shifts);
1854 symset_free(reduce);
1859 ## Generating the parser
1861 The exported part of the parser is the `parse_XX` function, where the name
1862 `XX` is based on the name of the parser files.
1864 This takes a `code_node`, a partially initialized `token_config`, and an
1865 optional `FILE` to send tracing to. The `token_config` gets the list of
1866 known words added and then is used with the `code_node` to initialize the
1869 `parse_XX` then calls the library function `parser_run` to actually complete
1870 the parse. This needs the `states` table and function to call the various
1871 pieces of code provided in the grammar file, so they are generated first.
1873 ###### parser_generate
/* Emit the exported parse_<name>() function: it registers the known
 * words with the token_config, opens a token stream over the code_node,
 * and hands everything to parser_run() with the generated tables. */
1875 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
1876 struct code_node *pre_reduce)
1882 gen_reduce(f, g, file, pre_reduce);
/* #line directive so compiler diagnostics point at the generator. */
1885 fprintf(f, "#line 0 \"gen_parser\"\n");
1886 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1889 fprintf(f, "\tstruct token_state *tokens;\n");
1890 fprintf(f, "\tconfig->words_marks = known;\n");
1891 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1892 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1893 fprintf(f, "\ttokens = token_open(code, config);\n");
1894 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1895 fprintf(f, "\ttoken_close(tokens);\n");
1896 fprintf(f, "\treturn rv;\n");
1897 fprintf(f, "}\n\n");
1900 ### Known words table
1902 The known words table is simply an array of terminal symbols.
1903 The table of nonterminals used for tracing is a similar array. We
1904 include virtual symbols in the table of non_terminals to keep the
/* Emit the known[] array: one string per terminal symbol, in symbol
 * number order, stopping at the first non-terminal. */
1909 static void gen_known(FILE *f, struct grammar *g)
1912 fprintf(f, "#line 0 \"gen_known\"\n");
1913 fprintf(f, "static const char *known[] = {\n");
1914 for (i = TK_reserved;
1915 i < g->num_syms && g->symtab[i]->type == Terminal;
1917 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1918 g->symtab[i]->name.txt);
1919 fprintf(f, "};\n\n");
/* Emit the non_term[] array used for tracing: names of all symbols that
 * are not terminals (virtual symbols included — see prose above). */
1922 static void gen_non_term(FILE *f, struct grammar *g)
1925 fprintf(f, "#line 0 \"gen_non_term\"\n");
1926 fprintf(f, "static const char *non_term[] = {\n");
1927 for (i = TK_reserved;
1930 if (g->symtab[i]->type != Terminal)
1931 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1932 g->symtab[i]->name.txt);
1933 fprintf(f, "};\n\n");
1936 ### States and the goto tables.
1938 For each state we record the goto table, the reducible production if
1939 there is one, or a symbol to shift for error recovery.
1940 Some of the details of the reducible production are stored in the
1941 `do_reduce` function to come later. Here we store the production number,
1942 the body size (useful for stack management) and the resulting symbol (useful
1943 for knowing how to free data later).
1945 The go to table is stored in a simple array of `sym` and corresponding
1948 ###### exported types
1956 const struct lookup * go_to;
/* Emit one goto_<state>[] lookup table per state, each entry a
 * { symbol, target-state } pair in symbol order. */
1966 static void gen_goto(FILE *f, struct grammar *g)
1969 fprintf(f, "#line 0 \"gen_goto\"\n");
1970 for (i = 0; i < g->states; i++) {
1972 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1974 struct symset gt = g->statetab[i]->go_to;
1975 for (j = 0; j < gt.cnt; j++)
1976 fprintf(f, "\t{ %d, %d },\n",
1977 gt.syms[j], gt.data[j]);
/* Emit the states[] table.  For each state: the goto table size and
 * pointer, then — if the state has a reducible item — the production
 * number, body size and head symbol number, else -1 sentinels; finally
 * the starts_line flag and min_prefix. */
1984 static void gen_states(FILE *f, struct grammar *g)
1987 fprintf(f, "#line 0 \"gen_states\"\n");
1988 fprintf(f, "static const struct state states[] = {\n");
1989 for (i = 0; i < g->states; i++) {
1990 struct itemset *is = g->statetab[i];
1991 int j, prod = -1, prod_len;
1993 for (j = 0; j < is->items.cnt; j++) {
1994 int itm = is->items.syms[j];
1995 int p = item_prod(itm);
1996 int bp = item_index(itm);
1997 struct production *pr = g->productions[p];
1999 if (bp < pr->body_size)
2001 /* This is what we reduce */
/* Among multiple reducible items, prefer the longest body. */
2002 if (prod < 0 || prod_len < pr->body_size) {
2004 prod_len = pr->body_size;
2009 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d },\n",
2010 i, is->go_to.cnt, i, prod,
2011 g->productions[prod]->body_size,
2012 g->productions[prod]->head->num,
2013 is->starts_line, is->min_prefix);
/* No reducible item: emit -1 sentinels in the production fields. */
2015 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
2016 i, is->go_to.cnt, i,
2017 is->starts_line, is->min_prefix);
2019 fprintf(f, "};\n\n");
2022 ### The `do_reduce` function and the code
2024 When the parser engine decides to reduce a production, it calls `do_reduce`.
2025 This has two functions.
2027 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2028 production being reduced. Secondly it runs the code that was included with
2029 the production if any.
2031 This code needs to be able to store data somewhere. Rather than requiring
2032 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2033 `do_reduce` return the size to be saved.
2035 In order for the code to access "global" context, we pass in the
2036 "config" pointer that was passed to parser function. If the `struct
2037 token_config` is embedded in some larger structure, the reducing code
2038 can access the larger structure using pointer manipulation.
2040 The code fragment requires translation when written out. Any `$N` needs to
2041 be converted to a reference either to that buffer (if `$0`) or to the
2042 structure returned by a previous reduction. These pointers need to be cast
2043 to the appropriate type for each access. All this is handled in
2046 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
2047 This applies only to symbols with references (or pointers), not those with structures.
2048 The `<` implies that the reference is being moved out, so the object will not be
2049 automatically freed. This is equivalent to assigning `NULL` to the pointer.
/* Translate and emit a production's code fragment.  Each `$N` becomes a
 * cast access: `$0` into the `ret` buffer for the head's struct, `$N`
 * (1..body_size) into body[N-1] as either a token or the symbol's
 * struct.  `used` records `$<N` references so the moved-out pointers
 * can be NULLed afterwards (see prose above). */
2053 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2056 char *used = calloc(1, p->body_size);
2059 fprintf(f, "\t\t\t");
2060 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
/* Not a digit after '$': pass the character through untranslated. */
2074 if (*c < '0' || *c > '9') {
/* Accumulate a multi-digit symbol number. */
2081 while (c[1] >= '0' && c[1] <= '9') {
2083 n = n * 10 + *c - '0';
/* $0: the result object; extra '*' deref when the head is a ref. */
2086 fprintf(f, "(*(struct %.*s*%s)ret)",
2087 p->head->struct_name.len,
2088 p->head->struct_name.txt,
2089 p->head->isref ? "*":"");
/* Out-of-range or untyped references are emitted verbatim. */
2090 else if (n > p->body_size)
2091 fprintf(f, "$%d", n);
2092 else if (p->body[n-1]->type == Terminal)
2093 fprintf(f, "(*(struct token *)body[%d])",
2095 else if (p->body[n-1]->struct_name.txt == NULL)
2096 fprintf(f, "$%d", n);
2098 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2099 p->body[n-1]->struct_name.len,
2100 p->body[n-1]->struct_name.txt,
2101 p->body[n-1]->isref ? "*":"", n-1);
/* Any `$<N` reference was "moved out": clear the stack pointer so
 * the object is not freed automatically. */
2106 for (i = 0; i < p->body_size; i++) {
2107 if (p->body[i]->struct_name.txt &&
2108 p->body[i]->isref &&
2110 // assume this has been copied out
2111 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
/* Emit do_reduce(): a switch on the production number that runs each
 * production's (translated) code fragment and returns the size of the
 * result stored in `ret` (0 when the head has no struct). */
2118 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2119 struct code_node *code)
2122 fprintf(f, "#line 1 \"gen_reduce\"\n");
2123 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2125 fprintf(f, "\tint ret_size = 0;\n");
/* Optional shared "reduce" section emitted before the switch. */
2127 code_node_print(f, code, file);
2129 fprintf(f, "#line 4 \"gen_reduce\"\n");
2130 fprintf(f, "\tswitch(prod) {\n");
2131 for (i = 0; i < g->production_count; i++) {
2132 struct production *p = g->productions[i];
2133 fprintf(f, "\tcase %d:\n", i);
/* Point diagnostics at the grammar file's original code line. */
2136 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2140 if (p->head->struct_name.txt)
2141 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2142 p->head->struct_name.len,
2143 p->head->struct_name.txt,
2144 p->head->isref ? "*":"");
2146 fprintf(f, "\t\tbreak;\n");
2148 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2153 As each non-terminal can potentially cause a different type of data
2154 structure to be allocated and filled in, we need to be able to free it when
2157 It is particularly important to have fine control over freeing during error
2158 recovery where individual stack frames might need to be freed.
2160 For this, the grammar author is required to define a `free_XX` function for
2161 each structure that is used by a non-terminal. `do_free` will call whichever
2162 is appropriate given a symbol number, and will call `free` (as is
2163 appropriate for tokens) on any terminal symbol.
/* Emit do_free(): terminals (sym < first_nonterm) are plain tokens and
 * just free()d; each nonterminal with a struct gets a case calling its
 * author-supplied free_<struct>() — with an extra deref (and a free of
 * the slot) when the symbol is a reference. */
2167 static void gen_free(FILE *f, struct grammar *g)
2171 fprintf(f, "#line 0 \"gen_free\"\n");
2172 fprintf(f, "static void do_free(short sym, void *asn)\n");
2174 fprintf(f, "\tif (!asn) return;\n");
2175 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2176 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2177 fprintf(f, "\tswitch(sym) {\n");
/* Skip symbols that need no case: non-nonterminals or no struct. */
2179 for (i = 0; i < g->num_syms; i++) {
2180 struct symbol *s = g->symtab[i];
2182 s->type != Nonterminal ||
2183 s->struct_name.txt == NULL)
2186 fprintf(f, "\tcase %d:\n", s->num);
2188 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2190 s->struct_name.txt);
2191 fprintf(f, "\t\tfree(asn);\n");
2193 fprintf(f, "\t\tfree_%.*s(asn);\n",
2195 s->struct_name.txt);
2196 fprintf(f, "\t\tbreak;\n");
2198 fprintf(f, "\t}\n}\n\n");
2201 ## The main routine.
2203 There are three key parts to the "main" function of parsergen: processing
2204 the arguments, loading the grammar file, and dealing with the grammar.
2206 ### Argument processing.
2208 Command line options allow the selection of analysis mode, name of output
2209 file, and whether or not a report should be generated. By default we create
2210 a report only if no code output was requested.
2212 The `parse_XX` function name uses the basename of the output file which
2213 should not have a suffix (`.c`). `.c` is added to the given name for the
2214 code, and `.h` is added for the header (if header text is specified in the
2221 static const struct option long_options[] = {
2222 { "LR0", 0, NULL, '0' },
2223 { "LR05", 0, NULL, '5' },
2224 { "SLR", 0, NULL, 'S' },
2225 { "LALR", 0, NULL, 'L' },
2226 { "LR1", 0, NULL, '1' },
2227 { "tag", 1, NULL, 't' },
2228 { "report", 0, NULL, 'R' },
2229 { "output", 1, NULL, 'o' },
2230 { NULL, 0, NULL, 0 }
2232 const char *options = "05SL1t:Ro:";
2234 ###### process arguments
2236 char *outfile = NULL;
2241 enum grammar_type type = LR05;
2242 while ((opt = getopt_long(argc, argv, options,
2243 long_options, NULL)) != -1) {
2258 outfile = optarg; break;
2260 tag = optarg; break;
2262 fprintf(stderr, "Usage: parsergen ...\n");
2267 infile = argv[optind++];
2269 fprintf(stderr, "No input file given\n");
2272 if (outfile && report == 1)
2275 if (name && strchr(name, '/'))
2276 name = strrchr(name, '/')+1;
2278 if (optind < argc) {
2279 fprintf(stderr, "Excess command line arguments\n");
2283 ### Loading the grammar file
2285 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2288 Once we have extracted the code (with `mdcode`) we expect to find three
2289 sections: header, code, and grammar. Anything else that is not
2290 excluded by the `--tag` option is an error.
2292 "header" and "code" are optional, though it is hard to build a working
2293 parser with neither. "grammar" must be provided.
2297 #include <sys/mman.h>
/* Error callback for code_extract(): print the message to stderr. */
2302 static void pr_err(char *msg)
2305 fprintf(stderr, "%s\n", msg);
2309 struct section *table;
2313 fd = open(infile, O_RDONLY);
2315 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2316 infile, strerror(errno));
2319 len = lseek(fd, 0, 2);
2320 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2321 table = code_extract(file, file+len, pr_err);
2323 struct code_node *hdr = NULL;
2324 struct code_node *code = NULL;
2325 struct code_node *gram = NULL;
2326 struct code_node *pre_reduce = NULL;
2327 for (s = table; s; s = s->next) {
2328 struct text sec = s->section;
2329 if (tag && !strip_tag(&sec, tag))
2331 if (text_is(sec, "header"))
2333 else if (text_is(sec, "code"))
2335 else if (text_is(sec, "grammar"))
2337 else if (text_is(sec, "reduce"))
2338 pre_reduce = s->code;
2340 fprintf(stderr, "Unknown content section: %.*s\n",
2341 s->section.len, s->section.txt);
2346 ### Processing the input
2348 As we need to append an extension to a filename and then open it for
2349 writing, and we need to do this twice, it helps to have a separate function.
/* Open "<base><ext>" for writing; the buffer is sized for both strings
 * plus the NUL terminator.  Caller frees nothing here (fn lifetime is
 * handled in elided lines — confirm against full source). */
2353 static FILE *open_ext(char *base, char *ext)
2355 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2357 strcat(strcpy(fn, base), ext);
2363 If we can read the grammar, then we analyse and optionally report on it. If
2364 the report finds conflicts we will exit with an error status.
2366 ###### process input
2367 struct grammar *g = NULL;
2369 fprintf(stderr, "No grammar section provided\n");
2372 g = grammar_read(gram);
2374 fprintf(stderr, "Failure to parse grammar\n");
2379 grammar_analyse(g, type);
2381 if (grammar_report(g, type))
2385 If a "headers" section is defined, we write it out and include a declaration
2386 for the `parse_XX` function so it can be used from separate code.
2388 if (rv == 0 && hdr && outfile) {
2389 FILE *f = open_ext(outfile, ".h");
2391 code_node_print(f, hdr, infile);
2392 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2396 fprintf(stderr, "Cannot create %s.h\n",
2402 And if all goes well and an output file was provided, we create the `.c`
2403 file with the code section (if any) and the parser tables and function.
2405 if (rv == 0 && outfile) {
2406 FILE *f = open_ext(outfile, ".c");
2409 code_node_print(f, code, infile);
2410 gen_parser(f, g, infile, name, pre_reduce);
2413 fprintf(stderr, "Cannot create %s.c\n",
2419 And that about wraps it up. We need to set the locale so that UTF-8 is
2420 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2422 ###### File: parsergen.mk
2423 parsergen : parsergen.o libscanner.o libmdcode.o
2424 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2431 int main(int argc, char *argv[])
2436 setlocale(LC_ALL,"");
2438 ## process arguments
2445 ## The SHIFT/REDUCE parser
2447 Having analysed the grammar and generated all the tables, we only need the
2448 shift/reduce engine to bring it all together.
2450 ### Goto table lookup
2452 The parser generator has nicely provided us with goto tables sorted by
2453 symbol number. We need a binary search function to find a symbol in the
2456 ###### parser functions
/* Binary search the state's sorted go_to table for `sym`; returns the
 * target state on a hit.  Invariant: go_to[lo].sym <= sym < go_to[hi].sym
 * (lo initialisation is in elided lines). */
2458 static int search(const struct state *l, int sym)
2461 int hi = l->go_to_cnt;
2465 while (lo + 1 < hi) {
2466 int mid = (lo + hi) / 2;
2467 if (l->go_to[mid].sym <= sym)
2472 if (l->go_to[lo].sym == sym)
2473 return l->go_to[lo].state;
2478 ### The state stack.
2480 The core data structure for the parser is the stack. This tracks all the
2481 symbols that have been recognised or partially recognised.
2483 The stack usually won't grow very large - maybe a few tens of entries. So
2484 we dynamically resize the array as required but never bother to shrink it
2487 We keep the stack as two separate allocations. One, `asn_stack` stores the
2488 "abstract syntax nodes" which are created by each reduction. When we call
2489 `do_reduce` we need to pass an array of the `asn`s of the body of the
2490 production, and by keeping a separate `asn` stack, we can just pass a
2491 pointer into this stack.
2493 The other allocation stores all other stack fields of which there are six.
2494 The `state` is the most important one and guides the parsing process. The
2495 `sym` is nearly unnecessary. However when we want to free entries from the
2496 `asn_stack`, it helps to know what type they are so we can call the right
2497 freeing function. The symbol leads us to the right free function through
2500 The `indents` count tracks the line indents within the symbol or
2501 immediately following it. These are used to allow indent information to
2502 guide parsing and error recovery.
2504 `since_newline` tracks how many stack frames since the last
2505 start-of-line (whether indented or not). So if `since_newline` is
2506 zero, then this symbol is at the start of a line. Similarly
2507 `since_indent` counts the number of states since an indent, it is zero
2508 precisely when `indents` is not zero.
2510 `newline_permitted` keeps track of whether newlines should be ignored
2513 The stack is most properly seen as alternating states and symbols -
2514 states, like the 'DOT' in items, are between symbols. Each frame in
2515 our stack holds a state and the symbol that was before it. The
2516 bottom of stack holds the start state but no symbol, as nothing came
2517 before the beginning.
2519 ###### parser functions
2524 short newline_permitted;
2528 short since_newline;
2538 Two operations are needed on the stack - shift (which is like push) and pop.
2540 Shift applies not only to terminals but also to non-terminals. When
2541 we reduce a production we will pop off entries corresponding to the
2542 body symbols, then push on an item for the head of the production.
2543 This last is exactly the same process as shifting in a terminal so we
2544 use the same function for both. In both cases we provide the symbol,
2545 the number of indents the symbol contains (which will be zero for a
2546 terminal symbol) and a flag indicating that the symbol was at (or was
2547 reduced from a symbol which was at) the start of a line. The state is
2548 deduced from the current top-of-stack state and the new symbol.
2550 To simplify other code we arrange for `shift` to fail if there is no `goto`
2551 state for the symbol. This is useful in basic parsing due to our design
2552 that we shift when we can, and reduce when we cannot. So the `shift`
2553 function reports if it could.
2555 `shift` is also used to push state zero onto the stack, so if the
2556 stack is empty, it always chooses zero as the next state.
2558 So `shift` finds the next state. If that succeeds it extends the
2559 allocations if needed and pushes all the information onto the stacks.
2561 Newlines are permitted after a `starts_line` state until an internal
2562 indent. If the new frame has neither a `starts_line` state nor an
2563 indent, newlines are permitted if the previous stack frame permitted
2566 ###### parser functions
2568 static int shift(struct parser *p,
2569 short sym, short indents, short start_of_line,
2571 const struct state states[])
2573 // Push an entry onto the stack
2574 struct frame next = {0};
2575 int newstate = p->tos
2576 ? search(&states[p->stack[p->tos-1].state],
2581 if (p->tos >= p->stack_size) {
2582 p->stack_size += 10;
2583 p->stack = realloc(p->stack, p->stack_size
2584 * sizeof(p->stack[0]));
2585 p->asn_stack = realloc(p->asn_stack, p->stack_size
2586 * sizeof(p->asn_stack[0]));
2589 next.indents = indents;
2590 next.state = newstate;
2591 if (states[newstate].starts_line)
2592 next.newline_permitted = 1;
2594 next.newline_permitted = 0;
2596 next.newline_permitted =
2597 p->stack[p->tos-1].newline_permitted;
2599 next.newline_permitted = 0;
2601 if (!start_of_line) {
2603 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2605 next.since_newline = 1;
2608 next.since_indent = 0;
2610 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2612 next.since_indent = 1;
2614 p->stack[p->tos] = next;
2615 p->asn_stack[p->tos] = asn;
2620 `pop` primarily moves the top of stack (`tos`) back down the required
2621 amount and frees any `asn` entries that need to be freed. It also
2622 collects a summary of the indents and line starts in the symbols that
2623 are being removed. It is called _after_ we reduce a production, just
2624 before we `shift` the nonterminal in.
2626 ###### parser functions
2628 static int pop(struct parser *p, int num,
2629 short *start_of_line,
2630 void(*do_free)(short sym, void *asn))
2637 for (i = 0; i < num; i++) {
2638 sol |= !p->stack[p->tos+i].since_newline;
2639 indents += p->stack[p->tos+i].indents;
2640 do_free(p->stack[p->tos+i].sym,
2641 p->asn_stack[p->tos+i]);
2644 *start_of_line = sol;
2648 ### Memory allocation
2650 The `scanner` returns tokens in a local variable - we want them in allocated
2651 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2652 a reduce is in a large buffer. Both of these require some allocation and
2653 copying, hence `memdup` and `tokcopy`.
2655 ###### parser includes
2658 ###### parser functions
2660 void *memdup(void *m, int len)
2666 memcpy(ret, m, len);
2670 static struct token *tok_copy(struct token tk)
2672 struct token *new = malloc(sizeof(*new));
2677 ### The heart of the parser.
2679 Now we have the parser. If we can shift we do, though newlines and
2680 reducing indenting may block that. If not and we can reduce we do
2681 that. If the production we reduced was production zero, then we have
2682 accepted the input and can finish.
2684 We return whatever `asn` was returned by reducing production zero.
2686 If we can neither shift nor reduce we have an error to handle. We pop
2687 single entries off the stack until we can shift the `TK_error` symbol, then
2688 drop input tokens until we find one we can shift into the new error state.
2690 When we find `TK_in` and `TK_out` tokens which report indents we need
2691 to handle them directly as the grammar cannot express what we want to
2694 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2695 record how many indents there are following the previous token.
2697 `TK_out` tokens must be canceled against an indent count
2698 within the stack. If we can reduce some symbols that are all since
2699 the most recent indent, then we do that first. If the minimum prefix
2700 of the current state then extends back before the most recent indent,
2701 that indent can be cancelled. If the minimum prefix is shorter then
2702 the indent had ended prematurely and we must start error handling, which
2703 is still a work-in-progress.
2705 `TK_newline` tokens are ignored unless the top stack frame records
2706 that they are permitted. In that case they will not be considered for
2707 shifting if it is possible to reduce some symbols that are all since
2708 the most recent start of line. This is how a newline forcibly
2709 terminates any line-like structure - we try to reduce down to at most
2710 one symbol for each line where newlines are allowed.
2711 A consequence of this is that a rule like
2713 ###### Example: newlines - broken
2717 IfStatement -> Newlines if ....
2719 cannot work, as the NEWLINE will never be shifted as the empty string
2720 will be reduced first. Optional sets of newlines need to be included
2721 in the thing that precedes:
2723 ###### Example: newlines - works
2727 IfStatement -> If ....
2729 Here the NEWLINE will be shifted because nothing can be reduced until
2732 When, during error handling, we discard tokens read in, we want to keep
2733 discarding until we see one that is recognised. If we had a full set
2734 of LR(1) grammar states, this will mean looking in the look-ahead set,
2735 but we don't keep a full look-ahead set. We only record the subset
2736 that leads to SHIFT. We can, however, deduce the look-ahead set by
2737 looking at the SHIFT subsets for all states that we can get to by
2738 reducing zero or more times. So we need a little function which
2739 checks if a given token is in any of these look-ahead sets.
2741 ###### parser includes
2746 static int in_lookahead(struct token *tk, const struct state *states, int state)
2748 while (state >= 0) {
2749 if (search(&states[state], tk->num) >= 0)
2751 if (states[state].reduce_prod < 0)
2753 state = search(&states[state], states[state].reduce_sym);
2758 void *parser_run(struct token_state *tokens,
2759 const struct state states[],
2760 int (*do_reduce)(int, void**, struct token_config*, void*),
2761 void (*do_free)(short, void*),
2762 FILE *trace, const char *non_term[],
2763 struct token_config *config)
2765 struct parser p = { 0 };
2766 struct token *tk = NULL;
2770 shift(&p, TK_eof, 0, 1, NULL, states);
2772 struct token *err_tk;
2773 struct frame *tos = &p.stack[p.tos-1];
2775 tk = tok_copy(token_next(tokens));
2776 parser_trace(trace, &p,
2777 tk, states, non_term, config->known_count);
2779 if (tk->num == TK_in) {
2781 tos->since_newline = 0;
2782 tos->since_indent = 0;
2783 if (!states[tos->state].starts_line)
2784 tos->newline_permitted = 0;
2787 parser_trace_action(trace, "Record");
2790 if (tk->num == TK_out) {
2791 if (states[tos->state].reduce_size >= 0 &&
2792 states[tos->state].reduce_size <= tos->since_indent)
2794 if (states[tos->state].min_prefix >= tos->since_indent) {
2796 struct frame *in = tos - tos->since_indent;
2798 if (in->indents == 0) {
2799 /* Reassess since_indent and newline_permitted */
2801 in->since_indent = in[-1].since_indent + 1;
2802 in->newline_permitted = in[-1].newline_permitted;
2804 in->since_indent = 0;
2805 in->newline_permitted = 0;
2807 if (states[in->state].starts_line)
2808 in->newline_permitted = 1;
2811 in->since_indent = in[-1].since_indent + 1;
2812 if (states[in->state].starts_line)
2813 in->newline_permitted = 1;
2815 in->newline_permitted = in[-1].newline_permitted;
2820 parser_trace_action(trace, "Cancel");
2823 // fall through to error handling as both SHIFT and REDUCE
2826 if (tk->num == TK_newline) {
2827 if (!tos->newline_permitted) {
2830 parser_trace_action(trace, "Discard");
2833 if (tos->since_newline > 1 &&
2834 states[tos->state].reduce_size >= 0 &&
2835 states[tos->state].reduce_size <= tos->since_newline)
2838 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2840 parser_trace_action(trace, "Shift");
2844 if (states[tos->state].reduce_prod >= 0) {
2847 const struct state *nextstate = &states[tos->state];
2848 int prod = nextstate->reduce_prod;
2849 int size = nextstate->reduce_size;
2851 static char buf[16*1024];
2852 short indents, start_of_line;
2854 body = p.asn_stack + (p.tos - size);
2856 bufsize = do_reduce(prod, body, config, buf);
2858 indents = pop(&p, size, &start_of_line,
2860 res = memdup(buf, bufsize);
2861 memset(buf, 0, bufsize);
2862 if (!shift(&p, nextstate->reduce_sym,
2863 indents, start_of_line,
2865 if (prod != 0) abort();
2869 parser_trace_action(trace, "Reduce");
2872 /* Error. We walk up the stack until we
2873 * find a state which will accept TK_error.
2874 * We then shift in TK_error and see what state
2875 * that takes us too.
2876 * Then we discard input tokens until
2877 * we find one that is acceptable.
2879 parser_trace_action(trace, "ERROR");
2880 short indents = 0, start_of_line;
2882 err_tk = tok_copy(*tk);
2884 shift(&p, TK_error, 0, 0,
2885 err_tk, states) == 0)
2886 // discard this state
2887 indents += pop(&p, 1, &start_of_line, do_free);
2890 // no state accepted TK_error
2893 tos = &p.stack[p.tos-1];
2894 while (!in_lookahead(tk, states, tos->state) &&
2895 tk->num != TK_eof) {
2897 tk = tok_copy(token_next(tokens));
2898 if (tk->num == TK_in)
2900 if (tk->num == TK_out) {
2904 // FIXME update since_indent here
2907 tos->indents += indents;
2910 pop(&p, p.tos, NULL, do_free);
2916 ###### exported functions
2917 void *parser_run(struct token_state *tokens,
2918 const struct state states[],
2919 int (*do_reduce)(int, void**, struct token_config*, void*),
2920 void (*do_free)(short, void*),
2921 FILE *trace, const char *non_term[],
2922 struct token_config *config);
2926 Being able to visualize the parser in action can be invaluable when
2927 debugging the parser code, or trying to understand how the parse of a
2928 particular grammar progresses. The stack contains all the important
2929 state, so just printing out the stack every time around the parse loop
2930 can make it possible to see exactly what is happening.
2932 This doesn't explicitly show each SHIFT and REDUCE action. However they
2933 are easily deduced from the change between consecutive lines, and the
2934 details of each state can be found by cross referencing the states list
2935 in the stack with the "report" that parsergen can generate.
2937 For terminal symbols, we just dump the token. For non-terminals we
2938 print the name of the symbol. The look ahead token is reported at the
2939 end inside square brackets.
2941 ###### parser functions
2943 static char *reserved_words[] = {
2944 [TK_error] = "ERROR",
2947 [TK_newline] = "NEWLINE",
2950 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2952 fprintf(trace, "(%d", f->state);
2953 if (states[f->state].starts_line)
2954 fprintf(trace, "s");
2955 if (f->newline_permitted)
2956 fprintf(trace, "n%d", f->since_newline);
2957 fprintf(trace, ") ");
2960 void parser_trace(FILE *trace, struct parser *p,
2961 struct token *tk, const struct state states[],
2962 const char *non_term[], int knowns)
2967 for (i = 0; i < p->tos; i++) {
2968 struct frame *f = &p->stack[i];
2971 if (sym < TK_reserved &&
2972 reserved_words[sym] != NULL)
2973 fputs(reserved_words[sym], trace);
2974 else if (sym < TK_reserved + knowns) {
2975 struct token *t = p->asn_stack[i];
2976 text_dump(trace, t->txt, 20);
2978 fputs(non_term[sym - TK_reserved - knowns],
2981 fprintf(trace, ".%d", f->indents);
2982 if (f->since_newline == 0)
2986 parser_trace_state(trace, f, states);
2988 fprintf(trace, "[");
2989 if (tk->num < TK_reserved &&
2990 reserved_words[tk->num] != NULL)
2991 fputs(reserved_words[tk->num], trace);
2993 text_dump(trace, tk->txt, 20);
2997 void parser_trace_action(FILE *trace, char *action)
3000 fprintf(trace, " - %s\n", action);
3005 The obvious example for a parser is a calculator.
3007 As `scanner` provides a number parsing function using `libgmp`, it is not much
3008 work to perform arbitrary rational number calculations.
3010 This calculator takes one expression, or an equality test, per line. The
3011 results are printed and if any equality test fails, the program exits with
3014 ###### File: parsergen.mk
3015 calc.c calc.h : parsergen parsergen.mdc
3016 ./parsergen --tag calc -o calc parsergen.mdc
3017 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3018 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3020 ./calc parsergen.mdc
3026 // what do we use for a demo-grammar? A calculator of course.
3038 #include <sys/mman.h>
3044 #include "scanner.h"
3050 static void free_number(struct number *n)
3056 static int text_is(struct text t, char *s)
3058 return (strlen(s) == t.len &&
3059 strncmp(s, t.txt, t.len) == 0);
3062 int main(int argc, char *argv[])
3064 int fd = open(argv[1], O_RDONLY);
3065 int len = lseek(fd, 0, 2);
3066 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3067 struct section *table = code_extract(file, file+len, NULL);
3069 struct token_config config = {
3070 .ignored = (1 << TK_line_comment)
3071 | (1 << TK_block_comment)
3074 .number_chars = ".,_+-",
3078 for (s = table; s; s = s->next)
3079 if (text_is(s->section, "example: input"))
3080 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3082 struct section *t = table->next;
3083 code_free(table->code);
3095 Session -> Session Line
3098 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3099 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3100 gmp_printf(" or as a decimal: %Fg\n", fl);
3104 | Expression = Expression NEWLINE ${
3105 if (mpq_equal($1.val, $3.val))
3106 gmp_printf("Both equal %Qd\n", $1.val);
3108 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3113 | NEWLINE ${ printf("Blank line\n"); }$
3114 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3117 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3118 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3119 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3120 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3121 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3122 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3127 3.1415926535 - 355/113
3129 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3131 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1