1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
24 ###### File: parsergen.c
29 ## forward declarations
40 ###### File: libparser.c
47 ###### File: parsergen.mk
50 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
53 ## Reading the grammar
55 The grammar must be stored in a markdown literate code file as parsed
56 by "[mdcode][]". It must have three top level (i.e. unreferenced)
57 sections: `header`, `code`, and `grammar`. The first two will be
58 literally copied into the generated `.c` and `.h` files. The last
59 contains the grammar. This is tokenised with "[scanner][]".
61 If the `--tag` option is given, then any top level header that doesn't
62 start with the tag is ignored, and the tag is stripped from the rest. So
64 means that the three needed sections must be `Foo: header`, `Foo: code`,
65 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
109 The strings reported by `mdcode` and `scanner` are `struct text` which have
110 length rather than being null terminated. To help with printing and
111 comparing we define `text_is` and `prtxt`, which should possibly go in
112 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
113 which might contain control characters.
115 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
116 because that is what we need to detect tags.
119 static int text_is(struct text t, char *s)
121 return (strlen(s) == t.len &&
122 strncmp(s, t.txt, t.len) == 0);
124 static void prtxt(struct text t)
126 printf("%.*s", t.len, t.txt);
129 static int strip_tag(struct text *t, char *tag)
131 int skip = strlen(tag) + 1;
132 if (skip >= t->len ||
133 strncmp(t->txt, tag, skip-1) != 0 ||
134 t->txt[skip-1] != ':')
136 while (skip < t->len && t->txt[skip] == ' ')
145 Productions are comprised primarily of symbols - terminal and
146 non-terminal. We do not make a syntactic distinction between the two,
147 though non-terminals must be identifiers. Non-terminal symbols are
148 those which appear in the head of a production, terminal symbols are
149 those which don't. There are also "virtual" symbols used for precedence
150 marking discussed later, and sometimes we won't know what type a symbol
153 ###### forward declarations
154 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
155 char *symtypes = "UVTN";
159 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
160 table of known symbols and the resulting parser will report them as
161 `TK_reserved + N`. A small set of identifiers are reserved for the
162 different token types that `scanner` can report.
165 static char *reserved_words[] = {
166 [TK_error] = "ERROR",
167 [TK_number] = "NUMBER",
168 [TK_ident] = "IDENTIFIER",
170 [TK_string] = "STRING",
171 [TK_multi_string] = "MULTI_STRING",
174 [TK_newline] = "NEWLINE",
180 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
181 recognised. The former is automatically expected at the end of the text
182 being parsed. The latter are always ignored by the parser.
184 All of these symbols are stored in a simple symbol table. We use the
185 `struct text` from `mdcode` to store the name and link them together
186 into a sorted list using an insertion sort.
188 We don't have separate `find` and `insert` functions as any symbol we
189 find needs to be remembered. We simply expect `find` to always return a
190 symbol, but its type might be `Unknown`.
199 ###### grammar fields
204 static struct symbol *sym_find(struct grammar *g, struct text s)
206 struct symbol **l = &g->syms;
211 (cmp = text_cmp((*l)->name, s)) < 0)
215 n = calloc(1, sizeof(*n));
224 static void symbols_init(struct grammar *g)
226 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
228 for (i = 0; i < entries; i++) {
231 t.txt = reserved_words[i];
234 t.len = strlen(t.txt);
241 ### Data types and precedence.
243 Data type specification and precedence specification are both
244 introduced by a dollar sign at the start of the line. If the next
245 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
246 precedence, otherwise it specifies a data type.
248 The data type name is simply stored and applied to the head of all
249 subsequent productions. It must be the name of a structure optionally
250 preceded by an asterisk which means a reference or "pointer". So
251 `$expression` maps to `struct expression` and `$*statement` maps to
252 `struct statement *`.
254 Any productions given before the first data type declaration will have
255 no data type associated with them and can carry no information. In
256 order to allow other non-terminals to have no type, the data type
257 `$void` can be given. This does *not* mean that `struct void` will be
258 used, but rather that no type will be associated with future
261 The precedence line must contain a list of symbols - typically
262 terminal symbols, but not necessarily. It can only contain symbols
263 that have not been seen yet, so precedence declaration must precede
264 the productions that they affect.
266 A precedence line may also contain a "virtual" symbol which is an
267 identifier preceded by `$$`. Virtual terminals carry precedence
268 information but are not included in the grammar. A production can
269 declare that it inherits the precedence of a given virtual symbol.
271 This use for `$$` precludes it from being used as a symbol in the
272 described language. Two other symbols: `${` and `}$` are also
275 Each new precedence line introduces a new precedence level and
276 declares how it associates. This level is stored in each symbol
277 listed and may be inherited by any production which uses the symbol. A
278 production inherits from the last symbol which has a precedence.
280 The symbols on the first precedence line have the lowest precedence.
281 Subsequent lines introduce symbols with higher precedence.
283 ###### grammar fields
284 struct text current_type;
289 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
290 static const char *known[] = { "$$", "${", "}$" };
293 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
295 struct token t = token_next(ts);
300 if (t.num != TK_ident) {
301 err = "type or assoc expected after '$'";
304 if (text_is(t.txt, "LEFT"))
306 else if (text_is(t.txt, "RIGHT"))
308 else if (text_is(t.txt, "NON"))
311 g->current_type = t.txt;
312 g->type_isref = isref;
313 if (text_is(t.txt, "void"))
314 g->current_type.txt = NULL;
316 if (t.num != TK_newline) {
317 err = "Extra tokens after type name";
324 err = "$* cannot be followed by a precedence";
328 // This is a precedence line, need some symbols.
332 while (t.num != TK_newline) {
333 enum symtype type = Terminal;
335 if (t.num == TK_virtual) {
338 if (t.num != TK_ident) {
339 err = "$$ must be followed by a word";
342 } else if (t.num != TK_ident &&
344 err = "Illegal token in precedence line";
347 s = sym_find(g, t.txt);
348 if (s->type != Unknown) {
349 err = "Symbols in precedence line must not already be known.";
353 s->precedence = g->prec_levels;
359 err = "No symbols given on precedence line";
363 while (t.num != TK_newline && t.num != TK_eof)
370 A production either starts with an identifier which is the head
371 non-terminal, or a vertical bar (`|`) in which case this production
372 uses the same head as the previous one. The identifier must be
373 followed by a `->` mark. All productions for a given non-terminal must
374 be grouped together with the `nonterminal ->` given only once.
376 After this a (possibly empty) sequence of identifiers and marks form
377 the body of the production. A virtual symbol may be given after the
378 body by preceding it with `$$`. If a virtual symbol is given, the
379 precedence of the production is that for the virtual symbol. If none
380 is given, the precedence is inherited from the last symbol in the
381 production which has a precedence specified.
383 After the optional precedence may come the `${` mark. This indicates
384 the start of a code fragment. If present, this must be on the same
385 line as the start of the production.
387 All of the text from the `${` through to the matching `}$` is
388 collected and forms the code-fragment for the production. It must all
389 be in one `code_node` of the literate code. The `}$` must be
390 at the end of a line.
392 Text in the code fragment will undergo substitutions where `$N` or
393 `$<N`, for some numeric `N`, will be replaced with a variable holding
394 the parse information for the particular symbol in the production.
395 `$0` is the head of the production, `$1` is the first symbol of the
396 body, etc. The type of `$N` for a terminal symbol is `struct token`.
397 For a non-terminal, it is whatever has been declared for that symbol.
398 The `<` may be included for symbols declared as storing a reference
399 (not a structure) and means that the reference is being moved out, so
400 it will not automatically be freed.
402 While building productions we will need to add to an array which needs to
406 static void array_add(void *varray, int *cnt, void *new)
408 void ***array = varray;
411 current = ((*cnt-1) | (step-1))+1;
412 if (*cnt == current) {
415 *array = realloc(*array, current * sizeof(void*));
417 (*array)[*cnt] = new;
421 Collecting the code fragment simply involves reading tokens until we
422 hit the end token `}$`, and noting the character position of the start and
426 static struct text collect_code(struct token_state *state,
431 code.txt = start.txt.txt + start.txt.len;
433 t = token_next(state);
434 while (t.node == start.node &&
435 t.num != TK_close && t.num != TK_error &&
437 if (t.num == TK_close && t.node == start.node)
438 code.len = t.txt.txt - code.txt;
444 Now we have all the bits we need to parse a full production.
449 ###### grammar fields
450 struct production **productions;
451 int production_count;
453 ###### production fields
455 struct symbol **body;
461 int first_production;
464 static char *parse_production(struct grammar *g,
466 struct token_state *state)
468 /* Head has already been parsed. */
471 struct production p, *pp;
473 memset(&p, 0, sizeof(p));
475 tk = token_next(state);
476 while (tk.num == TK_ident || tk.num == TK_mark) {
477 struct symbol *bs = sym_find(g, tk.txt);
478 if (bs->type == Unknown)
480 if (bs->type == Virtual) {
481 err = "Virtual symbol not permitted in production";
484 if (bs->precedence) {
485 p.precedence = bs->precedence;
488 array_add(&p.body, &p.body_size, bs);
489 tk = token_next(state);
491 if (tk.num == TK_virtual) {
493 tk = token_next(state);
494 if (tk.num != TK_ident) {
495 err = "word required after $$";
498 vs = sym_find(g, tk.txt);
499 if (vs->type != Virtual) {
500 err = "symbol after $$ must be virtual";
503 p.precedence = vs->precedence;
505 tk = token_next(state);
507 if (tk.num == TK_open) {
508 p.code_line = tk.line;
509 p.code = collect_code(state, tk);
510 if (p.code.txt == NULL) {
511 err = "code fragment not closed properly";
514 tk = token_next(state);
516 if (tk.num != TK_newline && tk.num != TK_eof) {
517 err = "stray tokens at end of line";
520 pp = malloc(sizeof(*pp));
522 array_add(&g->productions, &g->production_count, pp);
525 while (tk.num != TK_newline && tk.num != TK_eof)
526 tk = token_next(state);
530 With the ability to parse production and dollar-lines, we have nearly all
531 that we need to parse a grammar from a `code_node`.
533 The head of the first production will effectively be the `start` symbol of
534 the grammar. However it won't _actually_ be so. Processing the grammar is
535 greatly simplified if the real start symbol only has a single production,
536 and expects `$eof` as the final terminal. So when we find the first
537 explicit production we insert an extra production as production zero which
540 ###### Example: production 0
543 where `START` is the first non-terminal given.
545 ###### create production zero
546 struct production *p = calloc(1,sizeof(*p));
547 struct text start = {"$start",6};
548 struct text eof = {"$eof",4};
549 struct text code = {"$0 = $<1;", 9};
550 p->head = sym_find(g, start);
551 p->head->type = Nonterminal;
552 p->head->struct_name = g->current_type;
553 p->head->isref = g->type_isref;
554 if (g->current_type.txt)
556 array_add(&p->body, &p->body_size, head);
557 array_add(&p->body, &p->body_size, sym_find(g, eof));
558 p->head->first_production = g->production_count;
559 array_add(&g->productions, &g->production_count, p);
561 Now we are ready to read in the grammar. We ignore comments
562 and strings so that the marks which introduce them can be
563 used as terminals in the grammar. We don't ignore numbers
564 even though we don't allow them as that causes the scanner
565 to produce errors that the parser is better positioned to handle.
568 static struct grammar *grammar_read(struct code_node *code)
570 struct token_config conf = {
573 .words_marks = known,
574 .known_count = sizeof(known)/sizeof(known[0]),
576 .ignored = (1 << TK_line_comment)
577 | (1 << TK_block_comment)
580 | (1 << TK_multi_string)
585 struct token_state *state = token_open(code, &conf);
587 struct symbol *head = NULL;
591 g = calloc(1, sizeof(*g));
594 for (tk = token_next(state); tk.num != TK_eof;
595 tk = token_next(state)) {
596 if (tk.num == TK_newline)
598 if (tk.num == TK_ident) {
600 head = sym_find(g, tk.txt);
601 if (head->type == Nonterminal)
602 err = "This non-terminal has already be used.";
603 else if (head->type == Virtual)
604 err = "Virtual symbol not permitted in head of production";
606 head->type = Nonterminal;
607 head->struct_name = g->current_type;
608 head->isref = g->type_isref;
609 if (g->production_count == 0) {
610 ## create production zero
612 head->first_production = g->production_count;
613 tk = token_next(state);
614 if (tk.num == TK_mark &&
615 text_is(tk.txt, "->"))
616 err = parse_production(g, head, state);
618 err = "'->' missing in production";
620 } else if (tk.num == TK_mark
621 && text_is(tk.txt, "|")) {
622 // another production for same non-term
624 err = parse_production(g, head, state);
626 err = "First production must have a head";
627 } else if (tk.num == TK_mark
628 && text_is(tk.txt, "$")) {
629 err = dollar_line(state, g, 0);
630 } else if (tk.num == TK_mark
631 && text_is(tk.txt, "$*")) {
632 err = dollar_line(state, g, 1);
634 err = "Unrecognised token at start of line.";
642 fprintf(stderr, "Error at line %d: %s\n",
649 ## Analysing the grammar
651 The central task in analysing the grammar is to determine a set of
652 states to drive the parsing process. This will require finding
653 various sets of symbols and of "items". Some of these sets will need
654 to attach information to each element in the set, so they are more
657 Each "item" may have a 'look-ahead' or `LA` set associated with
658 it. Many of these will be the same as each other. To avoid memory
659 wastage, and to simplify some comparisons of sets, these sets will be
660 stored in a list of unique sets, each assigned a number.
662 Once we have the data structures in hand to manage these sets and
663 lists, we can start setting the 'nullable' flag, build the 'FIRST'
664 sets, and then create the item sets which define the various states.
668 Though we don't only store symbols in these sets, they are the main
669 things we store, so they are called symbol sets or "symsets".
671 A symset has a size, an array of shorts, and an optional array of data
672 values, which are also shorts. If the array of data is not present,
673 we store an impossible pointer, as `NULL` is used to indicate that no
674 memory has been allocated yet.
679 unsigned short *syms, *data;
681 #define NO_DATA ((unsigned short *)1)
682 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
683 const struct symset INIT_DATASET = { 0, NULL, NULL };
685 The arrays of shorts are allocated in blocks of 8 and are kept sorted
686 by using an insertion sort. We don't explicitly record the amount of
687 allocated space as it can be derived directly from the current `cnt` using
688 `((cnt - 1) | 7) + 1`.
691 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
694 int current = ((s->cnt-1) | 7) + 1;
695 if (current == s->cnt) {
697 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
698 if (s->data != NO_DATA)
699 s->data = realloc(s->data, sizeof(*s->data) * current);
702 while (i > 0 && s->syms[i-1] > key) {
703 s->syms[i] = s->syms[i-1];
704 if (s->data != NO_DATA)
705 s->data[i] = s->data[i-1];
709 if (s->data != NO_DATA)
714 Finding a symbol (or item) in a `symset` uses a simple binary search.
715 We return the index where the value was found (so data can be accessed),
716 or `-1` to indicate failure.
718 static int symset_find(struct symset *ss, unsigned short key)
725 while (lo + 1 < hi) {
726 int mid = (lo + hi) / 2;
727 if (ss->syms[mid] <= key)
732 if (ss->syms[lo] == key)
737 We will often want to form the union of two symsets. When we do, we
738 will often want to know if anything changed (as that might mean there
739 is more work to do). So `symset_union` reports whether anything was
740 added to the first set. We use a slow quadratic approach as these
741 sets don't really get very big. If profiling shows this to be a problem it
742 can be optimised later.
744 static int symset_union(struct symset *a, struct symset *b)
748 for (i = 0; i < b->cnt; i++)
749 if (symset_find(a, b->syms[i]) < 0) {
750 unsigned short data = 0;
751 if (b->data != NO_DATA)
753 symset_add(a, b->syms[i], data);
759 And of course we must be able to free a symset.
761 static void symset_free(struct symset ss)
764 if (ss.data != NO_DATA)
770 Some symsets are simply stored somewhere appropriate in the `grammar`
771 data structure, others need a bit of indirection. This applies
772 particularly to "LA" sets. These will be paired with "items" in an
773 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
774 stored in the `data` field, so we need a mapping from a small (short)
775 number to an LA `symset`.
777 As mentioned earlier this involves creating a list of unique symsets.
779 For now, we just use a linear list sorted by insertion. A skiplist
780 would be more efficient and may be added later.
787 struct setlist *next;
790 ###### grammar fields
791 struct setlist *sets;
796 static int ss_cmp(struct symset a, struct symset b)
799 int diff = a.cnt - b.cnt;
802 for (i = 0; i < a.cnt; i++) {
803 diff = (int)a.syms[i] - (int)b.syms[i];
810 static int save_set(struct grammar *g, struct symset ss)
812 struct setlist **sl = &g->sets;
816 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
823 s = malloc(sizeof(*s));
832 Finding a set by number is currently performed by a simple linear search.
833 If this turns out to hurt performance, we can store the sets in a dynamic
834 array like the productions.
836 static struct symset set_find(struct grammar *g, int num)
838 struct setlist *sl = g->sets;
839 while (sl && sl->num != num)
844 ### Setting `nullable`
846 We set `nullable` on the head symbol for any production for which all
847 the body symbols (if any) are nullable. As this is a recursive
848 definition, any change in the `nullable` setting means that we need to
849 re-evaluate where it needs to be set.
851 We simply loop around performing the same calculations until no more
858 static void set_nullable(struct grammar *g)
861 while (check_again) {
864 for (p = 0; p < g->production_count; p++) {
865 struct production *pr = g->productions[p];
868 if (pr->head->nullable)
870 for (s = 0; s < pr->body_size; s++)
871 if (! pr->body[s]->nullable)
873 if (s == pr->body_size) {
874 pr->head->nullable = 1;
881 ### Setting `line_like`
883 In order to be able to ignore newline tokens when not relevant, but
884 still include them in the parse when needed, we will need to know
885 which states can start a "line-like" section of code. We ignore
886 newlines when there is an indent since the most recent start of a
889 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
890 If a symbol cannot derive a NEWLINE, then it is only part of a line -
891 so is word-like. If it can derive a NEWLINE, then we consider it to
894 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
895 is the head of a production that contains a line_like symbol is also a
896 line-like symbol. We use a new field `line_like` to record this
897 attribute of symbols, and compute it in a repetitive manner similar to
904 static void set_line_like(struct grammar *g)
907 g->symtab[TK_newline]->line_like = 1;
908 while (check_again) {
911 for (p = 0; p < g->production_count; p++) {
912 struct production *pr = g->productions[p];
915 if (pr->head->line_like)
918 for (s = 0 ; s < pr->body_size; s++) {
919 if (pr->body[s]->line_like) {
920 pr->head->line_like = 1;
929 ### Building the `first` sets
931 When calculating what can follow a particular non-terminal, we will need to
932 know what the "first" terminal in any subsequent non-terminal might be. So
933 we calculate the `first` set for every non-terminal and store them in an
934 array. We don't bother recording the "first" set for terminals as they are
937 As the "first" for one symbol might depend on the "first" of another,
938 we repeat the calculations until no changes happen, just like with
939 `set_nullable`. This makes use of the fact that `symset_union`
940 reports if any change happens.
942 The core of this, which finds the "first" of part of a production body,
943 will be reused for computing the "follow" sets, so we split it out
944 into a separate function.
946 ###### grammar fields
947 struct symset *first;
951 static int add_first(struct production *pr, int start,
952 struct symset *target, struct grammar *g,
957 for (s = start; s < pr->body_size; s++) {
958 struct symbol *bs = pr->body[s];
959 if (bs->type == Terminal) {
960 if (symset_find(target, bs->num) < 0) {
961 symset_add(target, bs->num, 0);
965 } else if (symset_union(target, &g->first[bs->num]))
971 *to_end = (s == pr->body_size);
975 static void build_first(struct grammar *g)
979 g->first = calloc(g->num_syms, sizeof(g->first[0]));
980 for (s = 0; s < g->num_syms; s++)
981 g->first[s] = INIT_SYMSET;
983 while (check_again) {
986 for (p = 0; p < g->production_count; p++) {
987 struct production *pr = g->productions[p];
988 struct symset *head = &g->first[pr->head->num];
990 if (add_first(pr, 0, head, g, NULL))
996 ### Building the `follow` sets.
998 There are two different situations when we will want to generate "follow"
999 sets. If we are doing an SLR analysis, we want to generate a single
1000 "follow" set for each non-terminal in the grammar. That is what is
1001 happening here. If we are doing an LALR or LR analysis we will want
1002 to generate a separate "LA" set for each item. We do that later
1003 in state generation.
1005 There are two parts to generating a "follow" set. Firstly we look at
1006 every place that any non-terminal appears in the body of any
1007 production, and we find the set of possible "first" symbols after
1008 there. This is done using `add_first` above and only needs to be done
1009 once as the "first" sets are now stable and will not change.
1013 for (p = 0; p < g->production_count; p++) {
1014 struct production *pr = g->productions[p];
1017 for (b = 0; b < pr->body_size - 1; b++) {
1018 struct symbol *bs = pr->body[b];
1019 if (bs->type == Terminal)
1021 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1025 The second part is to add the "follow" set of the head of a production
1026 to the "follow" sets of the final symbol in the production, and any
1027 other symbol which is followed only by `nullable` symbols. As this
1028 depends on "follow" itself we need to repeatedly perform the process
1029 until no further changes happen.
1033 for (again = 0, p = 0;
1034 p < g->production_count;
1035 p < g->production_count-1
1036 ? p++ : again ? (again = 0, p = 0)
1038 struct production *pr = g->productions[p];
1041 for (b = pr->body_size - 1; b >= 0; b--) {
1042 struct symbol *bs = pr->body[b];
1043 if (bs->type == Terminal)
1045 if (symset_union(&g->follow[bs->num],
1046 &g->follow[pr->head->num]))
1053 We now just need to create and initialise the `follow` list to get a
1056 ###### grammar fields
1057 struct symset *follow;
1060 static void build_follow(struct grammar *g)
1063 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1064 for (s = 0; s < g->num_syms; s++)
1065 g->follow[s] = INIT_SYMSET;
1069 ### Building itemsets and states
1071 There are three different levels of detail that can be chosen for
1072 building the itemsets and states for the LR grammar. They are:
1074 1. LR(0) or SLR(1), where no look-ahead is considered.
1075 2. LALR(1) where we build look-ahead sets with each item and merge
1076 the LA sets when we find two paths to the same "kernel" set of items.
1077 3. LR(1) where different look-ahead for any item in the set means
1078 a different state must be created.
1080 ###### forward declarations
1081 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1083 We need to be able to look through existing states to see if a newly
1084 generated state already exists. For now we use a simple sorted linked
1087 An item is a pair of numbers: the production number and the position of
1088 "DOT", which is an index into the body of the production.
1089 As the numbers are not enormous we can combine them into a single "short"
1090 and store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1091 production number (so 2000 productions with a maximum of 31 symbols in the
1094 Comparing the itemsets will be a little different to comparing symsets
1095 as we want to do the lookup after generating the "kernel" of an
1096 itemset, so we need to ignore the offset=zero items which are added during
1099 To facilitate this, we modify the "DOT" number so that "0" sorts to
1100 the end of the list in the symset, and then only compare items before
1104 static inline unsigned short item_num(int production, int index)
1106 return production | ((31-index) << 11);
1108 static inline int item_prod(unsigned short item)
1110 return item & 0x7ff;
1112 static inline int item_index(unsigned short item)
1114 return (31-(item >> 11)) & 0x1f;
1117 For LR(1) analysis we need to compare not just the itemset in a state
1118 but also the LA sets. As we assign each unique LA set a number, we
1119 can just compare the symset and the data values together.
1122 static int itemset_cmp(struct symset a, struct symset b,
1123 enum grammar_type type)
1129 i < a.cnt && i < b.cnt &&
1130 item_index(a.syms[i]) > 0 &&
1131 item_index(b.syms[i]) > 0;
1133 int diff = a.syms[i] - b.syms[i];
1137 diff = a.data[i] - b.data[i];
1142 if (i == a.cnt || item_index(a.syms[i]) == 0)
1146 if (i == b.cnt || item_index(b.syms[i]) == 0)
1152 if (type < LR1 || av == -1)
1155 a.data[i] - b.data[i];
1158 It will be helpful to know if an itemset has been "completed" or not,
1159 particularly for LALR where itemsets get merged, at which point they
1160 need to be considered for completion again. So a `completed` flag is needed.
1162 For correct handling of `TK_newline` when parsing, we will need to
1163 know which states (itemsets) can occur at the start of a line, so we
1164 will record a `starts_line` flag too whenever DOT is at the start of a
1167 Finally, for handling `TK_out` we need to know whether productions in the
1168 current state started *before* the most recent indent. A state
1169 doesn't usually keep details of individual productions, so we need to
1170 add one extra detail. `min_prefix` is the smallest non-zero number of
1171 symbols *before* DOT in any production in an itemset. This will allow
1172 us to determine if the most recent indent is sufficiently recent
1173 to cancel it against a `TK_out`. If it was seen longer ago than the
1174 `min_prefix`, and if the current state cannot be reduced, then the
1175 indented section must have ended in the middle of a syntactic unit, so
1176 an error must be signaled.
1178 And now we can build the list of itemsets. The lookup routine returns
1179 both a success flag and a pointer to where in the list an insert
1180 should happen, so we don't need to search a second time.
1184 struct itemset *next;
1186 struct symset items;
1187 struct symset go_to;
1189 unsigned short precedence;
1195 ###### grammar fields
1196 struct itemset *items;
1200 static int itemset_find(struct grammar *g, struct itemset ***where,
1201 struct symset kernel, enum grammar_type type)
1203 struct itemset **ip;
1205 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1206 struct itemset *i = *ip;
1208 diff = itemset_cmp(i->items, kernel, type);
1221 Adding an itemset may require merging the LA sets if LALR analysis is
1222 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1223 clear the `completed` flag so that the dependants of this itemset will be
1224 recalculated and their LA sets updated.
1226 `add_itemset` must consume the symsets it is passed, either by adding
1227 them to a data structure, or freeing them.
1229 static int add_itemset(struct grammar *g, struct symset ss,
1230 enum assoc assoc, unsigned short precedence,
1231 enum grammar_type type)
1233 struct itemset **where, *is;
1235 int found = itemset_find(g, &where, ss, type);
1237 is = calloc(1, sizeof(*is));
1238 is->state = g->states;
1242 is->precedence = precedence;
1244 is->go_to = INIT_DATASET;
1253 for (i = 0; i < ss.cnt; i++) {
1254 struct symset temp = INIT_SYMSET, s;
1255 if (ss.data[i] == is->items.data[i])
1257 s = set_find(g, is->items.data[i]);
1258 symset_union(&temp, &s);
1259 s = set_find(g, ss.data[i]);
1260 if (symset_union(&temp, &s)) {
1261 is->items.data[i] = save_set(g, temp);
1272 To build all the itemsets, we first insert the initial itemset made
1273 from production zero, complete each itemset, and then generate new
1274 itemsets from old until no new ones can be made.
1276 Completing an itemset means finding all the items where "DOT" is followed by
1277 a nonterminal and adding "DOT=0" items for every production from that
1278 non-terminal - providing each item hasn't already been added.
1280 If LA sets are needed, the LA set for each new item is found using
1281 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1282 be supplemented by the LA set for the item which produces the new item.
1284 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1285 is used in the next stage.
1286 If any of these symbols are flagged as `line_like`, then this
1287 state must be a `starts_line` state so now is a good time to record that.
1289 When itemsets are created we assign a precedence to the itemset from
1290 the complete item, if there is one. We ignore the possibility of
1291 there being two and don't (currently) handle precedence in such
1292 grammars. When completing a grammar we ignore any item where DOT is
1293 followed by a terminal with a precedence lower than that for the
1294 itemset. Unless the terminal has right associativity, we also ignore
1295 items where the terminal has the same precedence. The result is that
1296 unwanted items are still in the itemset, but the terminal doesn't get
1297 into the go to set, so the item is ineffective.
1299 ###### complete itemset
/* For every item with DOT before a symbol: track min_prefix, skip
 * terminals excluded by precedence, record each next-symbol in 'done',
 * and for nonterminals add DOT=0 items for all their productions. */
1300 for (i = 0; i < is->items.cnt; i++) {
1301 int p = item_prod(is->items.syms[i]);
1302 int bs = item_index(is->items.syms[i]);
1303 struct production *pr = g->productions[p];
1306 struct symset LA = INIT_SYMSET;
1307 unsigned short sn = 0;
1309 if (is->min_prefix == 0 ||
1310 (bs > 0 && bs < is->min_prefix))
1311 is->min_prefix = bs;
1312 if (bs == pr->body_size)
/* Precedence filtering: a terminal with lower precedence than the
 * itemset - or equal precedence and not Right-associative - is not
 * added to 'done', so it never reaches the goto set. */
1315 if (s->precedence && is->precedence &&
1316 is->precedence > s->precedence)
1317 /* This terminal has a low precedence and
1318 * shouldn't be shifted
1321 if (s->precedence && is->precedence &&
1322 is->precedence == s->precedence && s->assoc != Right)
1323 /* This terminal has a matching precedence and is
1324 * not Right-associative, so we mustn't shift it.
1327 if (symset_find(&done, s->num) < 0) {
1328 symset_add(&done, s->num, 0);
1330 is->starts_line = 1;
1332 if (s->type != Nonterminal)
/* LA for the new items: FIRST of the remainder of the body, plus this
 * item's own LA when the remainder can derive the empty string. */
1338 add_first(pr, bs+1, &LA, g, &to_end);
1340 struct symset ss = set_find(g, is->items.data[i]);
1341 symset_union(&LA, &ss);
1343 sn = save_set(g, LA);
1344 LA = set_find(g, sn);
1347 /* Add productions for this symbol */
1348 for (p2 = s->first_production;
1349 p2 < g->production_count &&
1350 g->productions[p2]->head == s;
1352 int itm = item_num(p2, 0);
1353 int pos = symset_find(&is->items, itm);
1355 symset_add(&is->items, itm, sn);
1356 /* Will have re-ordered, so start
1357 * from beginning again */
1359 } else if (type >= LALR) {
/* Item already present (LALR/LR1): merge the new LA into its set. */
1360 struct symset ss = set_find(g, is->items.data[pos]);
1361 struct symset tmp = INIT_SYMSET;
1363 symset_union(&tmp, &ss);
1364 if (symset_union(&tmp, &LA)) {
1365 is->items.data[pos] = save_set(g, tmp);
1373 For each symbol we found (and placed in `done`) we collect all the items for
1374 which this symbol is next, and create a new itemset with "DOT" advanced over
1375 the symbol. This is then added to the collection of itemsets (or merged
1376 with a pre-existing itemset).
1378 ###### derive itemsets
1379 // Now we have a completed itemset, so we need to
1380 // compute all the 'next' itemsets and create them
1381 // if they don't exist.
/* For each symbol collected in 'done', build the itemset reached by
 * advancing DOT over that symbol, then record the transition in go_to. */
1382 for (i = 0; i < done.cnt; i++) {
1384 unsigned short state;
1385 struct symbol *sym = g->symtab[done.syms[i]];
1386 enum assoc assoc = Non;
1387 unsigned short precedence = 0;
1388 struct symset newitemset = INIT_SYMSET;
1390 newitemset = INIT_DATASET;
1392 for (j = 0; j < is->items.cnt; j++) {
1393 int itm = is->items.syms[j];
1394 int p = item_prod(itm);
1395 int bp = item_index(itm);
1396 struct production *pr = g->productions[p];
1397 unsigned short la = 0;
/* Skip completed items and items whose next symbol is not 'sym'. */
1400 if (bp == pr->body_size)
1402 if (pr->body[bp] != sym)
1405 la = is->items.data[j];
1406 pos = symset_find(&newitemset, pr->head->num);
1407 if (bp + 1 == pr->body_size &&
1408 pr->precedence > 0 &&
1409 pr->precedence > precedence) {
1410 // new itemset is reducible and has a precedence.
1411 precedence = pr->precedence;
1415 symset_add(&newitemset, item_num(p, bp+1), la);
1416 else if (type >= LALR) {
1417 // Need to merge la set.
1418 int la2 = newitemset.data[pos];
1420 struct symset ss = set_find(g, la2);
1421 struct symset LA = INIT_SYMSET;
1422 symset_union(&LA, &ss);
1423 ss = set_find(g, la);
1424 if (symset_union(&LA, &ss))
1425 newitemset.data[pos] = save_set(g, LA);
/* add_itemset consumes newitemset and returns the (new or existing)
 * state number for the transition. */
1431 state = add_itemset(g, newitemset, assoc, precedence, type);
1432 if (symset_find(&is->go_to, done.syms[i]) < 0)
1433 symset_add(&is->go_to, done.syms[i], state);
1436 All that is left is to create the initial itemset from production zero, and
1437 with `TK_eof` as the LA set.
/* build_itemsets: seed the collection with the itemset for production 0
 * (DOT at 0, LA = {eof}), then repeatedly complete itemsets and derive
 * new ones until no more appear.  The '?:' loop restarts from the head
 * of the list when a pass set 'again' (e.g. a 'completed' flag was
 * cleared - TODO confirm, loop body truncated in this extract). */
1440 static void build_itemsets(struct grammar *g, enum grammar_type type)
1442 struct symset first = INIT_SYMSET;
1445 unsigned short la = 0;
1447 // LA set just has eof
1448 struct symset eof = INIT_SYMSET;
1449 symset_add(&eof, TK_eof, 0);
1450 la = save_set(g, eof);
1451 first = INIT_DATASET;
1453 // production 0, offset 0 (with no data)
1454 symset_add(&first, item_num(0, 0), la);
1455 add_itemset(g, first, Non, 0, type);
1456 for (again = 0, is = g->items;
1458 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1460 struct symset done = INIT_SYMSET;
1471 ### Completing the analysis.
1473 The exact process of analysis depends on which level was requested. For
1474 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1475 `LR1` we need first, but not follow. For `SLR` we need both.
1477 We don't build the "action" tables that you might expect as the parser
1478 doesn't use them. They will be calculated to some extent if needed for
1481 Once we have built everything we allocate arrays for the two lists:
1482 symbols and itemsets. This allows more efficient access during reporting.
1483 The symbols are grouped as terminals and non-terminals and we record the
1484 changeover point in `first_nonterm`.
1486 ###### grammar fields
1487 struct symbol **symtab;
1488 struct itemset **statetab;
1491 ###### grammar_analyse
/* grammar_analyse: number the symbols (terminals first, so first_nonterm
 * marks the changeover), build the symtab array indexed by symbol
 * number, build the itemsets, then build statetab indexed by state
 * number for efficient reporting. */
1493 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1497 int snum = TK_reserved;
1498 for (s = g->syms; s; s = s->next)
1499 if (s->num < 0 && s->type == Terminal) {
1503 g->first_nonterm = snum;
1504 for (s = g->syms; s; s = s->next)
1510 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1511 for (s = g->syms; s; s = s->next)
1512 g->symtab[s->num] = s;
1522 build_itemsets(g, type);
1524 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1525 for (is = g->items; is ; is = is->next)
1526 g->statetab[is->state] = is;
1529 ## Reporting on the Grammar
1531 The purpose of the report is to give the grammar developer insight into
1532 how the grammar parser will work. It is basically a structured dump of
1533 all the tables that have been generated, plus a description of any conflicts.
1535 ###### grammar_report
/* grammar_report: dump the analysis tables (body truncated here) and
 * return the conflict count from report_conflicts. */
1536 static int grammar_report(struct grammar *g, enum grammar_type type)
1542 return report_conflicts(g, type);
1545 Firstly we have the complete list of symbols, together with the
1546 "FIRST" set if that was generated. We add a mark to each symbol to
1547 show if it can end in a newline (`>`), if it is considered to be
1548 "line-like" (`<`), or if it is nullable (`.`).
/* report_symbols: list every symbol with its flags - '.' nullable,
 * '<' line-like - its number and type letter, any precedence and
 * associativity, and (when computed) the FIRST set of nonterminals. */
1552 static void report_symbols(struct grammar *g)
1556 printf("SYMBOLS + FIRST:\n");
1558 printf("SYMBOLS:\n");
1560 for (n = 0; n < g->num_syms; n++) {
1561 struct symbol *s = g->symtab[n];
1565 printf(" %c%c%3d%c: ",
1566 s->nullable ? '.':' ',
1567 s->line_like ? '<':' ',
1568 s->num, symtypes[s->type]);
1571 printf(" (%d%s)", s->precedence,
1572 assoc_names[s->assoc]);
1574 if (g->first && s->type == Nonterminal) {
1577 for (j = 0; j < g->first[n].cnt; j++) {
1580 prtxt(g->symtab[g->first[n].syms[j]]->name);
1587 Then we have the follow sets if they were computed.
/* report_follow: print the FOLLOW set of every symbol that has a
 * non-empty one. */
1589 static void report_follow(struct grammar *g)
1592 printf("FOLLOW:\n");
1593 for (n = 0; n < g->num_syms; n++)
1594 if (g->follow[n].cnt) {
1598 prtxt(g->symtab[n]->name);
1599 for (j = 0; j < g->follow[n].cnt; j++) {
1602 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1608 And finally the item sets. These include the GO TO tables and, for
1609 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1610 it up a bit. First the items, with production number and associativity.
/* report_item: print one item - head, body with ". " marking DOT, the
 * production's precedence in "(..)" when the item is reducible, and the
 * next terminal's precedence in "[..]" when DOT precedes one. */
1612 static void report_item(struct grammar *g, int itm)
1614 int p = item_prod(itm);
1615 int dot = item_index(itm);
1616 struct production *pr = g->productions[p];
1620 prtxt(pr->head->name);
1622 for (i = 0; i < pr->body_size; i++) {
1623 printf(" %s", (dot == i ? ". ": ""));
1624 prtxt(pr->body[i]->name);
1626 if (dot == pr->body_size)
1629 if (pr->precedence && dot == pr->body_size)
1630 printf(" (%d%s)", pr->precedence,
1631 assoc_names[pr->assoc]);
1632 if (dot < pr->body_size &&
1633 pr->body[dot]->precedence) {
1634 struct symbol *s = pr->body[dot];
1635 printf(" [%d%s]", s->precedence,
1636 assoc_names[s->assoc]);
1641 The LA sets which are (possibly) reported with each item:
/* report_la: print the look-ahead set identified by 'lanum' - the set
 * number followed by the name of each symbol in the set. */
1643 static void report_la(struct grammar *g, int lanum)
1645 struct symset la = set_find(g, lanum);
1649 printf(" LOOK AHEAD(%d)", lanum);
1650 for (i = 0; i < la.cnt; i++) {
1653 prtxt(g->symtab[la.syms[i]]->name);
1658 Then the go to sets:
/* report_goto: print each goto entry as "symbol -> state". */
1660 static void report_goto(struct grammar *g, struct symset gt)
1665 for (i = 0; i < gt.cnt; i++) {
1667 prtxt(g->symtab[gt.syms[i]]->name);
1668 printf(" -> %d\n", gt.data[i]);
1672 Now we can report all the item sets complete with items, LA sets, and GO TO.
/* report_itemsets: for every state print its items (with LA sets when
 * the itemset carries data) and its goto table. */
1674 static void report_itemsets(struct grammar *g)
1677 printf("ITEM SETS(%d)\n", g->states);
1678 for (s = 0; s < g->states; s++) {
1680 struct itemset *is = g->statetab[s];
1681 printf(" Itemset %d:%s min prefix=%d",
1682 s, is->starts_line?" (startsline)":"", is->min_prefix);
1684 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1686 for (j = 0; j < is->items.cnt; j++) {
1687 report_item(g, is->items.syms[j]);
1688 if (is->items.data != NO_DATA)
1689 report_la(g, is->items.data[j]);
1691 report_goto(g, is->go_to);
1695 ### Reporting conflicts
1697 Conflict detection varies a lot among different analysis levels. However
1698 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1699 LR1 are also similar as they have FOLLOW or LA sets.
1703 ## conflict functions
/* report_conflicts: dispatch to the LR0/LR0.5 or SLR/LALR/LR1 conflict
 * detector depending on 'type' and return the conflict count. */
1705 static int report_conflicts(struct grammar *g, enum grammar_type type)
1708 printf("Conflicts:\n");
1710 cnt = conflicts_lr0(g, type);
1712 cnt = conflicts_slr(g, type);
1714 printf(" - no conflicts\n");
1718 LR0 conflicts are any state which have both a reducible item and
1719 a shiftable item, or two reducible items.
1721 LR05 conflicts only occur if two possible reductions exist,
1722 as shifts always over-ride reductions.
1724 ###### conflict functions
/* conflicts_lr0: for each state remember the last two reducible items
 * and the last shiftable item.  LR0 flags SHIFT/REDUCE conflicts; both
 * LR0 and LR0.5 flag two-or-more reducible items (REDUCE/REDUCE). */
1725 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1729 for (i = 0; i < g->states; i++) {
1730 struct itemset *is = g->statetab[i];
1731 int last_reduce = -1;
1732 int prev_reduce = -1;
1733 int last_shift = -1;
1737 for (j = 0; j < is->items.cnt; j++) {
1738 int itm = is->items.syms[j];
1739 int p = item_prod(itm);
1740 int bp = item_index(itm);
1741 struct production *pr = g->productions[p];
1743 if (bp == pr->body_size) {
1744 prev_reduce = last_reduce;
1748 if (pr->body[bp]->type == Terminal)
1751 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1752 printf(" State %d has both SHIFT and REDUCE:\n", i);
1753 report_item(g, is->items.syms[last_shift]);
1754 report_item(g, is->items.syms[last_reduce]);
1757 if (prev_reduce >= 0) {
1758 printf(" State %d has 2 (or more) reducible items\n", i);
1759 report_item(g, is->items.syms[prev_reduce]);
1760 report_item(g, is->items.syms[last_reduce]);
1767 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1768 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1769 in the source of the look ahead set.
1771 We build two datasets to reflect the "action" table: one which maps
1772 terminals to items where that terminal could be shifted and another
1773 which maps terminals to items that could be reduced when the terminal
1774 is in look-ahead. We report when we get conflicts between the two.
1776 As a special case, if we find a SHIFT/REDUCE conflict, where a
1777 terminal that could be shifted is in the lookahead set of some
1778 reducible item, then we check if the reducible item also has
1779 `TK_newline` in its lookahead set. If it does, then a newline will
1780 force the reduction, but anything else can reasonably be shifted, so
1781 that isn't really a conflict. Such apparent conflicts do not get
1782 counted, and are reported as non-critical. This will not affect a
1783 "traditional" grammar that does not include newlines as tokens.
/* conflicts_slr: per state, build 'shifts' (terminal -> shiftable item)
 * and 'reduce' (terminal -> reducible item) maps, then report overlaps.
 * A SHIFT/REDUCE overlap is downgraded to "non-critical" when the
 * reducible item has TK_newline in its look-ahead (see prose above). */
1785 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1790 for (i = 0; i < g->states; i++) {
1791 struct itemset *is = g->statetab[i];
1792 struct symset shifts = INIT_DATASET;
1793 struct symset reduce = INIT_DATASET;
1797 /* First collect the shifts */
1798 for (j = 0; j < is->items.cnt; j++) {
1799 unsigned short itm = is->items.syms[j];
1800 int p = item_prod(itm);
1801 int bp = item_index(itm);
1802 struct production *pr = g->productions[p];
1805 if (bp >= pr->body_size ||
1806 pr->body[bp]->type != Terminal)
1811 if (s->precedence && is->precedence)
1812 /* Precedence resolves this, so no conflict */
1815 if (symset_find(&shifts, s->num) < 0)
1816 symset_add(&shifts, s->num, itm);
1818 /* Now look for reductions and conflicts */
1819 for (j = 0; j < is->items.cnt; j++) {
1820 unsigned short itm = is->items.syms[j];
1821 int p = item_prod(itm);
1822 int bp = item_index(itm);
1823 struct production *pr = g->productions[p];
1825 if (bp < pr->body_size)
/* SLR takes look-ahead from FOLLOW; LALR/LR1 from the item's LA set. */
1830 la = g->follow[pr->head->num];
1832 la = set_find(g, is->items.data[j]);
1834 for (k = 0; k < la.cnt; k++) {
1835 int pos = symset_find(&shifts, la.syms[k]);
1837 if (symset_find(&la, TK_newline) < 0) {
1838 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1841 printf(" State %d has non-critical SHIFT/REDUCE conflict on ", i);
1842 prtxt(g->symtab[la.syms[k]]->name);
1844 report_item(g, shifts.data[pos]);
1845 report_item(g, itm);
1847 pos = symset_find(&reduce, la.syms[k]);
1849 symset_add(&reduce, la.syms[k], itm);
1852 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1853 prtxt(g->symtab[la.syms[k]]->name);
1855 report_item(g, itm);
1856 report_item(g, reduce.data[pos]);
1860 symset_free(shifts);
1861 symset_free(reduce);
1866 ## Generating the parser
1868 The exported part of the parser is the `parse_XX` function, where the name
1869 `XX` is based on the name of the parser files.
1871 This takes a `code_node`, a partially initialized `token_config`, and an
1872 optional `FILE` to send tracing to. The `token_config` gets the list of
1873 known words added and then is used with the `code_node` to initialize the
1876 `parse_XX` then calls the library function `parser_run` to actually complete
1877 the parse. This needs the `states` table and function to call the various
1878 pieces of code provided in the grammar file, so they are generated first.
1880 ###### parser_generate
/* gen_parser: emit the exported parse_XX() wrapper after generating the
 * reduce code.  The wrapper fills in the token_config, opens a token
 * stream, runs parser_run with the generated tables, and cleans up. */
1882 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
1883 struct code_node *pre_reduce)
1889 gen_reduce(f, g, file, pre_reduce);
1892 fprintf(f, "#line 0 \"gen_parser\"\n");
1893 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1896 fprintf(f, "\tstruct token_state *tokens;\n");
1897 fprintf(f, "\tconfig->words_marks = known;\n");
1898 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1899 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1900 fprintf(f, "\ttokens = token_open(code, config);\n");
1901 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1902 fprintf(f, "\ttoken_close(tokens);\n");
1903 fprintf(f, "\treturn rv;\n");
1904 fprintf(f, "}\n\n");
1907 ### Known words table
1909 The known words table is simply an array of terminal symbols.
1910 The table of nonterminals used for tracing is a similar array. We
1911 include virtual symbols in the table of non_terminals to keep the
/* gen_known: emit the known[] table - one string per terminal symbol,
 * starting at TK_reserved and stopping at the first non-terminal. */
1916 static void gen_known(FILE *f, struct grammar *g)
1919 fprintf(f, "#line 0 \"gen_known\"\n");
1920 fprintf(f, "static const char *known[] = {\n");
1921 for (i = TK_reserved;
1922 i < g->num_syms && g->symtab[i]->type == Terminal;
1924 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1925 g->symtab[i]->name.txt);
1926 fprintf(f, "};\n\n");
/* gen_non_term: emit the non_term[] table used for tracing - one string
 * per symbol that is not a terminal (virtual symbols included, per the
 * prose above). */
1929 static void gen_non_term(FILE *f, struct grammar *g)
1932 fprintf(f, "#line 0 \"gen_non_term\"\n");
1933 fprintf(f, "static const char *non_term[] = {\n");
1934 for (i = TK_reserved;
1937 if (g->symtab[i]->type != Terminal)
1938 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1939 g->symtab[i]->name.txt);
1940 fprintf(f, "};\n\n");
1943 ### States and the goto tables.
1945 For each state we record the goto table, the reducible production if
1946 there is one, or a symbol to shift for error recovery.
1947 Some of the details of the reducible production are stored in the
1948 `do_reduce` function to come later. Here we store the production number,
1949 the body size (useful for stack management) and the resulting symbol (useful
1950 for knowing how to free data later).
1952 The go to table is stored in a simple array of `sym` and corresponding
1955 ###### exported types
1963 const struct lookup * go_to;
/* gen_goto: emit one goto_<state>[] array per state, each entry a
 * { symbol, next-state } pair taken from the state's go_to set. */
1973 static void gen_goto(FILE *f, struct grammar *g)
1976 fprintf(f, "#line 0 \"gen_goto\"\n");
1977 for (i = 0; i < g->states; i++) {
1979 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1981 struct symset gt = g->statetab[i]->go_to;
1982 for (j = 0; j < gt.cnt; j++)
1983 fprintf(f, "\t{ %d, %d },\n",
1984 gt.syms[j], gt.data[j]);
/* gen_states: emit the states[] table.  For each state find the
 * reducible item (preferring the longest body when there are several)
 * and record production number, body size and head symbol; states with
 * no reduction get -1 placeholders. */
1991 static void gen_states(FILE *f, struct grammar *g)
1994 fprintf(f, "#line 0 \"gen_states\"\n");
1995 fprintf(f, "static const struct state states[] = {\n");
1996 for (i = 0; i < g->states; i++) {
1997 struct itemset *is = g->statetab[i];
1998 int j, prod = -1, prod_len;
2000 for (j = 0; j < is->items.cnt; j++) {
2001 int itm = is->items.syms[j];
2002 int p = item_prod(itm);
2003 int bp = item_index(itm);
2004 struct production *pr = g->productions[p];
2006 if (bp < pr->body_size)
2008 /* This is what we reduce */
2009 if (prod < 0 || prod_len < pr->body_size) {
2011 prod_len = pr->body_size;
2016 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d },\n",
2017 i, is->go_to.cnt, i, prod,
2018 g->productions[prod]->body_size,
2019 g->productions[prod]->head->num,
2020 is->starts_line, is->min_prefix);
2022 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
2023 i, is->go_to.cnt, i,
2024 is->starts_line, is->min_prefix);
2026 fprintf(f, "};\n\n");
2029 ### The `do_reduce` function and the code
2031 When the parser engine decides to reduce a production, it calls `do_reduce`.
2032 This has two functions.
2034 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2035 production being reduced. Secondly it runs the code that was included with
2036 the production if any.
2038 This code needs to be able to store data somewhere. Rather than requiring
2039 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2040 `do_reduce` return the size to be saved.
2042 In order for the code to access "global" context, we pass in the
2043 "config" pointer that was passed to parser function. If the `struct
2044 token_config` is embedded in some larger structure, the reducing code
2045 can access the larger structure using pointer manipulation.
2047 The code fragment requires translation when written out. Any `$N` needs to
2048 be converted to a reference either to that buffer (if `$0`) or to the
2049 structure returned by a previous reduction. These pointers need to be cast
2050 to the appropriate type for each access. All this is handled in
2053 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
2054 This applies only to symbols with references (or pointers), not those with structures.
2055 The `<` implies that the reference is being moved out, so the object will not be
2056 automatically freed. This is equivalent to assigning `NULL` to the pointer.
/* gen_code: copy a production's code fragment to the output, rewriting
 * each $N reference - $0 becomes a cast of 'ret', $N a cast of body[N-1]
 * (a struct token for terminals).  Out-of-range or untyped references
 * are passed through verbatim.  'used' tracks $<N moved-out references
 * so the corresponding body pointers can be NULLed afterwards. */
2060 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2063 char *used = calloc(1, p->body_size);
2066 fprintf(f, "\t\t\t");
2067 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2081 if (*c < '0' || *c > '9') {
2088 while (c[1] >= '0' && c[1] <= '9') {
2090 n = n * 10 + *c - '0';
2093 fprintf(f, "(*(struct %.*s*%s)ret)",
2094 p->head->struct_name.len,
2095 p->head->struct_name.txt,
2096 p->head->isref ? "*":"");
2097 else if (n > p->body_size)
2098 fprintf(f, "$%d", n);
2099 else if (p->body[n-1]->type == Terminal)
2100 fprintf(f, "(*(struct token *)body[%d])",
2102 else if (p->body[n-1]->struct_name.txt == NULL)
2103 fprintf(f, "$%d", n);
2105 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2106 p->body[n-1]->struct_name.len,
2107 p->body[n-1]->struct_name.txt,
2108 p->body[n-1]->isref ? "*":"", n-1);
/* NULL out references that were moved with $< so they aren't freed. */
2113 for (i = 0; i < p->body_size; i++) {
2114 if (p->body[i]->struct_name.txt &&
2115 p->body[i]->isref &&
2117 // assume this has been copied out
2118 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
/* gen_reduce: emit do_reduce() - a switch over production numbers that
 * runs each production's code fragment (preceded by the optional
 * 'reduce' preamble) and sets ret_size to the head's structure size so
 * the caller knows how much of the result buffer to keep. */
2125 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2126 struct code_node *code)
2129 fprintf(f, "#line 1 \"gen_reduce\"\n");
2130 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2132 fprintf(f, "\tint ret_size = 0;\n");
2134 code_node_print(f, code, file);
2136 fprintf(f, "#line 4 \"gen_reduce\"\n");
2137 fprintf(f, "\tswitch(prod) {\n");
2138 for (i = 0; i < g->production_count; i++) {
2139 struct production *p = g->productions[i];
2140 fprintf(f, "\tcase %d:\n", i);
2143 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2147 if (p->head->struct_name.txt)
2148 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2149 p->head->struct_name.len,
2150 p->head->struct_name.txt,
2151 p->head->isref ? "*":"");
2153 fprintf(f, "\t\tbreak;\n");
2155 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2160 As each non-terminal can potentially cause a different type of data
2161 structure to be allocated and filled in, we need to be able to free it when
2164 It is particularly important to have fine control over freeing during error
2165 recovery where individual stack frames might need to be freed.
2167 For this, the grammar author is required to define a `free_XX` function for
2168 each structure that is used by a non-terminal. `do_free` will call whichever
2169 is appropriate given a symbol number, and will call `free` (as is
2170 appropriate for tokens) on any terminal symbol.
/* gen_free: emit do_free() - plain free() for terminals (symbols below
 * first_nonterm), and for each typed nonterminal a call to the author's
 * free_XX function; reference-typed symbols are dereferenced first and
 * the holder freed as well. */
2174 static void gen_free(FILE *f, struct grammar *g)
2178 fprintf(f, "#line 0 \"gen_free\"\n");
2179 fprintf(f, "static void do_free(short sym, void *asn)\n");
2181 fprintf(f, "\tif (!asn) return;\n");
2182 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2183 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2184 fprintf(f, "\tswitch(sym) {\n");
2186 for (i = 0; i < g->num_syms; i++) {
2187 struct symbol *s = g->symtab[i];
2189 s->type != Nonterminal ||
2190 s->struct_name.txt == NULL)
2193 fprintf(f, "\tcase %d:\n", s->num);
2195 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2197 s->struct_name.txt);
2198 fprintf(f, "\t\tfree(asn);\n");
2200 fprintf(f, "\t\tfree_%.*s(asn);\n",
2202 s->struct_name.txt);
2203 fprintf(f, "\t\tbreak;\n");
2205 fprintf(f, "\t}\n}\n\n");
2208 ## The main routine.
2210 There are three key parts to the "main" function of parsergen: processing
2211 the arguments, loading the grammar file, and dealing with the grammar.
2213 ### Argument processing.
2215 Command line options allow the selection of analysis mode, name of output
2216 file, and whether or not a report should be generated. By default we create
2217 a report only if no code output was requested.
2219 The `parse_XX` function name uses the basename of the output file which
2220 should not have a suffix (`.c`). `.c` is added to the given name for the
2221 code, and `.h` is added for the header (if header text is specified in the
/* Command-line options: one flag per analysis level, plus --tag for
 * section selection, --report, and --output; 'options' is the matching
 * short-option string for getopt_long. */
2228 static const struct option long_options[] = {
2229 { "LR0", 0, NULL, '0' },
2230 { "LR05", 0, NULL, '5' },
2231 { "SLR", 0, NULL, 'S' },
2232 { "LALR", 0, NULL, 'L' },
2233 { "LR1", 0, NULL, '1' },
2234 { "tag", 1, NULL, 't' },
2235 { "report", 0, NULL, 'R' },
2236 { "output", 1, NULL, 'o' },
2237 { NULL, 0, NULL, 0 }
2239 const char *options = "05SL1t:Ro:";
2241 ###### process arguments
/* Parse options (analysis level defaults to LR05), then take the input
 * file from the remaining arguments; the parser name is the basename of
 * the output file. */
2243 char *outfile = NULL;
2248 enum grammar_type type = LR05;
2249 while ((opt = getopt_long(argc, argv, options,
2250 long_options, NULL)) != -1) {
2265 outfile = optarg; break;
2267 tag = optarg; break;
2269 fprintf(stderr, "Usage: parsergen ...\n");
2274 infile = argv[optind++];
2276 fprintf(stderr, "No input file given\n");
2279 if (outfile && report == 1)
2282 if (name && strchr(name, '/'))
2283 name = strrchr(name, '/')+1;
2285 if (optind < argc) {
2286 fprintf(stderr, "Excess command line arguments\n");
2290 ### Loading the grammar file
2292 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2295 Once we have extracted the code (with `mdcode`) we expect to find three
2296 sections: header, code, and grammar. Anything else that is not
2297 excluded by the `--tag` option is an error.
2299 "header" and "code" are optional, though it is hard to build a working
2300 parser with neither. "grammar" must be provided.
2304 #include <sys/mman.h>
/* pr_err: error callback handed to code_extract - just prints to stderr. */
2309 static void pr_err(char *msg)
2312 fprintf(stderr, "%s\n", msg);
2316 struct section *table;
/* mmap the whole input file and let mdcode extract its sections, then
 * sort the sections into header / code / grammar / reduce, honouring
 * --tag; anything else is reported as unknown. */
2320 fd = open(infile, O_RDONLY);
2322 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2323 infile, strerror(errno));
2326 len = lseek(fd, 0, 2);
2327 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2328 table = code_extract(file, file+len, pr_err);
2330 struct code_node *hdr = NULL;
2331 struct code_node *code = NULL;
2332 struct code_node *gram = NULL;
2333 struct code_node *pre_reduce = NULL;
2334 for (s = table; s; s = s->next) {
2335 struct text sec = s->section;
2336 if (tag && !strip_tag(&sec, tag))
2338 if (text_is(sec, "header"))
2340 else if (text_is(sec, "code"))
2342 else if (text_is(sec, "grammar"))
2344 else if (text_is(sec, "reduce"))
2345 pre_reduce = s->code;
2347 fprintf(stderr, "Unknown content section: %.*s\n",
2348 s->section.len, s->section.txt);
2353 ### Processing the input
2355 As we need to append an extension to a filename and then open it for
2356 writing, and we need to do this twice, it helps to have a separate function.
/* open_ext: open "<base><ext>" for writing (allocates the joined name;
 * remainder of the body truncated in this extract). */
2360 static FILE *open_ext(char *base, char *ext)
2362 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2364 strcat(strcpy(fn, base), ext);
2370 If we can read the grammar, then we analyse and optionally report on it. If
2371 the report finds conflicts we will exit with an error status.
2373 ###### process input
/* Read the grammar section, analyse it, and report; a failed read or a
 * report that finds conflicts aborts code generation. */
2374 struct grammar *g = NULL;
2376 fprintf(stderr, "No grammar section provided\n");
2379 g = grammar_read(gram);
2381 fprintf(stderr, "Failure to parse grammar\n");
2386 grammar_analyse(g, type);
2388 if (grammar_report(g, type))
2392 If a "headers" section is defined, we write it out and include a declaration
2393 for the `parse_XX` function so it can be used from separate code.
/* Write the .h file: the user's header section plus a declaration of
 * the generated parse_XX entry point. */
2395 if (rv == 0 && hdr && outfile) {
2396 FILE *f = open_ext(outfile, ".h");
2398 code_node_print(f, hdr, infile);
2399 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2403 fprintf(stderr, "Cannot create %s.h\n",
2409 And if all goes well and an output file was provided, we create the `.c`
2410 file with the code section (if any) and the parser tables and function.
/* Write the .c file: the user's code section followed by the generated
 * tables and parse_XX function. */
2412 if (rv == 0 && outfile) {
2413 FILE *f = open_ext(outfile, ".c");
2416 code_node_print(f, code, infile);
2417 gen_parser(f, g, infile, name, pre_reduce);
2420 fprintf(stderr, "Cannot create %s.c\n",
2426 And that about wraps it up. We need to set the locale so that UTF-8 is
2427 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2429 ###### File: parsergen.mk
2430 parsergen : parsergen.o libscanner.o libmdcode.o
2431 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
/* main: set the locale so UTF-8 input is handled correctly, then run
 * argument processing and the stages stitched in by the literate tool. */
2438 int main(int argc, char *argv[])
2443 setlocale(LC_ALL,"");
2445 ## process arguments
2452 ## The SHIFT/REDUCE parser
2454 Having analysed the grammar and generated all the tables, we only need the
2455 shift/reduce engine to bring it all together.
2457 ### Goto table lookup
2459 The parser generator has nicely provided us with goto tables sorted by
2460 symbol number. We need a binary search function to find a symbol in the
2463 ###### parser functions
/* search: binary search the state's sorted goto table for 'sym';
 * returns the target state on a hit (miss path truncated here). */
2465 static int search(const struct state *l, int sym)
2468 int hi = l->go_to_cnt;
2472 while (lo + 1 < hi) {
2473 int mid = (lo + hi) / 2;
2474 if (l->go_to[mid].sym <= sym)
2479 if (l->go_to[lo].sym == sym)
2480 return l->go_to[lo].state;
2485 ### The state stack.
2487 The core data structure for the parser is the stack. This tracks all the
2488 symbols that have been recognised or partially recognised.
2490 The stack usually won't grow very large - maybe a few tens of entries. So
2491 we dynamically resize an array as required but never bother to shrink it
2494 We keep the stack as two separate allocations. One, `asn_stack` stores the
2495 "abstract syntax nodes" which are created by each reduction. When we call
2496 `do_reduce` we need to pass an array of the `asn`s of the body of the
2497 production, and by keeping a separate `asn` stack, we can just pass a
2498 pointer into this stack.
2500 The other allocation stores all other stack fields of which there are six.
2501 The `state` is the most important one and guides the parsing process. The
2502 `sym` is nearly unnecessary. However when we want to free entries from the
2503 `asn_stack`, it helps to know what type they are so we can call the right
2504 freeing function. The symbol leads us to the right free function through
2507 The `indents` count tracks the line indents within the symbol or
2508 immediately following it. These are used to allow indent information to
2509 guide parsing and error recovery.
2511 `since_newline` tracks how many stack frames since the last
2512 start-of-line (whether indented or not). So if `since_newline` is
2513 zero, then this symbol is at the start of a line. Similarly
2514 `since_indent` counts the number of states since an indent, it is zero
2515 precisely when `indents` is not zero.
2517 `newline_permitted` keeps track of whether newlines should be ignored
2520 The stack is most properly seen as alternating states and symbols -
2521 states, like the 'DOT' in items, are between symbols. Each frame in
2522 our stack holds a state and the symbol that was before it. The
2523 bottom of stack holds the start state but no symbol, as nothing came
2524 before the beginning.
2526 ###### parser functions
2531 short newline_permitted;
2535 short since_newline;
2545 Two operations are needed on the stack - shift (which is like push) and pop.
2547 Shift applies not only to terminals but also to non-terminals. When
2548 we reduce a production we will pop off entries corresponding to the
2549 body symbols, then push on an item for the head of the production.
2550 This last is exactly the same process as shifting in a terminal so we
2551 use the same function for both. In both cases we provide the symbol,
2552 the number of indents the symbol contains (which will be zero for a
2553 terminal symbol) and a flag indicating that the symbol was at (or was
2554 reduced from a symbol which was at) the start of a line. The state is
2555 deduced from the current top-of-stack state and the new symbol.
2557 To simplify other code we arrange for `shift` to fail if there is no `goto`
2558 state for the symbol. This is useful in basic parsing due to our design
2559 that we shift when we can, and reduce when we cannot. So the `shift`
2560 function reports if it could.
2562 `shift` is also used to push state zero onto the stack, so if the
2563 stack is empty, it always chooses zero as the next state.
2565 So `shift` finds the next state. If that succeeds it extends the
2566 allocations if needed and pushes all the information onto the stacks.
2568 Newlines are permitted after a `starts_line` state until an internal
2569 indent. If the new frame has neither a `starts_line` state nor an
2570 indent, newlines are permitted if the previous stack frame permitted
2573 ###### parser functions
2575 static int shift(struct parser *p,
2576 short sym, short indents, short start_of_line,
2578 const struct state states[])
2580 // Push an entry onto the stack
2581 struct frame next = {0};
2582 int newstate = p->tos
2583 ? search(&states[p->stack[p->tos-1].state],
2588 if (p->tos >= p->stack_size) {
2589 p->stack_size += 10;
2590 p->stack = realloc(p->stack, p->stack_size
2591 * sizeof(p->stack[0]));
2592 p->asn_stack = realloc(p->asn_stack, p->stack_size
2593 * sizeof(p->asn_stack[0]));
2596 next.indents = indents;
2597 next.state = newstate;
2598 if (states[newstate].starts_line)
2599 next.newline_permitted = 1;
2601 next.newline_permitted = 0;
2603 next.newline_permitted =
2604 p->stack[p->tos-1].newline_permitted;
2606 next.newline_permitted = 0;
2608 if (!start_of_line) {
2610 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2612 next.since_newline = 1;
2615 next.since_indent = 0;
2617 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2619 next.since_indent = 1;
2621 p->stack[p->tos] = next;
2622 p->asn_stack[p->tos] = asn;
2627 `pop` primarily moves the top of stack (`tos`) back down the required
2628 amount and frees any `asn` entries that need to be freed. It also
2629 collects a summary of the indents and line starts in the symbols that
2630 are being removed. It is called _after_ we reduce a production, just
2631 before we `shift` the nonterminal in.
2633 ###### parser functions
2635 static int pop(struct parser *p, int num,
2636 short *start_of_line,
2637 void(*do_free)(short sym, void *asn))
2644 for (i = 0; i < num; i++) {
2645 sol |= !p->stack[p->tos+i].since_newline;
2646 indents += p->stack[p->tos+i].indents;
2647 do_free(p->stack[p->tos+i].sym,
2648 p->asn_stack[p->tos+i]);
2651 *start_of_line = sol;
2655 ### Memory allocation
2657 The `scanner` returns tokens in a local variable - we want them in allocated
2658 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2659 a reduce is in a large buffer. Both of these require some allocation and
2660 copying, hence `memdup` and `tokcopy`.
2662 ###### parser includes
2665 ###### parser functions
2667 void *memdup(void *m, int len)
2673 memcpy(ret, m, len);
2677 static struct token *tok_copy(struct token tk)
2679 struct token *new = malloc(sizeof(*new));
2684 ### The heart of the parser.
2686 Now we have the parser. If we can shift we do, though newlines and
2687 reducing indenting may block that. If not and we can reduce we do
2688 that. If the production we reduced was production zero, then we have
2689 accepted the input and can finish.
2691 We return whatever `asn` was returned by reducing production zero.
2693 If we can neither shift nor reduce we have an error to handle. We pop
2694 single entries off the stack until we can shift the `TK_error` symbol, then
2695 drop input tokens until we find one we can shift into the new error state.
2697 When we find `TK_in` and `TK_out` tokens which report indents we need
2698 to handle them directly as the grammar cannot express what we want to
2701 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2702 record how many indents there are following the previous token.
2704 `TK_out` tokens must be canceled against an indent count
2705 within the stack. If we can reduce some symbols that are all since
2706 the most recent indent, then we do that first. If the minimum prefix
2707 of the current state then extends back before the most recent indent,
2708 that indent can be cancelled. If the minimum prefix is shorter, then
2709 the indent had ended prematurely and we must start error handling, which
2710 is still a work-in-progress.
2712 `TK_newline` tokens are ignored unless the top stack frame records
2713 that they are permitted. In that case they will not be considered for
2714 shifting if it is possible to reduce some symbols that are all since
2715 the most recent start of line. This is how a newline forcibly
2716 terminates any line-like structure - we try to reduce down to at most
2717 one symbol for each line where newlines are allowed.
2718 A consequence of this is that a rule like
2720 ###### Example: newlines - broken
2724 IfStatement -> Newlines if ....
2726 cannot work, as the NEWLINE will never be shifted as the empty string
2727 will be reduced first. Optional sets of newlines need to be included
2728 in the thing that precedes them:
2730 ###### Example: newlines - works
2734 IfStatement -> If ....
2736 Here the NEWLINE will be shifted because nothing can be reduced until
2739 When, during error handling, we discard tokens read in, we want to keep
2740 discarding until we see one that is recognised. If we had a full set
2741 of LR(1) grammar states, this would mean looking in the look-ahead set,
2742 but we don't keep a full look-ahead set. We only record the subset
2743 that leads to SHIFT. We can, however, deduce the look-ahead set by
2744 looking at the SHIFT subsets for all states that we can get to by
2745 reducing zero or more times. So we need a little function which
2746 checks if a given token is in any of these look-ahead sets.
2748 ###### parser includes
2753 static int in_lookahead(struct token *tk, const struct state *states, int state)
2755 while (state >= 0) {
2756 if (search(&states[state], tk->num) >= 0)
2758 if (states[state].reduce_prod < 0)
2760 state = search(&states[state], states[state].reduce_sym);
2765 void *parser_run(struct token_state *tokens,
2766 const struct state states[],
2767 int (*do_reduce)(int, void**, struct token_config*, void*),
2768 void (*do_free)(short, void*),
2769 FILE *trace, const char *non_term[],
2770 struct token_config *config)
2772 struct parser p = { 0 };
2773 struct token *tk = NULL;
2777 shift(&p, TK_eof, 0, 1, NULL, states);
2779 struct token *err_tk;
2780 struct frame *tos = &p.stack[p.tos-1];
2782 tk = tok_copy(token_next(tokens));
2783 parser_trace(trace, &p,
2784 tk, states, non_term, config->known_count);
2786 if (tk->num == TK_in) {
2788 tos->since_newline = 0;
2789 tos->since_indent = 0;
2790 if (!states[tos->state].starts_line)
2791 tos->newline_permitted = 0;
2794 parser_trace_action(trace, "Record");
2797 if (tk->num == TK_out) {
2798 if (states[tos->state].reduce_size >= 0 &&
2799 states[tos->state].reduce_size <= tos->since_indent)
2801 if (states[tos->state].min_prefix >= tos->since_indent) {
2803 struct frame *in = tos - tos->since_indent;
2805 if (in->indents == 0) {
2806 /* Reassess since_indent and newline_permitted */
2808 in->since_indent = in[-1].since_indent + 1;
2809 in->newline_permitted = in[-1].newline_permitted;
2811 in->since_indent = 0;
2812 in->newline_permitted = 0;
2814 if (states[in->state].starts_line)
2815 in->newline_permitted = 1;
2818 in->since_indent = in[-1].since_indent + 1;
2819 if (states[in->state].starts_line)
2820 in->newline_permitted = 1;
2822 in->newline_permitted = in[-1].newline_permitted;
2827 parser_trace_action(trace, "Cancel");
2830 // fall through to error handling as both SHIFT and REDUCE
2833 if (tk->num == TK_newline) {
2834 if (!tos->newline_permitted) {
2837 parser_trace_action(trace, "Discard");
2840 if (tos->since_newline > 1 &&
2841 states[tos->state].reduce_size >= 0 &&
2842 states[tos->state].reduce_size <= tos->since_newline)
2845 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2847 parser_trace_action(trace, "Shift");
2851 if (states[tos->state].reduce_prod >= 0) {
2854 const struct state *nextstate = &states[tos->state];
2855 int prod = nextstate->reduce_prod;
2856 int size = nextstate->reduce_size;
2858 static char buf[16*1024];
2859 short indents, start_of_line;
2861 body = p.asn_stack + (p.tos - size);
2863 bufsize = do_reduce(prod, body, config, buf);
2865 indents = pop(&p, size, &start_of_line,
2867 res = memdup(buf, bufsize);
2868 memset(buf, 0, bufsize);
2869 if (!shift(&p, nextstate->reduce_sym,
2870 indents, start_of_line,
2872 if (prod != 0) abort();
2876 parser_trace_action(trace, "Reduce");
2879 /* Error. We walk up the stack until we
2880 * find a state which will accept TK_error.
2881 * We then shift in TK_error and see what state
2882 * that takes us too.
2883 * Then we discard input tokens until
2884 * we find one that is acceptable.
2886 parser_trace_action(trace, "ERROR");
2887 short indents = 0, start_of_line;
2889 err_tk = tok_copy(*tk);
2891 shift(&p, TK_error, 0, 0,
2892 err_tk, states) == 0)
2893 // discard this state
2894 indents += pop(&p, 1, &start_of_line, do_free);
2897 // no state accepted TK_error
2900 tos = &p.stack[p.tos-1];
2901 while (!in_lookahead(tk, states, tos->state) &&
2902 tk->num != TK_eof) {
2904 tk = tok_copy(token_next(tokens));
2905 if (tk->num == TK_in)
2907 if (tk->num == TK_out) {
2911 // FIXME update since_indent here
2914 tos->indents += indents;
2917 pop(&p, p.tos, NULL, do_free);
2923 ###### exported functions
2924 void *parser_run(struct token_state *tokens,
2925 const struct state states[],
2926 int (*do_reduce)(int, void**, struct token_config*, void*),
2927 void (*do_free)(short, void*),
2928 FILE *trace, const char *non_term[],
2929 struct token_config *config);
2933 Being able to visualize the parser in action can be invaluable when
2934 debugging the parser code, or trying to understand how the parse of a
2935 particular grammar progresses. The stack contains all the important
2936 state, so just printing out the stack every time around the parse loop
2937 can make it possible to see exactly what is happening.
2939 This doesn't explicitly show each SHIFT and REDUCE action. However they
2940 are easily deduced from the change between consecutive lines, and the
2941 details of each state can be found by cross referencing the states list
2942 in the stack with the "report" that parsergen can generate.
2944 For terminal symbols, we just dump the token. For non-terminals we
2945 print the name of the symbol. The look ahead token is reported at the
2946 end inside square brackets.
2948 ###### parser functions
2950 static char *reserved_words[] = {
2951 [TK_error] = "ERROR",
2954 [TK_newline] = "NEWLINE",
2957 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2959 fprintf(trace, "(%d", f->state);
2960 if (states[f->state].starts_line)
2961 fprintf(trace, "s");
2962 if (f->newline_permitted)
2963 fprintf(trace, "n%d", f->since_newline);
2964 fprintf(trace, ") ");
2967 void parser_trace(FILE *trace, struct parser *p,
2968 struct token *tk, const struct state states[],
2969 const char *non_term[], int knowns)
2974 for (i = 0; i < p->tos; i++) {
2975 struct frame *f = &p->stack[i];
2978 if (sym < TK_reserved &&
2979 reserved_words[sym] != NULL)
2980 fputs(reserved_words[sym], trace);
2981 else if (sym < TK_reserved + knowns) {
2982 struct token *t = p->asn_stack[i];
2983 text_dump(trace, t->txt, 20);
2985 fputs(non_term[sym - TK_reserved - knowns],
2988 fprintf(trace, ".%d", f->indents);
2989 if (f->since_newline == 0)
2993 parser_trace_state(trace, f, states);
2995 fprintf(trace, "[");
2996 if (tk->num < TK_reserved &&
2997 reserved_words[tk->num] != NULL)
2998 fputs(reserved_words[tk->num], trace);
3000 text_dump(trace, tk->txt, 20);
3004 void parser_trace_action(FILE *trace, char *action)
3007 fprintf(trace, " - %s\n", action);
3012 The obvious example for a parser is a calculator.
3014 As `scanner` provides a number parsing function using `libgmp`, it is not much
3015 work to perform arbitrary rational number calculations.
3017 This calculator takes one expression, or an equality test, per line. The
3018 results are printed and if any equality test fails, the program exits with
3021 ###### File: parsergen.mk
3022 calc.c calc.h : parsergen parsergen.mdc
3023 ./parsergen --tag calc -o calc parsergen.mdc
3024 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3025 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3027 ./calc parsergen.mdc
3033 // what do we use for a demo-grammar? A calculator of course.
3045 #include <sys/mman.h>
3051 #include "scanner.h"
3057 static void free_number(struct number *n)
3063 static int text_is(struct text t, char *s)
3065 return (strlen(s) == t.len &&
3066 strncmp(s, t.txt, t.len) == 0);
3069 int main(int argc, char *argv[])
3071 int fd = open(argv[1], O_RDONLY);
3072 int len = lseek(fd, 0, 2);
3073 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3074 struct section *table = code_extract(file, file+len, NULL);
3076 struct token_config config = {
3077 .ignored = (1 << TK_line_comment)
3078 | (1 << TK_block_comment)
3081 .number_chars = ".,_+-",
3085 for (s = table; s; s = s->next)
3086 if (text_is(s->section, "example: input"))
3087 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3089 struct section *t = table->next;
3090 code_free(table->code);
3102 Session -> Session Line
3105 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3106 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3107 gmp_printf(" or as a decimal: %Fg\n", fl);
3111 | Expression = Expression NEWLINE ${
3112 if (mpq_equal($1.val, $3.val))
3113 gmp_printf("Both equal %Qd\n", $1.val);
3115 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3120 | NEWLINE ${ printf("Blank line\n"); }$
3121 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3124 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3125 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3126 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3127 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3128 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3129 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3134 3.1415926535 - 355/113
3136 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3138 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1