1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
25 ###### File: parsergen.c
30 ## forward declarations
41 ###### File: libparser.c
48 ###### File: parsergen.mk
51 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
54 ## Reading the grammar
56 The grammar must be stored in a markdown literate code file as parsed
57 by "[mdcode][]". It must have three top level (i.e. unreferenced)
58 sections: `header`, `code`, and `grammar`. The first two will be
59 literally copied into the generated `.c` and `.h` files. The last
60 contains the grammar. This is tokenised with "[scanner][]".
62 If the `--tag` option is given, then any top level header that doesn't
63 start with the tag is ignored, and the tag is stripped from the rest. So
65 means that the three needed sections must be `Foo: header`, `Foo: code`,
66 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
70 [scanner]: scanner.html
76 ###### parser includes
80 The grammar contains production sets, precedence/associativity
81 declarations, and data type declarations. These are all parsed with
82 _ad hoc_ parsing as we don't have a parser generator yet.
84 The precedence and associativity can be set for each production, but
85 can be inherited from symbols. The data type (either structure or a
86 reference to a structure) is potentially defined for each non-terminal
87 and describes what C structure is used to store information about each
91 enum assoc {Left, Right, Non};
92 char *assoc_names[] = {"Left","Right","Non"};
95 struct text struct_name;
98 unsigned short precedence;
102 unsigned short precedence;
110 The strings reported by `mdcode` and `scanner` are `struct text` which have
111 length rather than being null terminated. To help with printing and
112 comparing we define `text_is` and `prtxt`, which should possibly go in
113 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
114 which might contain control characters.
116 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
117 because that is what we need to detect tags.
120 static int text_is(struct text t, char *s)
122 return (strlen(s) == t.len &&
123 strncmp(s, t.txt, t.len) == 0);
125 static void prtxt(struct text t)
127 printf("%.*s", t.len, t.txt);
130 static int strip_tag(struct text *t, char *tag)
132 int skip = strlen(tag) + 1;
133 if (skip >= t->len ||
134 strncmp(t->txt, tag, skip-1) != 0 ||
135 t->txt[skip-1] != ':')
137 while (skip < t->len && t->txt[skip] == ' ')
146 Productions are comprised primarily of symbols - terminal and
147 non-terminal. We do not make a syntactic distinction between the two,
148 though non-terminals must be identifiers. Non-terminal symbols are
149 those which appear in the head of a production, terminal symbols are
150 those which don't. There are also "virtual" symbols used for precedence
151 marking discussed later, and sometimes we won't know what type a symbol
154 ###### forward declarations
155 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
156 char *symtypes = "UVTN";
160 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
161 table of known symbols and the resulting parser will report them as
162 `TK_reserved + N`. A small set of identifiers are reserved for the
163 different token types that `scanner` can report.
166 static char *reserved_words[] = {
167 [TK_error] = "ERROR",
168 [TK_number] = "NUMBER",
169 [TK_ident] = "IDENTIFIER",
171 [TK_string] = "STRING",
172 [TK_multi_string] = "MULTI_STRING",
175 [TK_newline] = "NEWLINE",
181 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
182 recognised. The former is automatically expected at the end of the text
183 being parsed. The latter are always ignored by the parser.
185 All of these symbols are stored in a simple symbol table. We use the
186 `struct text` from `mdcode` to store the name and link them together
187 into a sorted list using an insertion sort.
189 We don't have separate `find` and `insert` functions as any symbol we
190 find needs to be remembered. We simply expect `find` to always return a
191 symbol, but its type might be `Unknown`.
200 ###### grammar fields
205 static struct symbol *sym_find(struct grammar *g, struct text s)
207 struct symbol **l = &g->syms;
212 (cmp = text_cmp((*l)->name, s)) < 0)
216 n = calloc(1, sizeof(*n));
225 static void symbols_init(struct grammar *g)
227 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
229 for (i = 0; i < entries; i++) {
232 t.txt = reserved_words[i];
235 t.len = strlen(t.txt);
242 ### Data types and precedence.
244 Data type specification and precedence specification are both
245 introduced by a dollar sign at the start of the line. If the next
246 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
247 precedence, otherwise it specifies a data type.
249 The data type name is simply stored and applied to the head of all
250 subsequent productions. It must be the name of a structure optionally
251 preceded by an asterisk which means a reference or "pointer". So
252 `$expression` maps to `struct expression` and `$*statement` maps to
253 `struct statement *`.
255 Any productions given before the first data type declaration will have
256 no data type associated with them and can carry no information. In
257 order to allow other non-terminals to have no type, the data type
258 `$void` can be given. This does *not* mean that `struct void` will be
259 used, but rather that no type will be associated with future
262 The precedence line must contain a list of symbols - typically
263 terminal symbols, but not necessarily. It can only contain symbols
264 that have not been seen yet, so precedence declaration must precede
265 the productions that they affect.
267 A precedence line may also contain a "virtual" symbol which is an
268 identifier preceded by `$$`. Virtual terminals carry precedence
269 information but are not included in the grammar. A production can
270 declare that it inherits the precedence of a given virtual symbol.
272 This use for `$$` precludes it from being used as a symbol in the
273 described language. Two other symbols: `${` and `}$` are also
276 Each new precedence line introduces a new precedence level and
277 declares how it associates. This level is stored in each symbol
278 listed and may be inherited by any production which uses the symbol. A
279 production inherits from the last symbol which has a precedence.
281 ###### grammar fields
282 struct text current_type;
287 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
288 static const char *known[] = { "$$", "${", "}$" };
291 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
293 struct token t = token_next(ts);
298 if (t.num != TK_ident) {
299 err = "type or assoc expected after '$'";
302 if (text_is(t.txt, "LEFT"))
304 else if (text_is(t.txt, "RIGHT"))
306 else if (text_is(t.txt, "NON"))
309 g->current_type = t.txt;
310 g->type_isref = isref;
311 if (text_is(t.txt, "void"))
312 g->current_type.txt = NULL;
314 if (t.num != TK_newline) {
315 err = "Extra tokens after type name";
322 err = "$* cannot be followed by a precedence";
326 // This is a precedence line, need some symbols.
330 while (t.num != TK_newline) {
331 enum symtype type = Terminal;
333 if (t.num == TK_virtual) {
336 if (t.num != TK_ident) {
337 err = "$$ must be followed by a word";
340 } else if (t.num != TK_ident &&
342 err = "Illegal token in precedence line";
345 s = sym_find(g, t.txt);
346 if (s->type != Unknown) {
347 err = "Symbols in precedence line must not already be known.";
351 s->precedence = g->prec_levels;
357 err = "No symbols given on precedence line";
361 while (t.num != TK_newline && t.num != TK_eof)
368 A production either starts with an identifier which is the head
369 non-terminal, or a vertical bar (`|`) in which case this production
370 uses the same head as the previous one. The identifier must be
371 followed by a `->` mark. All productions for a given non-terminal must
372 be grouped together with the `nonterminal ->` given only once.
374 After this a (possibly empty) sequence of identifiers and marks form
375 the body of the production. A virtual symbol may be given after the
376 body by preceding it with `$$`. If a virtual symbol is given, the
377 precedence of the production is that for the virtual symbol. If none
378 is given, the precedence is inherited from the last symbol in the
379 production which has a precedence specified.
381 After the optional precedence may come the `${` mark. This indicates
382 the start of a code fragment. If present, this must be on the same
383 line as the start of the production.
385 All of the text from the `${` through to the matching `}$` is
386 collected and forms the code-fragment for the production. It must all
387 be in one `code_node` of the literate code. The `}$` must be
388 at the end of a line.
390 Text in the code fragment will undergo substitutions where `$N` or
391 `$<N`, for some numeric `N`, will be replaced with a variable holding
392 the parse information for the particular symbol in the production.
393 `$0` is the head of the production, `$1` is the first symbol of the
394 body, etc. The type of `$N` for a terminal symbol is `struct token`.
395 For a non-terminal, it is whatever has been declared for that symbol.
396 The `<` may be included for symbols declared as storing a reference
397 (not a structure) and means that the reference is being moved out, so
398 it will not automatically be freed.
400 While building productions we will need to add to an array which needs to
404 static void array_add(void *varray, int *cnt, void *new)
406 void ***array = varray;
409 current = ((*cnt-1) | (step-1))+1;
410 if (*cnt == current) {
413 *array = realloc(*array, current * sizeof(void*));
415 (*array)[*cnt] = new;
419 Collecting the code fragment simply involves reading tokens until we
420 hit the end token `}$`, and noting the character position of the start and
424 static struct text collect_code(struct token_state *state,
429 code.txt = start.txt.txt + start.txt.len;
431 t = token_next(state);
432 while (t.node == start.node &&
433 t.num != TK_close && t.num != TK_error &&
435 if (t.num == TK_close && t.node == start.node)
436 code.len = t.txt.txt - code.txt;
442 Now we have all the bits we need to parse a full production.
447 ###### grammar fields
448 struct production **productions;
449 int production_count;
451 ###### production fields
453 struct symbol **body;
459 int first_production;
462 static char *parse_production(struct grammar *g,
464 struct token_state *state)
466 /* Head has already been parsed. */
469 struct production p, *pp;
471 memset(&p, 0, sizeof(p));
473 tk = token_next(state);
474 while (tk.num == TK_ident || tk.num == TK_mark) {
475 struct symbol *bs = sym_find(g, tk.txt);
476 if (bs->type == Unknown)
478 if (bs->type == Virtual) {
479 err = "Virtual symbol not permitted in production";
482 if (bs->precedence) {
483 p.precedence = bs->precedence;
486 array_add(&p.body, &p.body_size, bs);
487 tk = token_next(state);
489 if (tk.num == TK_virtual) {
491 tk = token_next(state);
492 if (tk.num != TK_ident) {
493 err = "word required after $$";
496 vs = sym_find(g, tk.txt);
497 if (vs->type != Virtual) {
498 err = "symbol after $$ must be virtual";
501 p.precedence = vs->precedence;
503 tk = token_next(state);
505 if (tk.num == TK_open) {
506 p.code_line = tk.line;
507 p.code = collect_code(state, tk);
508 if (p.code.txt == NULL) {
509 err = "code fragment not closed properly";
512 tk = token_next(state);
514 if (tk.num != TK_newline && tk.num != TK_eof) {
515 err = "stray tokens at end of line";
518 pp = malloc(sizeof(*pp));
520 array_add(&g->productions, &g->production_count, pp);
523 while (tk.num != TK_newline && tk.num != TK_eof)
524 tk = token_next(state);
528 With the ability to parse production and dollar-lines, we have nearly all
529 that we need to parse a grammar from a `code_node`.
531 The head of the first production will effectively be the `start` symbol of
532 the grammar. However it won't _actually_ be so. Processing the grammar is
533 greatly simplified if the real start symbol only has a single production,
534 and expects `$eof` as the final terminal. So when we find the first
535 explicit production we insert an extra production as production zero which
538 ###### Example: production 0
541 where `START` is the first non-terminal given.
543 ###### create production zero
544 struct production *p = calloc(1,sizeof(*p));
545 struct text start = {"$start",6};
546 struct text eof = {"$eof",4};
547 struct text code = {"$0 = $<1;", 9};
548 p->head = sym_find(g, start);
549 p->head->type = Nonterminal;
550 p->head->struct_name = g->current_type;
551 p->head->isref = g->type_isref;
552 if (g->current_type.txt)
554 array_add(&p->body, &p->body_size, head);
555 array_add(&p->body, &p->body_size, sym_find(g, eof));
556 p->head->first_production = g->production_count;
557 array_add(&g->productions, &g->production_count, p);
559 Now we are ready to read in the grammar. We ignore comments
560 and strings so that the marks which introduce them can be
561 used as terminals in the grammar. We don't ignore numbers
562 even though we don't allow them as that causes the scanner
563 to produce errors that the parser is better positioned to handle.
566 static struct grammar *grammar_read(struct code_node *code)
568 struct token_config conf = {
571 .words_marks = known,
572 .known_count = sizeof(known)/sizeof(known[0]),
574 .ignored = (1 << TK_line_comment)
575 | (1 << TK_block_comment)
578 | (1 << TK_multi_string)
583 struct token_state *state = token_open(code, &conf);
585 struct symbol *head = NULL;
589 g = calloc(1, sizeof(*g));
592 for (tk = token_next(state); tk.num != TK_eof;
593 tk = token_next(state)) {
594 if (tk.num == TK_newline)
596 if (tk.num == TK_ident) {
598 head = sym_find(g, tk.txt);
599 if (head->type == Nonterminal)
600 err = "This non-terminal has already be used.";
601 else if (head->type == Virtual)
602 err = "Virtual symbol not permitted in head of production";
604 head->type = Nonterminal;
605 head->struct_name = g->current_type;
606 head->isref = g->type_isref;
607 if (g->production_count == 0) {
608 ## create production zero
610 head->first_production = g->production_count;
611 tk = token_next(state);
612 if (tk.num == TK_mark &&
613 text_is(tk.txt, "->"))
614 err = parse_production(g, head, state);
616 err = "'->' missing in production";
618 } else if (tk.num == TK_mark
619 && text_is(tk.txt, "|")) {
620 // another production for same non-term
622 err = parse_production(g, head, state);
624 err = "First production must have a head";
625 } else if (tk.num == TK_mark
626 && text_is(tk.txt, "$")) {
627 err = dollar_line(state, g, 0);
628 } else if (tk.num == TK_mark
629 && text_is(tk.txt, "$*")) {
630 err = dollar_line(state, g, 1);
632 err = "Unrecognised token at start of line.";
640 fprintf(stderr, "Error at line %d: %s\n",
647 ## Analysing the grammar
649 The central task in analysing the grammar is to determine a set of
650 states to drive the parsing process. This will require finding
651 various sets of symbols and of "items". Some of these sets will need
652 to attach information to each element in the set, so they are more
655 Each "item" may have a 'look-ahead' or `LA` set associated with
656 it. Many of these will be the same as each other. To avoid memory
657 wastage, and to simplify some comparisons of sets, these sets will be
658 stored in a list of unique sets, each assigned a number.
660 Once we have the data structures in hand to manage these sets and
661 lists, we can start setting the 'nullable' flag, build the 'FIRST'
662 sets, and then create the item sets which define the various states.
666 Though we don't only store symbols in these sets, they are the main
667 things we store, so they are called symbol sets or "symsets".
669 A symset has a size, an array of shorts, and an optional array of data
670 values, which are also shorts. If the array of data is not present,
671 we store an impossible pointer, as `NULL` is used to indicate that no
672 memory has been allocated yet;
677 unsigned short *syms, *data;
679 #define NO_DATA ((unsigned short *)1)
680 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
681 const struct symset INIT_DATASET = { 0, NULL, NULL };
683 The arrays of shorts are allocated in blocks of 8 and are kept sorted
684 by using an insertion sort. We don't explicitly record the amount of
685 allocated space as it can be derived directly from the current `cnt` using
686 `((cnt - 1) | 7) + 1`.
689 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
692 int current = ((s->cnt-1) | 7) + 1;
693 if (current == s->cnt) {
695 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
696 if (s->data != NO_DATA)
697 s->data = realloc(s->data, sizeof(*s->data) * current);
700 while (i > 0 && s->syms[i-1] > key) {
701 s->syms[i] = s->syms[i-1];
702 if (s->data != NO_DATA)
703 s->data[i] = s->data[i-1];
707 if (s->data != NO_DATA)
712 Finding a symbol (or item) in a `symset` uses a simple binary search.
713 We return the index where the value was found (so data can be accessed),
714 or `-1` to indicate failure.
716 static int symset_find(struct symset *ss, unsigned short key)
723 while (lo + 1 < hi) {
724 int mid = (lo + hi) / 2;
725 if (ss->syms[mid] <= key)
730 if (ss->syms[lo] == key)
735 We will often want to form the union of two symsets. When we do, we
736 will often want to know if anything changed (as that might mean there
737 is more work to do). So `symset_union` reports whether anything was
738 added to the first set. We use a slow quadratic approach as these
739 sets don't really get very big. If profiling shows this to be a problem it
740 can be optimised later.
742 static int symset_union(struct symset *a, struct symset *b)
746 for (i = 0; i < b->cnt; i++)
747 if (symset_find(a, b->syms[i]) < 0) {
748 unsigned short data = 0;
749 if (b->data != NO_DATA)
751 symset_add(a, b->syms[i], data);
757 And of course we must be able to free a symset.
759 static void symset_free(struct symset ss)
762 if (ss.data != NO_DATA)
768 Some symsets are simply stored somewhere appropriate in the `grammar`
769 data structure, others need a bit of indirection. This applies
770 particularly to "LA" sets. These will be paired with "items" in an
771 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
772 stored in the `data` field, so we need a mapping from a small (short)
773 number to an LA `symset`.
775 As mentioned earlier this involves creating a list of unique symsets.
777 For now, we just use a linear list sorted by insertion. A skiplist
778 would be more efficient and may be added later.
785 struct setlist *next;
788 ###### grammar fields
789 struct setlist *sets;
794 static int ss_cmp(struct symset a, struct symset b)
797 int diff = a.cnt - b.cnt;
800 for (i = 0; i < a.cnt; i++) {
801 diff = (int)a.syms[i] - (int)b.syms[i];
808 static int save_set(struct grammar *g, struct symset ss)
810 struct setlist **sl = &g->sets;
814 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
821 s = malloc(sizeof(*s));
830 Finding a set by number is currently performed by a simple linear search.
831 If this turns out to hurt performance, we can store the sets in a dynamic
832 array like the productions.
834 static struct symset set_find(struct grammar *g, int num)
836 struct setlist *sl = g->sets;
837 while (sl && sl->num != num)
843 ### Setting `nullable`
845 We set `nullable` on the head symbol for any production for which all
846 the body symbols (if any) are nullable. As this is a recursive
847 definition, any change in the `nullable` setting means that we need to
848 re-evaluate where it needs to be set.
850 We simply loop around performing the same calculations until no more
857 static void set_nullable(struct grammar *g)
860 while (check_again) {
863 for (p = 0; p < g->production_count; p++) {
864 struct production *pr = g->productions[p];
867 if (pr->head->nullable)
869 for (s = 0; s < pr->body_size; s++)
870 if (! pr->body[s]->nullable)
872 if (s == pr->body_size) {
873 pr->head->nullable = 1;
880 ### Setting `line_like`
882 In order to be able to ignore newline tokens when not relevant, but
883 still include them in the parse when needed, we will need to know
884 which states can start a "line-like" section of code. We ignore
885 newlines when there is an indent since the most recent start of a
888 A "line_like" symbol is simply any symbol that can derive a NEWLINE.
889 If a symbol cannot derive a NEWLINE, then it is only part of a line -
890 so is word-like. If it can derive a NEWLINE, then we consider it to
894 Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
895 is the head of a production that contains a line_like symbol is also a
896 line-like symbol. We use a new field `line_like` to record this
897 attribute of symbols, and compute it in a repetitive manner similar to
904 static void set_line_like(struct grammar *g)
907 g->symtab[TK_newline]->line_like = 1;
908 while (check_again) {
911 for (p = 0; p < g->production_count; p++) {
912 struct production *pr = g->productions[p];
915 if (pr->head->line_like)
918 for (s = 0 ; s < pr->body_size; s++) {
919 if (pr->body[s]->line_like) {
920 pr->head->line_like = 1;
929 ### Building the `first` sets
931 When calculating what can follow a particular non-terminal, we will need to
932 know what the "first" terminal in any subsequent non-terminal might be. So
933 we calculate the `first` set for every non-terminal and store them in an
934 array. We don't bother recording the "first" set for terminals as they are
937 As the "first" for one symbol might depend on the "first" of another,
938 we repeat the calculations until no changes happen, just like with
939 `set_nullable`. This makes use of the fact that `symset_union`
940 reports if any change happens.
942 The core of this, which finds the "first" of part of a production body,
943 will be reused for computing the "follow" sets, so we split it out
944 into a separate function.
946 ###### grammar fields
947 struct symset *first;
951 static int add_first(struct production *pr, int start,
952 struct symset *target, struct grammar *g,
957 for (s = start; s < pr->body_size; s++) {
958 struct symbol *bs = pr->body[s];
959 if (bs->type == Terminal) {
960 if (symset_find(target, bs->num) < 0) {
961 symset_add(target, bs->num, 0);
965 } else if (symset_union(target, &g->first[bs->num]))
971 *to_end = (s == pr->body_size);
975 static void build_first(struct grammar *g)
979 g->first = calloc(g->num_syms, sizeof(g->first[0]));
980 for (s = 0; s < g->num_syms; s++)
981 g->first[s] = INIT_SYMSET;
983 while (check_again) {
986 for (p = 0; p < g->production_count; p++) {
987 struct production *pr = g->productions[p];
988 struct symset *head = &g->first[pr->head->num];
990 if (add_first(pr, 0, head, g, NULL))
996 ### Building the `follow` sets.
998 There are two different situations when we will want to generate "follow"
999 sets. If we are doing an SLR analysis, we want to generate a single
1000 "follow" set for each non-terminal in the grammar. That is what is
1001 happening here. If we are doing an LALR or LR analysis we will want
1002 to generate a separate "LA" set for each item. We do that later
1003 in state generation.
1005 There are two parts to generating a "follow" set. Firstly we look at
1006 every place that any non-terminal appears in the body of any
1007 production, and we find the set of possible "first" symbols after
1008 there. This is done using `add_first` above and only needs to be done
1009 once as the "first" sets are now stable and will not change.
1013 for (p = 0; p < g->production_count; p++) {
1014 struct production *pr = g->productions[p];
1017 for (b = 0; b < pr->body_size - 1; b++) {
1018 struct symbol *bs = pr->body[b];
1019 if (bs->type == Terminal)
1021 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1025 The second part is to add the "follow" set of the head of a production
1026 to the "follow" sets of the final symbol in the production, and any
1027 other symbol which is followed only by `nullable` symbols. As this
1028 depends on "follow" itself we need to repeatedly perform the process
1029 until no further changes happen.
1033 for (again = 0, p = 0;
1034 p < g->production_count;
1035 p < g->production_count-1
1036 ? p++ : again ? (again = 0, p = 0)
1038 struct production *pr = g->productions[p];
1041 for (b = pr->body_size - 1; b >= 0; b--) {
1042 struct symbol *bs = pr->body[b];
1043 if (bs->type == Terminal)
1045 if (symset_union(&g->follow[bs->num],
1046 &g->follow[pr->head->num]))
1053 We now just need to create and initialise the `follow` list to get a
1056 ###### grammar fields
1057 struct symset *follow;
1060 static void build_follow(struct grammar *g)
1063 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1064 for (s = 0; s < g->num_syms; s++)
1065 g->follow[s] = INIT_SYMSET;
1069 ### Building itemsets and states
1071 There are three different levels of detail that can be chosen for
1072 building the itemsets and states for the LR grammar. They are:
1074 1. LR(0) or SLR(1), where no look-ahead is considered.
1075 2. LALR(1) where we build look-ahead sets with each item and merge
1076 the LA sets when we find two paths to the same "kernel" set of items.
1077 3. LR(1) where different look-ahead for any item in the set means
1078 a different state must be created.
1080 ###### forward declarations
1081 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1083 We need to be able to look through existing states to see if a newly
1084 generated state already exists. For now we use a simple sorted linked
1087 An item is a pair of numbers: the production number and the position of
1088 "DOT", which is an index into the body of the production.
1089 As the numbers are not enormous we can combine them into a single "short"
1090 and store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1091 production number (so 2000 productions with a maximum of 31 symbols in the
1094 Comparing the itemsets will be a little different to comparing symsets
1095 as we want to do the lookup after generating the "kernel" of an
1096 itemset, so we need to ignore the offset=zero items which are added during
1099 To facilitate this, we modify the "DOT" number so that "0" sorts to
1100 the end of the list in the symset, and then only compare items before
1104 static inline unsigned short item_num(int production, int index)
/* Pack an item: low 11 bits hold the production number, the top 5 bits
 * hold 31-index, so that a DOT index of 0 sorts to the end of a symset.
 */
1106 	return production | ((31-index) << 11);
1108 static inline int item_prod(unsigned short item)
/* Extract the production number from the low 11 bits of an item. */
1110 	return item & 0x7ff;
1112 static inline int item_index(unsigned short item)
/* Recover the DOT index from the top 5 bits, inverting the 31-index
 * encoding used by item_num().
 */
1114 	return (31-(item >> 11)) & 0x1f;
1117 For LR(1) analysis we need to compare not just the itemset in a state
1118 but also the LA sets. As we assign each unique LA set a number, we
1119 can just compare the symset and the data values together.
1122 static int itemset_cmp(struct symset a, struct symset b,
1123 enum grammar_type type)
1129 i < a.cnt && i < b.cnt &&
1130 item_index(a.syms[i]) > 0 &&
1131 item_index(b.syms[i]) > 0;
1133 int diff = a.syms[i] - b.syms[i];
1137 diff = a.data[i] - b.data[i];
1142 if (i == a.cnt || item_index(a.syms[i]) == 0)
1146 if (i == b.cnt || item_index(b.syms[i]) == 0)
1152 if (type < LR1 || av == -1)
1155 a.data[i] - b.data[i];
1158 It will be helpful to know if an itemset has been "completed" or not,
1159 particularly for LALR where itemsets get merged, at which point they
1160 need to be considered for completion again. So a `completed` flag is needed.
1162 For correct handling of `TK_newline` when parsing, we will need to
1163 know which states (itemsets) can occur at the start of a line, so we
1164 will record a `starts_line` flag too whenever DOT is at the start of a
1167 Finally, for handling `TK_out` we need to know whether productions in the
1168 current state started *before* the most recent indent. A state
1169 doesn't usually keep details of individual productions, so we need to
1170 add one extra detail. `min_prefix` is the smallest non-zero number of
1171 symbols *before* DOT in any production in an itemset. This will allow
1172 us to determine if the most recent indent is sufficiently recent
1173 to cancel it against a `TK_out`. If it was seen longer ago than the
1174 `min_prefix`, and if the current state cannot be reduced, then the
1175 indented section must have ended in the middle of a syntactic unit, so
1176 an error must be signaled.
1178 And now we can build the list of itemsets. The lookup routine returns
1179 both a success flag and a pointer to where in the list an insert
1180 should happen, so we don't need to search a second time.
1184 struct itemset *next;
1186 struct symset items;
1187 struct symset go_to;
1189 unsigned short precedence;
1195 ###### grammar fields
1196 struct itemset *items;
1200 static int itemset_find(struct grammar *g, struct itemset ***where,
1201 struct symset kernel, enum grammar_type type)
1203 struct itemset **ip;
1205 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1206 struct itemset *i = *ip;
1208 diff = itemset_cmp(i->items, kernel, type);
1221 Adding an itemset may require merging the LA sets if LALR analysis is
1222 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1223 clear the `completed` flag so that the dependants of this itemset will be
1224 recalculated and their LA sets updated.
1226 `add_itemset` must consume the symsets it is passed, either by adding
1227 them to a data structure, or freeing them.
1229 static int add_itemset(struct grammar *g, struct symset ss,
1230 enum assoc assoc, unsigned short precedence,
1231 enum grammar_type type)
1233 struct itemset **where, *is;
1235 int found = itemset_find(g, &where, ss, type);
1237 is = calloc(1, sizeof(*is));
1238 is->state = g->states;
1242 is->precedence = precedence;
1244 is->go_to = INIT_DATASET;
1253 for (i = 0; i < ss.cnt; i++) {
1254 struct symset temp = INIT_SYMSET, s;
1255 if (ss.data[i] == is->items.data[i])
1257 s = set_find(g, is->items.data[i]);
1258 symset_union(&temp, &s);
1259 s = set_find(g, ss.data[i]);
1260 if (symset_union(&temp, &s)) {
1261 is->items.data[i] = save_set(g, temp);
1272 To build all the itemsets, we first insert the initial itemset made
1273 from production zero, complete each itemset, and then generate new
1274 itemsets from old until no new ones can be made.
1276 Completing an itemset means finding all the items where "DOT" is followed by
1277 a nonterminal and adding "DOT=0" items for every production from that
1278 non-terminal - providing each item hasn't already been added.
1280 If LA sets are needed, the LA set for each new item is found using
1281 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1282 be supplemented by the LA set for the item which produces the new item.
1284 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1285 is used in the next stage.
1286 If any of these symbols are flagged as `line_like`, then this
1287 state must be a `starts_line` state so now is a good time to record that.
1289 When itemsets are created we assign a precedence to the itemset from
1290 the complete item, if there is one. We ignore the possibility of
1291 there being two and don't (currently) handle precedence in such
1292 grammars. When completing a grammar we ignore any item where DOT is
1293 followed by a terminal with a precedence lower (numerically higher)
1294 than that for the itemset. Unless the terminal has right
1295 associativity, we also ignore items where the terminal has the same
1296 precedence. The result is that unwanted items are still in the
1297 itemset, but the terminal doesn't get into the go to set, so the item
1300 ###### complete itemset
1301 for (i = 0; i < is->items.cnt; i++) {
1302 int p = item_prod(is->items.syms[i]);
1303 int bs = item_index(is->items.syms[i]);
1304 struct production *pr = g->productions[p];
1307 struct symset LA = INIT_SYMSET;
1308 unsigned short sn = 0;
1310 if (is->min_prefix == 0 ||
1311 (bs > 0 && bs < is->min_prefix))
1312 is->min_prefix = bs;
1313 if (bs == pr->body_size)
1316 if (s->precedence && is->precedence &&
1317 is->precedence < s->precedence)
1318 /* This terminal has a low precedence and
1319 * shouldn't be shifted
1322 if (s->precedence && is->precedence &&
1323 is->precedence == s->precedence && s->assoc != Right)
1324 /* This terminal has a matching precedence and is
1325 * not Right-associative, so we mustn't shift it.
1328 if (symset_find(&done, s->num) < 0) {
1329 symset_add(&done, s->num, 0);
1331 is->starts_line = 1;
1333 if (s->type != Nonterminal)
1339 add_first(pr, bs+1, &LA, g, &to_end);
1341 struct symset ss = set_find(g, is->items.data[i]);
1342 symset_union(&LA, &ss);
1344 sn = save_set(g, LA);
1345 LA = set_find(g, sn);
1348 /* Add productions for this symbol */
1349 for (p2 = s->first_production;
1350 p2 < g->production_count &&
1351 g->productions[p2]->head == s;
1353 int itm = item_num(p2, 0);
1354 int pos = symset_find(&is->items, itm);
1356 symset_add(&is->items, itm, sn);
1357 /* Will have re-ordered, so start
1358 * from beginning again */
1360 } else if (type >= LALR) {
1361 struct symset ss = set_find(g, is->items.data[pos]);
1362 struct symset tmp = INIT_SYMSET;
1364 symset_union(&tmp, &ss);
1365 if (symset_union(&tmp, &LA)) {
1366 is->items.data[pos] = save_set(g, tmp);
1374 For each symbol we found (and placed in `done`) we collect all the items for
1375 which this symbol is next, and create a new itemset with "DOT" advanced over
1376 the symbol. This is then added to the collection of itemsets (or merged
1377 with a pre-existing itemset).
1379 ###### derive itemsets
1380 // Now we have a completed itemset, so we need to
1381 // compute all the 'next' itemsets and create them
1382 // if they don't exist.
1383 for (i = 0; i < done.cnt; i++) {
1385 unsigned short state;
1386 struct symbol *sym = g->symtab[done.syms[i]];
1387 enum assoc assoc = Non;
1388 unsigned short precedence = 0;
1389 struct symset newitemset = INIT_SYMSET;
1391 newitemset = INIT_DATASET;
1393 for (j = 0; j < is->items.cnt; j++) {
1394 int itm = is->items.syms[j];
1395 int p = item_prod(itm);
1396 int bp = item_index(itm);
1397 struct production *pr = g->productions[p];
1398 unsigned short la = 0;
1401 if (bp == pr->body_size)
1403 if (pr->body[bp] != sym)
1406 la = is->items.data[j];
1407 pos = symset_find(&newitemset, pr->head->num);
1408 if (bp + 1 == pr->body_size &&
1409 pr->precedence > 0 &&
1411 pr->precedence < precedence)) {
1412 // new itemset is reducible and has a precedence.
1413 precedence = pr->precedence;
1417 symset_add(&newitemset, item_num(p, bp+1), la);
1418 else if (type >= LALR) {
1419 // Need to merge la set.
1420 int la2 = newitemset.data[pos];
1422 struct symset ss = set_find(g, la2);
1423 struct symset LA = INIT_SYMSET;
1424 symset_union(&LA, &ss);
1425 ss = set_find(g, la);
1426 if (symset_union(&LA, &ss))
1427 newitemset.data[pos] = save_set(g, LA);
1433 state = add_itemset(g, newitemset, assoc, precedence, type);
1434 if (symset_find(&is->go_to, done.syms[i]) < 0)
1435 symset_add(&is->go_to, done.syms[i], state);
1438 All that is left is to create the initial itemset from production zero, and
1439 with `TK_eof` as the LA set.
1442 static void build_itemsets(struct grammar *g, enum grammar_type type)
1444 struct symset first = INIT_SYMSET;
1447 unsigned short la = 0;
1449 // LA set just has eof
1450 struct symset eof = INIT_SYMSET;
1451 symset_add(&eof, TK_eof, 0);
1452 la = save_set(g, eof);
1453 first = INIT_DATASET;
1455 // production 0, offset 0 (with no data)
1456 symset_add(&first, item_num(0, 0), la);
1457 add_itemset(g, first, Non, 0, type);
1458 for (again = 0, is = g->items;
1460 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1462 struct symset done = INIT_SYMSET;
1473 ### Completing the analysis.
1475 The exact process of analysis depends on which level was requested. For
1476 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1477 `LR1` we need first, but not follow. For `SLR` we need both.
1479 We don't build the "action" tables that you might expect as the parser
1480 doesn't use them. They will be calculated to some extent if needed for
1483 Once we have built everything we allocate arrays for the two lists:
1484 symbols and itemsets. This allows more efficient access during reporting.
1485 The symbols are grouped as terminals and non-terminals and we record the
1486 changeover point in `first_nonterm`.
1488 ###### grammar fields
1489 struct symbol **symtab;
1490 struct itemset **statetab;
1493 ###### grammar_analyse
1495 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1499 int snum = TK_reserved;
1500 for (s = g->syms; s; s = s->next)
1501 if (s->num < 0 && s->type == Terminal) {
1505 g->first_nonterm = snum;
1506 for (s = g->syms; s; s = s->next)
1512 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1513 for (s = g->syms; s; s = s->next)
1514 g->symtab[s->num] = s;
1524 build_itemsets(g, type);
1526 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1527 for (is = g->items; is ; is = is->next)
1528 g->statetab[is->state] = is;
1531 ## Reporting on the Grammar
1533 The purpose of the report is to give the grammar developer insight into
1534 how the grammar parser will work. It is basically a structured dump of
1535 all the tables that have been generated, plus a description of any conflicts.
1537 ###### grammar_report
1538 static int grammar_report(struct grammar *g, enum grammar_type type)
1544 return report_conflicts(g, type);
1547 Firstly we have the complete list of symbols, together with the
1548 "FIRST" set if that was generated. We add a mark to each symbol to
1549 show if it can end in a newline (`>`), if it is considered to be
1550 "line-like" (`<`), or if it is nullable (`.`).
1554 static void report_symbols(struct grammar *g)
1558 printf("SYMBOLS + FIRST:\n");
1560 printf("SYMBOLS:\n");
1562 for (n = 0; n < g->num_syms; n++) {
1563 struct symbol *s = g->symtab[n];
1567 printf(" %c%c%3d%c: ",
1568 s->nullable ? '.':' ',
1569 s->line_like ? '<':' ',
1570 s->num, symtypes[s->type]);
1573 printf(" (%d%s)", s->precedence,
1574 assoc_names[s->assoc]);
1576 if (g->first && s->type == Nonterminal) {
1579 for (j = 0; j < g->first[n].cnt; j++) {
1582 prtxt(g->symtab[g->first[n].syms[j]]->name);
1589 Then we have the follow sets if they were computed.
1591 static void report_follow(struct grammar *g)
1594 printf("FOLLOW:\n");
1595 for (n = 0; n < g->num_syms; n++)
1596 if (g->follow[n].cnt) {
1600 prtxt(g->symtab[n]->name);
1601 for (j = 0; j < g->follow[n].cnt; j++) {
1604 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1610 And finally the item sets. These include the GO TO tables and, for
1611 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1612 it up a bit. First the items, with production number and associativity.
1614 static void report_item(struct grammar *g, int itm)
1616 int p = item_prod(itm);
1617 int dot = item_index(itm);
1618 struct production *pr = g->productions[p];
1622 prtxt(pr->head->name);
1624 for (i = 0; i < pr->body_size; i++) {
1625 printf(" %s", (dot == i ? ". ": ""));
1626 prtxt(pr->body[i]->name);
1628 if (dot == pr->body_size)
1631 if (pr->precedence && dot == pr->body_size)
1632 printf(" (%d%s)", pr->precedence,
1633 assoc_names[pr->assoc]);
1634 if (dot < pr->body_size &&
1635 pr->body[dot]->precedence) {
1636 struct symbol *s = pr->body[dot];
1637 printf(" [%d%s]", s->precedence,
1638 assoc_names[s->assoc]);
1643 The LA sets which are (possibly) reported with each item:
1645 static void report_la(struct grammar *g, int lanum)
1647 struct symset la = set_find(g, lanum);
1651 printf(" LOOK AHEAD(%d)", lanum);
1652 for (i = 0; i < la.cnt; i++) {
1655 prtxt(g->symtab[la.syms[i]]->name);
1660 Then the go to sets:
1663 static void report_goto(struct grammar *g, struct symset gt)
1668 for (i = 0; i < gt.cnt; i++) {
1670 prtxt(g->symtab[gt.syms[i]]->name);
1671 printf(" -> %d\n", gt.data[i]);
1675 Now we can report all the item sets complete with items, LA sets, and GO TO.
1677 static void report_itemsets(struct grammar *g)
1680 printf("ITEM SETS(%d)\n", g->states);
1681 for (s = 0; s < g->states; s++) {
1683 struct itemset *is = g->statetab[s];
1684 printf(" Itemset %d:%s min prefix=%d",
1685 s, is->starts_line?" (startsline)":"", is->min_prefix);
1687 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1689 for (j = 0; j < is->items.cnt; j++) {
1690 report_item(g, is->items.syms[j]);
1691 if (is->items.data != NO_DATA)
1692 report_la(g, is->items.data[j]);
1694 report_goto(g, is->go_to);
1698 ### Reporting conflicts
1700 Conflict detection varies a lot among different analysis levels. However
1701 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1702 LR1 are also similar as they have FOLLOW or LA sets.
1706 ## conflict functions
1708 static int report_conflicts(struct grammar *g, enum grammar_type type)
1711 printf("Conflicts:\n");
1713 cnt = conflicts_lr0(g, type);
1715 cnt = conflicts_slr(g, type);
1717 printf(" - no conflicts\n");
1721 LR0 conflicts are any state which have both a reducible item and
1722 a shiftable item, or two reducible items.
1724 LR05 conflicts only occur if two possible reductions exist,
1725 as shifts always over-ride reductions.
1727 ###### conflict functions
1728 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1732 for (i = 0; i < g->states; i++) {
1733 struct itemset *is = g->statetab[i];
1734 int last_reduce = -1;
1735 int prev_reduce = -1;
1736 int last_shift = -1;
1740 for (j = 0; j < is->items.cnt; j++) {
1741 int itm = is->items.syms[j];
1742 int p = item_prod(itm);
1743 int bp = item_index(itm);
1744 struct production *pr = g->productions[p];
1746 if (bp == pr->body_size) {
1747 prev_reduce = last_reduce;
1751 if (pr->body[bp]->type == Terminal)
1754 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1755 printf(" State %d has both SHIFT and REDUCE:\n", i);
1756 report_item(g, is->items.syms[last_shift]);
1757 report_item(g, is->items.syms[last_reduce]);
1760 if (prev_reduce >= 0) {
1761 printf(" State %d has 2 (or more) reducible items\n", i);
1762 report_item(g, is->items.syms[prev_reduce]);
1763 report_item(g, is->items.syms[last_reduce]);
1770 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1771 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1772 in the source of the look ahead set.
1774 We build two datasets to reflect the "action" table: one which maps
1775 terminals to items where that terminal could be shifted and another
1776 which maps terminals to items that could be reduced when the terminal
1777 is in look-ahead. We report when we get conflicts between the two.
1779 As a special case, if we find a SHIFT/REDUCE conflict, where a
1780 terminal that could be shifted is in the lookahead set of some
1781 reducible item, then we check if the reducible item also has
1782 `TK_newline` in its lookahead set. If it does, then a newline will
1783 force a reduction, but anything else can reasonably be shifted, so
1784 that isn't really a conflict. Such apparent conflicts do not get
1785 reported. This will not affect a "traditional" grammar that does not
1786 include newlines as tokens.
1788 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1793 for (i = 0; i < g->states; i++) {
1794 struct itemset *is = g->statetab[i];
1795 struct symset shifts = INIT_DATASET;
1796 struct symset reduce = INIT_DATASET;
1800 /* First collect the shifts */
1801 for (j = 0; j < is->items.cnt; j++) {
1802 unsigned short itm = is->items.syms[j];
1803 int p = item_prod(itm);
1804 int bp = item_index(itm);
1805 struct production *pr = g->productions[p];
1807 if (bp < pr->body_size &&
1808 pr->body[bp]->type == Terminal) {
1810 int sym = pr->body[bp]->num;
1811 if (symset_find(&shifts, sym) < 0)
1812 symset_add(&shifts, sym, itm);
1815 /* Now look for reductions and conflicts */
1816 for (j = 0; j < is->items.cnt; j++) {
1817 unsigned short itm = is->items.syms[j];
1818 int p = item_prod(itm);
1819 int bp = item_index(itm);
1820 struct production *pr = g->productions[p];
1822 if (bp < pr->body_size)
1827 la = g->follow[pr->head->num];
1829 la = set_find(g, is->items.data[j]);
1831 for (k = 0; k < la.cnt; k++) {
1832 int pos = symset_find(&shifts, la.syms[k]);
1833 if (pos >= 0 && symset_find(&la, TK_newline) < 0) {
1834 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1835 prtxt(g->symtab[la.syms[k]]->name);
1837 report_item(g, shifts.data[pos]);
1838 report_item(g, itm);
1841 pos = symset_find(&reduce, la.syms[k]);
1843 symset_add(&reduce, la.syms[k], itm);
1846 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1847 prtxt(g->symtab[la.syms[k]]->name);
1849 report_item(g, itm);
1850 report_item(g, reduce.data[pos]);
1854 symset_free(shifts);
1855 symset_free(reduce);
1861 ## Generating the parser
1863 The exported part of the parser is the `parse_XX` function, where the name
1864 `XX` is based on the name of the parser files.
1866 This takes a `code_node`, a partially initialized `token_config`, and an
1867 optional `FILE` to send tracing to. The `token_config` gets the list of
1868 known words added and then is used with the `code_node` to initialize the
1871 `parse_XX` then calls the library function `parser_run` to actually complete
1872 the parse. This needs the `states` table and function to call the various
1873 pieces of code provided in the grammar file, so they are generated first.
1875 ###### parser_generate
1877 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
1883 gen_reduce(f, g, file);
1886 fprintf(f, "#line 0 \"gen_parser\"\n");
1887 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1890 fprintf(f, "\tstruct token_state *tokens;\n");
1891 fprintf(f, "\tconfig->words_marks = known;\n");
1892 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1893 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1894 fprintf(f, "\ttokens = token_open(code, config);\n");
1895 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1896 fprintf(f, "\ttoken_close(tokens);\n");
1897 fprintf(f, "\treturn rv;\n");
1898 fprintf(f, "}\n\n");
1901 ### Known words table
1903 The known words table is simply an array of terminal symbols.
1904 The table of nonterminals used for tracing is a similar array.
1908 static void gen_known(FILE *f, struct grammar *g)
1911 fprintf(f, "#line 0 \"gen_known\"\n");
1912 fprintf(f, "static const char *known[] = {\n");
1913 for (i = TK_reserved;
1914 i < g->num_syms && g->symtab[i]->type == Terminal;
1916 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1917 g->symtab[i]->name.txt);
1918 fprintf(f, "};\n\n");
1921 static void gen_non_term(FILE *f, struct grammar *g)
1924 fprintf(f, "#line 0 \"gen_non_term\"\n");
1925 fprintf(f, "static const char *non_term[] = {\n");
1926 for (i = TK_reserved;
1929 if (g->symtab[i]->type == Nonterminal)
1930 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1931 g->symtab[i]->name.txt);
1932 fprintf(f, "};\n\n");
1935 ### States and the goto tables.
1937 For each state we record the goto table, the reducible production if
1938 there is one, or a symbol to shift for error recovery.
1939 Some of the details of the reducible production are stored in the
1940 `do_reduce` function to come later. Here we store the production number,
1941 the body size (useful for stack management) and the resulting symbol (useful
1942 for knowing how to free data later).
1944 The go to table is stored in a simple array of `sym` and corresponding
1947 ###### exported types
1955 const struct lookup * go_to;
1966 static void gen_goto(FILE *f, struct grammar *g)
1969 fprintf(f, "#line 0 \"gen_goto\"\n");
1970 for (i = 0; i < g->states; i++) {
1972 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1974 struct symset gt = g->statetab[i]->go_to;
1975 for (j = 0; j < gt.cnt; j++)
1976 fprintf(f, "\t{ %d, %d },\n",
1977 gt.syms[j], gt.data[j]);
1984 static void gen_states(FILE *f, struct grammar *g)
1987 fprintf(f, "#line 0 \"gen_states\"\n");
1988 fprintf(f, "static const struct state states[] = {\n");
1989 for (i = 0; i < g->states; i++) {
1990 struct itemset *is = g->statetab[i];
1991 int j, prod = -1, prod_len;
1993 for (j = 0; j < is->items.cnt; j++) {
1994 int itm = is->items.syms[j];
1995 int p = item_prod(itm);
1996 int bp = item_index(itm);
1997 struct production *pr = g->productions[p];
1999 if (bp < pr->body_size)
2001 /* This is what we reduce */
2002 if (prod < 0 || prod_len < pr->body_size) {
2004 prod_len = pr->body_size;
2009 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d },\n",
2010 i, is->go_to.cnt, i, prod,
2011 g->productions[prod]->body_size,
2012 g->productions[prod]->head->num,
2013 is->starts_line, is->min_prefix);
2015 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
2016 i, is->go_to.cnt, i,
2017 is->starts_line, is->min_prefix);
2019 fprintf(f, "};\n\n");
2022 ### The `do_reduce` function and the code
2024 When the parser engine decides to reduce a production, it calls `do_reduce`.
2025 This has two functions.
2027 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
2028 production being reduced. Secondly it runs the code that was included with
2029 the production if any.
2031 This code needs to be able to store data somewhere. Rather than requiring
2032 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
2033 `do_reduce` return the size to be saved.
2035 In order for the code to access "global" context, we pass in the
2036 "config" pointer that was passed to parser function. If the `struct
2037 token_config` is embedded in some larger structure, the reducing code
2038 can access the larger structure using pointer manipulation.
2040 The code fragment requires translation when written out. Any `$N` needs to
2041 be converted to a reference either to that buffer (if `$0`) or to the
2042 structure returned by a previous reduction. These pointers need to be cast
2043 to the appropriate type for each access. All this is handled in
2046 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
2047 This applies only to symbols with references (or pointers), not those with structures.
2048 The `<` implies that the reference is being moved out, so the object will not be
2049 automatically freed. This is equivalent to assigning `NULL` to the pointer.
2053 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2056 char *used = calloc(1, p->body_size);
2059 fprintf(f, "\t\t\t");
2060 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2074 if (*c < '0' || *c > '9') {
2081 while (c[1] >= '0' && c[1] <= '9') {
2083 n = n * 10 + *c - '0';
2086 fprintf(f, "(*(struct %.*s*%s)ret)",
2087 p->head->struct_name.len,
2088 p->head->struct_name.txt,
2089 p->head->isref ? "*":"");
2090 else if (n > p->body_size)
2091 fprintf(f, "$%d", n);
2092 else if (p->body[n-1]->type == Terminal)
2093 fprintf(f, "(*(struct token *)body[%d])",
2095 else if (p->body[n-1]->struct_name.txt == NULL)
2096 fprintf(f, "$%d", n);
2098 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2099 p->body[n-1]->struct_name.len,
2100 p->body[n-1]->struct_name.txt,
2101 p->body[n-1]->isref ? "*":"", n-1);
2106 for (i = 0; i < p->body_size; i++) {
2107 if (p->body[i]->struct_name.txt &&
2108 p->body[i]->isref &&
2110 // assume this has been copied out
2111 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2118 static void gen_reduce(FILE *f, struct grammar *g, char *file)
2121 fprintf(f, "#line 0 \"gen_reduce\"\n");
2122 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2124 fprintf(f, "\tint ret_size = 0;\n");
2126 fprintf(f, "\tswitch(prod) {\n");
2127 for (i = 0; i < g->production_count; i++) {
2128 struct production *p = g->productions[i];
2129 fprintf(f, "\tcase %d:\n", i);
2132 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2136 if (p->head->struct_name.txt)
2137 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2138 p->head->struct_name.len,
2139 p->head->struct_name.txt,
2140 p->head->isref ? "*":"");
2142 fprintf(f, "\t\tbreak;\n");
2144 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2149 As each non-terminal can potentially cause a different type of data
2150 structure to be allocated and filled in, we need to be able to free it when
2153 It is particularly important to have fine control over freeing during error
2154 recovery where individual stack frames might need to be freed.
2156 For this, the grammar author is required to define a `free_XX` function for
2157 each structure that is used by a non-terminal. `do_free` will call whichever
2158 is appropriate given a symbol number, and will call `free` (as is
2159 appropriate for tokens) on any terminal symbol.
2163 static void gen_free(FILE *f, struct grammar *g)
2167 fprintf(f, "#line 0 \"gen_free\"\n");
2168 fprintf(f, "static void do_free(short sym, void *asn)\n");
2170 fprintf(f, "\tif (!asn) return;\n");
2171 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2172 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2173 fprintf(f, "\tswitch(sym) {\n");
2175 for (i = 0; i < g->num_syms; i++) {
2176 struct symbol *s = g->symtab[i];
2178 s->type != Nonterminal ||
2179 s->struct_name.txt == NULL)
2182 fprintf(f, "\tcase %d:\n", s->num);
2184 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2186 s->struct_name.txt);
2187 fprintf(f, "\t\tfree(asn);\n");
2189 fprintf(f, "\t\tfree_%.*s(asn);\n",
2191 s->struct_name.txt);
2192 fprintf(f, "\t\tbreak;\n");
2194 fprintf(f, "\t}\n}\n\n");
2197 ## The main routine.
2199 There are three key parts to the "main" function of parsergen: processing
2200 the arguments, loading the grammar file, and dealing with the grammar.
2202 ### Argument processing.
2204 Command line options allow the selection of analysis mode, name of output
2205 file, and whether or not a report should be generated. By default we create
2206 a report only if no code output was requested.
2208 The `parse_XX` function name uses the basename of the output file which
2209 should not have a suffix (`.c`). `.c` is added to the given name for the
2210 code, and `.h` is added for the header (if header text is specified in the
2217 static const struct option long_options[] = {
2218 { "LR0", 0, NULL, '0' },
2219 { "LR05", 0, NULL, '5' },
2220 { "SLR", 0, NULL, 'S' },
2221 { "LALR", 0, NULL, 'L' },
2222 { "LR1", 0, NULL, '1' },
2223 { "tag", 1, NULL, 't' },
2224 { "report", 0, NULL, 'R' },
2225 { "output", 1, NULL, 'o' },
2226 { NULL, 0, NULL, 0 }
2228 const char *options = "05SL1t:Ro:";
2230 ###### process arguments
2232 char *outfile = NULL;
2237 enum grammar_type type = LR05;
2238 while ((opt = getopt_long(argc, argv, options,
2239 long_options, NULL)) != -1) {
2254 outfile = optarg; break;
2256 tag = optarg; break;
2258 fprintf(stderr, "Usage: parsergen ...\n");
2263 infile = argv[optind++];
2265 fprintf(stderr, "No input file given\n");
2268 if (outfile && report == 1)
2271 if (name && strchr(name, '/'))
2272 name = strrchr(name, '/')+1;
2274 if (optind < argc) {
2275 fprintf(stderr, "Excess command line arguments\n");
2279 ### Loading the grammar file
2281 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2284 Once we have extracted the code (with `mdcode`) we expect to find three
2285 sections: header, code, and grammar. Anything else that is not
2286 excluded by the `--tag` option is an error.
2288 "header" and "code" are optional, though it is hard to build a working
2289 parser with neither. "grammar" must be provided.
2293 #include <sys/mman.h>
2298 static void pr_err(char *msg)
2301 fprintf(stderr, "%s\n", msg);
2305 struct section *table;
2309 fd = open(infile, O_RDONLY);
2311 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2312 infile, strerror(errno));
2315 len = lseek(fd, 0, 2);
2316 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2317 table = code_extract(file, file+len, pr_err);
2319 struct code_node *hdr = NULL;
2320 struct code_node *code = NULL;
2321 struct code_node *gram = NULL;
2322 for (s = table; s; s = s->next) {
2323 struct text sec = s->section;
2324 if (tag && !strip_tag(&sec, tag))
2326 if (text_is(sec, "header"))
2328 else if (text_is(sec, "code"))
2330 else if (text_is(sec, "grammar"))
2333 fprintf(stderr, "Unknown content section: %.*s\n",
2334 s->section.len, s->section.txt);
2339 ### Processing the input
2341 As we need to append an extension to a filename and then open it for
2342 writing, and we need to do this twice, it helps to have a separate function.
2346 static FILE *open_ext(char *base, char *ext)
2348 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2350 strcat(strcpy(fn, base), ext);
2356 If we can read the grammar, then we analyse and optionally report on it. If
2357 the report finds conflicts we will exit with an error status.
2359 ###### process input
2360 struct grammar *g = NULL;
2362 fprintf(stderr, "No grammar section provided\n");
2365 g = grammar_read(gram);
2367 fprintf(stderr, "Failure to parse grammar\n");
2372 grammar_analyse(g, type);
2374 if (grammar_report(g, type))
2378 If a "headers" section is defined, we write it out and include a declaration
2379 for the `parse_XX` function so it can be used from separate code.
2381 if (rv == 0 && hdr && outfile) {
2382 FILE *f = open_ext(outfile, ".h");
2384 code_node_print(f, hdr, infile);
2385 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2389 fprintf(stderr, "Cannot create %s.h\n",
2395 And if all goes well and an output file was provided, we create the `.c`
2396 file with the code section (if any) and the parser tables and function.
2398 if (rv == 0 && outfile) {
2399 FILE *f = open_ext(outfile, ".c");
2402 code_node_print(f, code, infile);
2403 gen_parser(f, g, infile, name);
2406 fprintf(stderr, "Cannot create %s.c\n",
2412 And that about wraps it up. We need to set the locale so that UTF-8 is
2413 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2415 ###### File: parsergen.mk
2416 parsergen : parsergen.o libscanner.o libmdcode.o
2417 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2424 int main(int argc, char *argv[])
2429 setlocale(LC_ALL,"");
2431 ## process arguments
2438 ## The SHIFT/REDUCE parser
2440 Having analysed the grammar and generated all the tables, we only need the
2441 shift/reduce engine to bring it all together.
2443 ### Goto table lookup
2445 The parser generator has nicely provided us with goto tables sorted by
2446 symbol number. We need a binary search function to find a symbol in the
2449 ###### parser functions
2451 static int search(const struct state *l, int sym)
2454 int hi = l->go_to_cnt;
2458 while (lo + 1 < hi) {
2459 int mid = (lo + hi) / 2;
2460 if (l->go_to[mid].sym <= sym)
2465 if (l->go_to[lo].sym == sym)
2466 return l->go_to[lo].state;
2471 ### The state stack.
2473 The core data structure for the parser is the stack. This tracks all the
2474 symbols that have been recognised or partially recognised.
2476 The stack usually won't grow very large - maybe a few tens of entries. So
2477 we dynamically resize the array as required but never bother to shrink it
2480 We keep the stack as two separate allocations. One, `asn_stack` stores the
2481 "abstract syntax nodes" which are created by each reduction. When we call
2482 `do_reduce` we need to pass an array of the `asn`s of the body of the
2483 production, and by keeping a separate `asn` stack, we can just pass a
2484 pointer into this stack.
2486 The other allocation stores all other stack fields of which there are six.
2487 The `state` is the most important one and guides the parsing process. The
2488 `sym` is nearly unnecessary. However when we want to free entries from the
2489 `asn_stack`, it helps to know what type they are so we can call the right
2490 freeing function. The symbol leads us to the right free function through
2493 The `indents` count tracks the line indents within the symbol or
2494 immediately following it. These are used to allow indent information to
2495 guide parsing and error recovery.
2497 `since_newline` tracks how many stack frames since the last
2498 start-of-line (whether indented or not). So if `since_newline` is
2499 zero, then this symbol is at the start of a line. Similarly
2500 `since_indent` counts the number of states since an indent, it is zero
2501 precisely when `indents` is not zero.
2503 `newline_permitted` keeps track of whether newlines should be ignored
2506 The stack is most properly seen as alternating states and symbols -
2507 states, like the 'DOT' in items, are between symbols. Each frame in
2508 our stack holds a state and the symbol that was before it. The
2509 bottom of stack holds the start state but no symbol, as nothing came
2510 before the beginning.
2512 ###### parser functions
2517 short newline_permitted;
2521 short since_newline;
2531 Two operations are needed on the stack - shift (which is like push) and pop.
2533 Shift applies not only to terminals but also to non-terminals. When
2534 we reduce a production we will pop off entries corresponding to the
2535 body symbols, then push on an item for the head of the production.
2536 This last is exactly the same process as shifting in a terminal so we
2537 use the same function for both. In both cases we provide the symbol,
2538 the number of indents the symbol contains (which will be zero for a
2539 terminal symbol) and a flag indicating that the symbol was at (or was
2540 reduced from a symbol which was at) the start of a line. The state is
2541 deduced from the current top-of-stack state and the new symbol.
2543 To simplify other code we arrange for `shift` to fail if there is no `goto`
2544 state for the symbol. This is useful in basic parsing due to our design
2545 that we shift when we can, and reduce when we cannot. So the `shift`
2546 function reports if it could.
2548 `shift` is also used to push state zero onto the stack, so if the
2549 stack is empty, it always chooses zero as the next state.
2551 So `shift` finds the next state. If that succeeds it extends the
2552 allocations if needed and pushes all the information onto the stacks.
2554 Newlines are permitted after a `starts_line` state until an internal
2555 indent. If the new frame has neither a `starts_line` state nor an
2556 indent, newlines are permitted if the previous stack frame permitted
2559 ###### parser functions
2561 static int shift(struct parser *p,
2562 short sym, short indents, short start_of_line,
2564 const struct state states[])
2566 // Push an entry onto the stack
2567 struct frame next = {0};
2568 int newstate = p->tos
2569 ? search(&states[p->stack[p->tos-1].state],
2574 if (p->tos >= p->stack_size) {
2575 p->stack_size += 10;
2576 p->stack = realloc(p->stack, p->stack_size
2577 * sizeof(p->stack[0]));
2578 p->asn_stack = realloc(p->asn_stack, p->stack_size
2579 * sizeof(p->asn_stack[0]));
2582 next.indents = indents;
2583 next.state = newstate;
2584 if (states[newstate].starts_line)
2585 next.newline_permitted = 1;
2587 next.newline_permitted = 0;
2589 next.newline_permitted =
2590 p->stack[p->tos-1].newline_permitted;
2592 next.newline_permitted = 0;
2594 if (!start_of_line) {
2596 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2598 next.since_newline = 1;
2601 next.since_indent = 0;
2603 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2605 next.since_indent = 1;
2607 p->stack[p->tos] = next;
2608 p->asn_stack[p->tos] = asn;
2613 `pop` primarily moves the top of stack (`tos`) back down the required
2614 amount and frees any `asn` entries that need to be freed. It also
2615 collects a summary of the indents and line starts in the symbols that
2616 are being removed. It is called _after_ we reduce a production, just
2617 before we `shift` the nonterminal in.
2619 ###### parser functions
2621 static int pop(struct parser *p, int num,
2622 short *start_of_line,
2623 void(*do_free)(short sym, void *asn))
2630 for (i = 0; i < num; i++) {
2631 sol |= !p->stack[p->tos+i].since_newline;
2632 indents += p->stack[p->tos+i].indents;
2633 do_free(p->stack[p->tos+i].sym,
2634 p->asn_stack[p->tos+i]);
2637 *start_of_line = sol;
2641 ### Memory allocation
2643 The `scanner` returns tokens in a local variable - we want them in allocated
2644 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2645 a reduce is in a large buffer. Both of these require some allocation and
2646 copying, hence `memdup` and `tokcopy`.
2648 ###### parser includes
2651 ###### parser functions
2653 void *memdup(void *m, int len)
2659 memcpy(ret, m, len);
2663 static struct token *tok_copy(struct token tk)
2665 struct token *new = malloc(sizeof(*new));
2670 ### The heart of the parser.
2672 Now we have the parser. If we can shift we do, though newlines and
2673 reducing indenting may block that. If not and we can reduce we do
2674 that. If the production we reduced was production zero, then we have
2675 accepted the input and can finish.
2677 We return whatever `asn` was returned by reducing production zero.
2679 If we can neither shift nor reduce we have an error to handle. We pop
2680 single entries off the stack until we can shift the `TK_error` symbol, then
2681 drop input tokens until we find one we can shift into the new error state.
2683 When we find `TK_in` and `TK_out` tokens which report indents we need
2684 to handle them directly as the grammar cannot express what we want to
2687 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2688 record how many indents there are following the previous token.
2690 `TK_out` tokens must be canceled against an indent count
2691 within the stack. If we can reduce some symbols that are all since
2692 the most recent indent, then we do that first. If the minimum prefix
2693 of the current state then extends back before the most recent indent,
2694 that indent can be cancelled. If the minimum prefix is shorter, then
2695 the indent had ended prematurely and we must start error handling, which
2696 is still a work-in-progress.
2698 `TK_newline` tokens are ignored unless the top stack frame records
2699 that they are permitted. In that case they will not be considered for
2700 shifting if it is possible to reduce some symbols that are all since
2701 the most recent start of line. This is how a newline forcibly
2702 terminates any line-like structure - we try to reduce down to at most
2703 one symbol for each line where newlines are allowed.
2705 When, during error handling, we discard tokens read in, we want to keep
2706 discarding until we see one that is recognised. If we had a full set
2707 of LR(1) grammar states, this will mean looking in the look-ahead set,
2708 but we don't keep a full look-ahead set. We only record the subset
2709 that leads to SHIFT. We can, however, deduce the look-ahead set by
2710 looking at the SHIFT subsets for all states that we can get to by
2711 reducing zero or more times. So we need a little function which
2712 checks if a given token is in any of these look-ahead sets.
2714 ###### parser includes
2719 static int in_lookahead(struct token *tk, const struct state *states, int state)
2721 while (state >= 0) {
2722 if (search(&states[state], tk->num) >= 0)
2724 if (states[state].reduce_prod < 0)
2726 state = search(&states[state], states[state].reduce_sym);
2731 void *parser_run(struct token_state *tokens,
2732 const struct state states[],
2733 int (*do_reduce)(int, void**, struct token_config*, void*),
2734 void (*do_free)(short, void*),
2735 FILE *trace, const char *non_term[],
2736 struct token_config *config)
2738 struct parser p = { 0 };
2739 struct token *tk = NULL;
2743 shift(&p, TK_eof, 0, 1, NULL, states);
2745 struct token *err_tk;
2746 struct frame *tos = &p.stack[p.tos-1];
2748 tk = tok_copy(token_next(tokens));
2749 parser_trace(trace, &p,
2750 tk, states, non_term, config->known_count);
2752 if (tk->num == TK_in) {
2754 tos->since_newline = 0;
2755 tos->since_indent = 0;
2756 if (!states[tos->state].starts_line)
2757 tos->newline_permitted = 0;
2760 parser_trace_action(trace, "Record");
2763 if (tk->num == TK_out) {
2764 if (states[tos->state].reduce_size >= 0 &&
2765 states[tos->state].reduce_size <= tos->since_indent)
2767 if (states[tos->state].min_prefix >= tos->since_indent) {
2769 struct frame *in = tos - tos->since_indent;
2771 if (in->indents == 0) {
2772 /* Reassess since_indent and newline_permitted */
2774 in->since_indent = in[-1].since_indent + 1;
2775 in->newline_permitted = in[-1].newline_permitted;
2777 in->since_indent = 0;
2778 in->newline_permitted = 0;
2780 if (states[in->state].starts_line)
2781 in->newline_permitted = 1;
2784 in->since_indent = in[-1].since_indent + 1;
2785 if (states[in->state].starts_line)
2786 in->newline_permitted = 1;
2788 in->newline_permitted = in[-1].newline_permitted;
2793 parser_trace_action(trace, "Cancel");
2796 // fall through to error handling as both SHIFT and REDUCE
2799 if (tk->num == TK_newline) {
2800 if (!tos->newline_permitted) {
2803 parser_trace_action(trace, "Discard");
2806 if (tos->since_newline > 1 &&
2807 states[tos->state].reduce_size >= 0 &&
2808 states[tos->state].reduce_size <= tos->since_newline)
2811 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2813 parser_trace_action(trace, "Shift");
2817 if (states[tos->state].reduce_prod >= 0) {
2820 const struct state *nextstate = &states[tos->state];
2821 int prod = nextstate->reduce_prod;
2822 int size = nextstate->reduce_size;
2824 static char buf[16*1024];
2825 short indents, start_of_line;
2827 body = p.asn_stack + (p.tos - size);
2829 bufsize = do_reduce(prod, body, config, buf);
2831 indents = pop(&p, size, &start_of_line,
2833 res = memdup(buf, bufsize);
2834 memset(buf, 0, bufsize);
2835 if (!shift(&p, nextstate->reduce_sym,
2836 indents, start_of_line,
2838 if (prod != 0) abort();
2842 parser_trace_action(trace, "Reduce");
2845 /* Error. We walk up the stack until we
2846 * find a state which will accept TK_error.
2847 * We then shift in TK_error and see what state
2848 * that takes us too.
2849 * Then we discard input tokens until
2850 * we find one that is acceptable.
2852 parser_trace_action(trace, "ERROR");
2853 short indents = 0, start_of_line;
2855 err_tk = tok_copy(*tk);
2857 shift(&p, TK_error, 0, 0,
2858 err_tk, states) == 0)
2859 // discard this state
2860 indents += pop(&p, 1, &start_of_line, do_free);
2863 // no state accepted TK_error
2866 tos = &p.stack[p.tos-1];
2867 while (!in_lookahead(tk, states, tos->state) &&
2868 tk->num != TK_eof) {
2870 tk = tok_copy(token_next(tokens));
2871 if (tk->num == TK_in)
2873 if (tk->num == TK_out) {
2877 // FIXME update since_indent here
2880 tos->indents += indents;
2883 pop(&p, p.tos, NULL, do_free);
2889 ###### exported functions
2890 void *parser_run(struct token_state *tokens,
2891 const struct state states[],
2892 int (*do_reduce)(int, void**, struct token_config*, void*),
2893 void (*do_free)(short, void*),
2894 FILE *trace, const char *non_term[],
2895 struct token_config *config);
2899 Being able to visualize the parser in action can be invaluable when
2900 debugging the parser code, or trying to understand how the parse of a
2901 particular grammar progresses. The stack contains all the important
2902 state, so just printing out the stack every time around the parse loop
2903 can make it possible to see exactly what is happening.
2905 This doesn't explicitly show each SHIFT and REDUCE action. However they
2906 are easily deduced from the change between consecutive lines, and the
2907 details of each state can be found by cross referencing the states list
2908 in the stack with the "report" that parsergen can generate.
2910 For terminal symbols, we just dump the token. For non-terminals we
2911 print the name of the symbol. The look ahead token is reported at the
2912 end inside square brackets.
2914 ###### parser functions
2916 static char *reserved_words[] = {
2917 [TK_error] = "ERROR",
2920 [TK_newline] = "NEWLINE",
2923 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2925 fprintf(trace, "(%d", f->state);
2926 if (states[f->state].starts_line)
2927 fprintf(trace, "s");
2928 if (f->newline_permitted)
2929 fprintf(trace, "n%d", f->since_newline);
2930 fprintf(trace, ") ");
2933 void parser_trace(FILE *trace, struct parser *p,
2934 struct token *tk, const struct state states[],
2935 const char *non_term[], int knowns)
2940 for (i = 0; i < p->tos; i++) {
2941 struct frame *f = &p->stack[i];
2944 if (sym < TK_reserved &&
2945 reserved_words[sym] != NULL)
2946 fputs(reserved_words[sym], trace);
2947 else if (sym < TK_reserved + knowns) {
2948 struct token *t = p->asn_stack[i];
2949 text_dump(trace, t->txt, 20);
2951 fputs(non_term[sym - TK_reserved - knowns],
2954 fprintf(trace, ".%d", f->indents);
2955 if (f->since_newline == 0)
2959 parser_trace_state(trace, f, states);
2961 fprintf(trace, "[");
2962 if (tk->num < TK_reserved &&
2963 reserved_words[tk->num] != NULL)
2964 fputs(reserved_words[tk->num], trace);
2966 text_dump(trace, tk->txt, 20);
2970 void parser_trace_action(FILE *trace, char *action)
2973 fprintf(trace, " - %s\n", action);
2978 The obvious example for a parser is a calculator.
2980 As `scanner` provides a number parsing function using `libgmp`, it is not much
2981 work to perform arbitrary rational number calculations.
2983 This calculator takes one expression, or an equality test, per line. The
2984 results are printed and if any equality test fails, the program exits with
2987 ###### File: parsergen.mk
2988 calc.c calc.h : parsergen parsergen.mdc
2989 ./parsergen --tag calc -o calc parsergen.mdc
2990 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
2991 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
2993 ./calc parsergen.mdc
2999 // what do we use for a demo-grammar? A calculator of course.
3011 #include <sys/mman.h>
3017 #include "scanner.h"
3023 static void free_number(struct number *n)
3029 static int text_is(struct text t, char *s)
3031 return (strlen(s) == t.len &&
3032 strncmp(s, t.txt, t.len) == 0);
3035 int main(int argc, char *argv[])
3037 int fd = open(argv[1], O_RDONLY);
3038 int len = lseek(fd, 0, 2);
3039 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3040 struct section *table = code_extract(file, file+len, NULL);
3042 struct token_config config = {
3043 .ignored = (1 << TK_line_comment)
3044 | (1 << TK_block_comment)
3047 .number_chars = ".,_+-",
3051 for (s = table; s; s = s->next)
3052 if (text_is(s->section, "example: input"))
3053 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3055 struct section *t = table->next;
3056 code_free(table->code);
3068 Session -> Session Line
3071 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3072 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3073 gmp_printf(" or as a decimal: %Fg\n", fl);
3077 | Expression = Expression NEWLINE ${
3078 if (mpq_equal($1.val, $3.val))
3079 gmp_printf("Both equal %Qd\n", $1.val);
3081 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3086 | NEWLINE ${ printf("Blank line\n"); }$
3087 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3090 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3091 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3092 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3093 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3094 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3095 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3100 3.1415926535 - 355/113
3102 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3104 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1