1 # LR(1) Parser Generator with 2D support #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 "2D support" means that indentation and line breaks can be significant.
9 There are several distinct sections.
11 1. `grammar_read` will read a grammar from a literate-code file and
12 store the productions, symbols, and code fragments.
13 2. `grammar_analyse` will take that grammar and build LR parsing
14 states and look-ahead tables.
15 3. `grammar_report` will present the details of the analysis
16 in a readable format and will report any conflicts.
17 4. `parser_generate` will write out C code files with various
18 tables and with the code fragments arranged in useful places.
19 5. `parser_run` is a library function intended to be linked together
20 with the generated parser tables to complete the implementation of
22 6. Finally `calc` is a test grammar for a simple calculator. The
23 `parsergen` program built from the C code in this file can extract
24 that grammar directly from this file and process it.
26 ###### File: parsergen.c
31 ## forward declarations
42 ###### File: libparser.c
49 ###### File: parsergen.mk
52 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
55 ## Reading the grammar
57 The grammar must be stored in a markdown literate code file as parsed by
58 "[mdcode][]". It may have up to four top level (i.e. unreferenced)
59 sections: `header`, `code`, `grammar` and `reduce`. The first two, if
60 present, will be literally copied into the generated `.c` and `.h`
61 files. The third is required and contains the grammar which is
62 tokenised with "[scanner][]". `reduce` can provide variable
63 declarations and common initialization code for all the reduction code
64 fragments in the grammar.
66 If the `--tag` option is given, then any top level header that doesn't
67 start with the tag is ignored, and the tag is stripped from the rest. So
68 `--tag Foo` means that the four sections expected are `Foo: header`,
69 `Foo: code`, `Foo: grammar` and `Foo: reduce`. The tag `calc` is used
70 to extract the sample calculator from this file.
73 [scanner]: scanner.html
79 ###### parser includes
83 The grammar contains production sets, precedence/associativity
84 declarations, general terminal declarations, and data type declarations.
85 These are all parsed with _ad hoc_ parsing as we don't have a parser
88 The precedence and associativity can be set for each production, and
89 can be inherited from symbols. The data type (either structure or a
90 reference to a structure) is potentially defined for each non-terminal
91 and describes what C structure is used to store information about each
// Operator associativity for a precedence level; assoc_names is
// indexed by the enum values to give printable names in reports.
95 enum assoc {Left, Right, Non};
96 char *assoc_names[] = {"Left","Right","Non"};
99 struct text struct_name;
102 unsigned short precedence;
106 unsigned short precedence;
114 The strings reported by `mdcode` and `scanner` are `struct text` which
115 have length rather than being nul terminated. To help with printing and
116 comparing we define `text_is` and `prtxt`, which should possibly go in
117 `mdcode`. `scanner` does provide `text_dump` which is useful for
118 strings which might contain control characters.
120 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
121 because that is what we need to detect tags.
124 static int text_is(struct text t, char *s)
126 return (strlen(s) == t.len &&
127 strncmp(s, t.txt, t.len) == 0);
129 static void prtxt(struct text t)
131 printf("%.*s", t.len, t.txt);
134 static int strip_tag(struct text *t, char *tag)
136 int skip = strlen(tag) + 1;
137 if (skip >= t->len ||
138 strncmp(t->txt, tag, skip-1) != 0 ||
139 t->txt[skip-1] != ':')
141 while (skip < t->len && t->txt[skip] == ' ')
150 Productions are comprised primarily of symbols - terminal and
151 non-terminal. We do not make a syntactic distinction between the two,
152 though non-terminals must be identifiers. Non-terminal symbols are
153 those which appear in the head of a production, terminal symbols are
154 those which don't. There are also "virtual" symbols used for precedence
155 marking discussed later, and sometimes we won't know what type a symbol
158 To help with code safety it is possible to declare the terminal symbols.
159 If this is done, then any symbol used in a production that does not
160 appear in the head of any production and is not declared as a terminal
161 is treated as an error.
163 ###### forward declarations
// Classification of grammar symbols: Unknown until first use resolves
// it, Virtual symbols carry precedence only, Terminal/Nonterminal as
// usual. symtypes maps each enum value to a one-letter code
// ('U','V','T','N') for compact reporting.
164 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
165 char *symtypes = "UVTN";
168 ###### grammar fields
169 int terminals_declared;
171 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
172 table of known symbols and the resulting parser will report them as
173 `TK_reserved + N`. A small set of identifiers are reserved for the
174 different token types that `scanner` can report.
// Map scanner token numbers to the names used for them in grammars;
// these are pre-loaded into the symbol table by symbols_init().
// NOTE(review): this excerpt omits some table entries (original line
// numbers jump) — confirm the full list against the complete source.
178 static struct { int num; char *name; } reserved_words[] = {
179 { TK_error, "ERROR" },
180 { TK_number, "NUMBER" },
181 { TK_ident, "IDENTIFIER" },
183 { TK_string, "STRING" },
184 { TK_multi_string, "MULTI_STRING" },
187 { TK_newline, "NEWLINE" },
193 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
194 recognised. The former is automatically expected at the end of the text
195 being parsed. The latter are always ignored by the parser.
197 All of these symbols are stored in a simple symbol table. We use the
198 `struct text` from `mdcode` to store the name and link them together
199 into a sorted list using an insertion sort.
201 We don't have separate `find` and `insert` functions as any symbol we
202 find needs to be remembered. We simply expect `find` to always return a
203 symbol, but its type might be `Unknown`.
212 ###### grammar fields
217 static struct symbol *sym_find(struct grammar *g, struct text s)
219 struct symbol **l = &g->syms;
224 (cmp = text_cmp((*l)->name, s)) < 0)
228 n = calloc(1, sizeof(*n));
237 static void symbols_init(struct grammar *g)
239 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
241 for (i = 0; i < entries; i++) {
244 t.txt = reserved_words[i].name;
245 t.len = strlen(t.txt);
248 s->num = reserved_words[i].num;
252 ### Data types and precedence.
254 Data type specification, precedence specification, and declaration of
255 terminals are all introduced by a dollar sign at the start of the line.
256 If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
257 precedence, if it is `TERM` then the line declares terminals without
258 precedence, otherwise it specifies a data type.
260 The data type name is simply stored and applied to the head of all
261 subsequent productions. It must be the name of a structure optionally
262 preceded by an asterisk which means a reference or "pointer". So
263 `$expression` maps to `struct expression` and `$*statement` maps to
264 `struct statement *`.
266 Any productions given before the first data type declaration will have
267 no data type associated with them and can carry no information. In
268 order to allow other non-terminals to have no type, the data type
269 `$void` can be given. This does *not* mean that `struct void` will be
270 used, but rather that no type will be associated with subsequent
273 The precedence line must contain a list of symbols, either terminal
274 symbols or virtual symbols. It can only contain symbols that have not
275 been seen yet, so precedence declaration must precede the productions
278 A "virtual" symbol is an identifier preceded by `$$`. Virtual symbols
279 carry precedence information but are not included in the grammar. A
280 production can declare that it inherits the precedence of a given
283 This use for `$$` precludes it from being used as a symbol in the
284 described language. Two other symbols: `${` and `}$` are also
287 Each new precedence line introduces a new precedence level and
288 declares how it associates. This level is stored in each symbol
289 listed and may be inherited by any production which uses the symbol. A
290 production inherits from the last symbol which has a precedence.
292 The symbols on the first precedence line have the lowest precedence.
293 Subsequent lines introduce symbols with higher precedence and so bind
296 Note that a declaration line (or "dollar line") can start with either of
297 two different marks: "$" or "$*". The `dollar_line()` function defined
298 here is told which was found in the `isref` argument.
300 ###### grammar fields
301 struct text current_type;
// Extra marks recognised in the grammar source itself: "$$" introduces
// a virtual symbol, "${" and "}$" bracket a code fragment. Their token
// numbers follow the scanner's reserved range.
306 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
307 static const char *known[] = { "$$", "${", "}$" };
310 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
312 struct token t = token_next(ts);
318 if (t.num != TK_ident) {
319 err = "type or assoc expected after '$'";
322 if (text_is(t.txt, "LEFT"))
324 else if (text_is(t.txt, "RIGHT"))
326 else if (text_is(t.txt, "NON"))
328 else if (text_is(t.txt, "TERM")) {
330 g->terminals_declared = 1;
332 g->current_type = t.txt;
333 g->type_isref = isref;
334 if (text_is(t.txt, "void"))
335 g->current_type.txt = NULL;
337 if (t.num != TK_newline) {
338 err = "Extra tokens after type name";
345 err = "$* cannot be followed by a precedence";
349 // This is a precedence or TERM line, need some symbols.
353 while (t.num != TK_newline) {
354 enum symtype type = Terminal;
356 if (t.num == TK_virtual) {
359 if (t.num != TK_ident) {
360 err = "$$ must be followed by a word";
364 err = "Virtual symbols not permitted on $TERM line";
367 } else if (t.num != TK_ident &&
369 err = "Illegal token in precedence line";
372 s = sym_find(g, t.txt);
373 if (s->type != Unknown) {
374 err = "Symbols in precedence/TERM line must not already be known.";
379 s->precedence = g->prec_levels;
386 err = "No symbols given on precedence/TERM line";
390 while (t.num != TK_newline && t.num != TK_eof)
397 A production either starts with an identifier which is the head
398 non-terminal, or a vertical bar (`|`) in which case this production
399 uses the same head as the previous one. The identifier must be
400 followed by a `->` mark. All productions for a given non-terminal must
401 be grouped together with the `nonterminal ->` given only once.
403 After this a (possibly empty) sequence of identifiers and marks form
404 the body of the production. A virtual symbol may be given after the
405 body by preceding it with `$$`. If a virtual symbol is given, the
406 precedence of the production is that for the virtual symbol. If none
407 is given, the precedence is inherited from the last symbol in the
408 production which has a precedence specified.
410 After the optional precedence may come the `${` mark. This indicates
411 the start of a code fragment. If present, this must be on the same
412 line as the start of the production.
414 All of the text from the `${` through to the matching `}$` is
415 collected and forms the code-fragment for the production. It must all
416 be in one `code_node` of the literate code. The `}$` must be
417 at the end of a line.
419 Text in the code fragment will undergo substitutions where `$N` or
420 `$<N`, for some numeric `N` (or non-numeric indicator as described
421 later), will be replaced with a variable holding the parse information
422 for the particular symbol in the production. `$0` is the head of the
423 production, `$1` is the first symbol of the body, etc. The type of `$N`
424 for a terminal symbol is `struct token`. For a non-terminal, it is
425 whatever has been declared for that symbol. The `<` may be included and
426 means that the value (usually a reference) is being moved out, so it
427 will not automatically be freed. The effect of using '<' is that the
428 variable is cleared to all-zeros.
430 While building productions we will need to add to an array which needs to
434 static void array_add(void *varray, int *cnt, void *new)
436 void ***array = varray;
439 current = ((*cnt-1) | (step-1))+1;
440 if (*cnt == current) {
443 *array = realloc(*array, current * sizeof(void*));
445 (*array)[*cnt] = new;
449 Collecting the code fragment simply involves reading tokens until we
450 hit the end token `}$`, and noting the character position of the start and
454 static struct text collect_code(struct token_state *state,
459 code.txt = start.txt.txt + start.txt.len;
461 t = token_next(state);
462 while (t.node == start.node &&
463 t.num != TK_close && t.num != TK_error &&
465 if (t.num == TK_close && t.node == start.node)
466 code.len = t.txt.txt - code.txt;
472 Now we have all the bits we need to parse a full production.
477 ###### grammar fields
478 struct production **productions;
479 int production_count;
481 ###### production fields
483 struct symbol **body;
489 int first_production;
492 static char *parse_production(struct grammar *g,
494 struct token_state *state)
496 /* Head has already been parsed. */
499 struct production p, *pp;
501 memset(&p, 0, sizeof(p));
503 tk = token_next(state);
504 while (tk.num == TK_ident || tk.num == TK_mark) {
505 struct symbol *bs = sym_find(g, tk.txt);
506 if (bs->type == Unknown) {
507 if (!g->terminals_declared)
510 if (bs->type == Virtual) {
511 err = "Virtual symbol not permitted in production";
514 if (bs->precedence) {
515 p.precedence = bs->precedence;
518 array_add(&p.body, &p.body_size, bs);
519 tk = token_next(state);
521 if (tk.num == TK_virtual) {
523 tk = token_next(state);
524 if (tk.num != TK_ident) {
525 err = "word required after $$";
528 vs = sym_find(g, tk.txt);
529 if (vs->precedence == 0) {
530 err = "symbol after $$ must have precedence";
533 p.precedence = vs->precedence;
536 tk = token_next(state);
538 if (tk.num == TK_open) {
539 p.code_line = tk.line;
540 p.code = collect_code(state, tk);
541 if (p.code.txt == NULL) {
542 err = "code fragment not closed properly";
545 tk = token_next(state);
548 if (tk.num != TK_newline && tk.num != TK_eof) {
549 err = "stray tokens at end of line";
552 pp = malloc(sizeof(*pp));
554 array_add(&g->productions, &g->production_count, pp);
557 while (tk.num != TK_newline && tk.num != TK_eof)
558 tk = token_next(state);
563 With the ability to parse productions and dollar-lines, we have nearly all
564 that we need to parse a grammar from a `code_node`.
566 The head of the first production will effectively be the `start` symbol
567 of the grammar. However it won't _actually_ be so. Processing the
568 grammar is greatly simplified if the real start symbol only has a single
569 production, and expects `$eof` as the final terminal. So when we find
570 the first explicit production we insert an extra implicit production as
571 production zero which looks like
573 ###### Example: production 0
576 where `START` is the first non-terminal given.
578 ###### create production zero
579 struct production *p = calloc(1,sizeof(*p));
580 struct text start = {"$start",6};
581 struct text eof = {"$eof",4};
582 struct text code = {"$0 = $<1;", 9};
583 p->head = sym_find(g, start);
584 p->head->type = Nonterminal;
585 p->head->struct_name = g->current_type;
586 p->head->isref = g->type_isref;
587 if (g->current_type.txt)
589 array_add(&p->body, &p->body_size, head);
590 array_add(&p->body, &p->body_size, sym_find(g, eof));
591 p->head->first_production = g->production_count;
592 array_add(&g->productions, &g->production_count, p);
594 Now we are ready to read in the grammar. We ignore comments
595 and strings so that the marks which introduce them can be
596 used as terminals in the grammar. We don't ignore numbers
597 even though we don't allow them as that causes the scanner
598 to produce errors that the parser is better positioned to handle.
599 We also ignore indentation, but we expect and use newlines.
601 To allow comments in the grammar, we explicitly check for "//" as the
602 first token on the line and those lines are skipped. "//" can still be
603 used as a terminal anywhere that a terminal is expected.
606 static struct grammar *grammar_read(struct code_node *code)
608 struct token_config conf = {
611 .words_marks = known,
612 .known_count = sizeof(known)/sizeof(known[0]),
614 .ignored = (1 << TK_line_comment)
615 | (1 << TK_block_comment)
618 | (1 << TK_multi_string)
623 struct token_state *state = token_open(code, &conf);
625 struct symbol *head = NULL;
629 g = calloc(1, sizeof(*g));
632 for (tk = token_next(state); tk.num != TK_eof;
633 tk = token_next(state)) {
634 if (tk.num == TK_newline)
636 if (tk.num == TK_ident) {
638 head = sym_find(g, tk.txt);
639 if (head->type == Nonterminal)
640 err = "This non-terminal has already be used.";
641 else if (head->type == Virtual)
642 err = "Virtual symbol not permitted in head of production";
644 head->type = Nonterminal;
645 head->struct_name = g->current_type;
646 head->isref = g->type_isref;
647 if (g->production_count == 0) {
648 ## create production zero
650 head->first_production = g->production_count;
651 tk = token_next(state);
652 if (tk.num == TK_mark &&
653 text_is(tk.txt, "->"))
654 err = parse_production(g, head, state);
656 err = "'->' missing in production";
658 } else if (tk.num == TK_mark
659 && text_is(tk.txt, "|")) {
660 // another production for same non-term
662 err = parse_production(g, head, state);
664 err = "First production must have a head";
665 } else if (tk.num == TK_mark
666 && text_is(tk.txt, "$")) {
667 err = dollar_line(state, g, 0);
668 } else if (tk.num == TK_mark
669 && text_is(tk.txt, "$*")) {
670 err = dollar_line(state, g, 1);
671 } else if (tk.num == TK_mark
672 && text_is(tk.txt, "//")) {
673 while (tk.num != TK_newline &&
675 tk = token_next(state);
677 err = "Unrecognised token at start of line.";
683 if (g->terminals_declared) {
686 for (s = g->syms; s; s = s->next) {
687 if (s->type != Unknown)
690 fprintf(stderr, "Token %.*s not declared\n",
691 s->name.len, s->name.txt);
694 free(g); // FIXME free content
700 fprintf(stderr, "Error at line %d: %s\n",
703 free(g); // FIXME free content
707 ## Analysing the grammar
709 The central task in analysing the grammar is to determine a set of
710 states to drive the parsing process. This will require finding various
711 sets of symbols and of "items". Some of these sets will need to attach
712 information to each element in the set, so they are more like maps.
714 Each "item" may have a 'look-ahead' or `LA` set associated with it.
715 Many of these will be the same as each other. To avoid memory wastage,
716 and to simplify some comparisons of sets, these sets will be stored in a
717 list of unique sets, each assigned a number.
719 Once we have the data structures in hand to manage these sets and lists,
720 we can start setting the 'nullable' flag, build the 'FIRST' sets, and
721 then create the item sets which define the various states.
725 Though we don't only store symbols in these sets, they are the main
726 things we store, so they are called symbol sets or "symsets".
728 A symset has a size, an array of shorts, and an optional array of data
729 values, which are also shorts. If the array of data is not present, we
730 store an impossible pointer, as `NULL` is used to indicate that no
731 memory has been allocated yet.
736 unsigned short *syms, *data;
// NO_DATA is an intentionally impossible pointer marking a symset that
// carries no per-symbol data, while NULL in 'data' means data is
// wanted but not yet allocated (see surrounding prose).
738 #define NO_DATA ((unsigned short *)1)
739 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
740 const struct symset INIT_DATASET = { 0, NULL, NULL };
742 The arrays of shorts are allocated in blocks of 8 and are kept sorted by
743 using an insertion sort. We don't explicitly record the amount of
744 allocated space as it can be derived directly from the current `cnt`
745 using `((cnt - 1) | 7) + 1`.
748 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
751 int current = ((s->cnt-1) | 7) + 1;
752 if (current == s->cnt) {
754 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
755 if (s->data != NO_DATA)
756 s->data = realloc(s->data, sizeof(*s->data) * current);
759 while (i > 0 && s->syms[i-1] > key) {
760 s->syms[i] = s->syms[i-1];
761 if (s->data != NO_DATA)
762 s->data[i] = s->data[i-1];
766 if (s->data != NO_DATA)
771 Finding a symbol (or item) in a `symset` uses a simple binary search.
772 We return the index where the value was found (so data can be accessed),
773 or `-1` to indicate failure.
775 static int symset_find(struct symset *ss, unsigned short key)
782 while (lo + 1 < hi) {
783 int mid = (lo + hi) / 2;
784 if (ss->syms[mid] <= key)
789 if (ss->syms[lo] == key)
794 We will often want to form the union of two symsets. When we do, we
795 will often want to know if anything changed (as that might mean there is
796 more work to do). So `symset_union` reports whether anything was added
797 to the first set. We use a slow quadratic approach as these sets are
798 rarely large. If profiling shows this to be a problem it can be
801 static int symset_union(struct symset *a, struct symset *b)
805 for (i = 0; i < b->cnt; i++)
806 if (symset_find(a, b->syms[i]) < 0) {
807 unsigned short data = 0;
808 if (b->data != NO_DATA)
810 symset_add(a, b->syms[i], data);
816 And of course we must be able to free a symset.
818 static void symset_free(struct symset ss)
821 if (ss.data != NO_DATA)
827 Some symsets are simply stored somewhere appropriate in the `grammar`
828 data structure, others need a bit of indirection. This applies
829 particularly to "LA" sets. These will be paired with "items" in an
830 "itemset". As itemsets will be stored in a symset, the "LA" set needs
831 to be stored in the `data` field, so we need a mapping from a small
832 (short) number to an LA `symset`.
834 As mentioned earlier this involves creating a list of unique symsets.
836 For now, we just use a linear list sorted by insertion. A skiplist
837 would be more efficient and may be added later.
844 struct setlist *next;
847 ###### grammar fields
848 struct setlist *sets;
853 static int ss_cmp(struct symset a, struct symset b)
856 int diff = a.cnt - b.cnt;
859 for (i = 0; i < a.cnt; i++) {
860 diff = (int)a.syms[i] - (int)b.syms[i];
867 static int save_set(struct grammar *g, struct symset ss)
869 struct setlist **sl = &g->sets;
873 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
880 s = malloc(sizeof(*s));
889 Finding a set by number is currently performed by a simple linear
890 search. If this turns out to hurt performance, we can store the sets in
891 a dynamic array like the productions.
893 static struct symset set_find(struct grammar *g, int num)
895 struct setlist *sl = g->sets;
896 while (sl && sl->num != num)
901 ### Setting `nullable`
903 We set `nullable` on the head symbol for any production for which all
904 the body symbols (if any) are nullable. As this is a recursive
905 definition, any change in the `nullable` setting means that we need to
906 re-evaluate where it needs to be set.
908 We simply loop around performing the same calculations until no more
915 static void set_nullable(struct grammar *g)
918 while (check_again) {
921 for (p = 0; p < g->production_count; p++) {
922 struct production *pr = g->productions[p];
925 if (pr->head->nullable)
927 for (s = 0; s < pr->body_size; s++)
928 if (! pr->body[s]->nullable)
930 if (s == pr->body_size) {
931 pr->head->nullable = 1;
938 ### Building the `first` sets
940 When calculating what can follow a particular non-terminal, we will need
941 to know what the "first" terminal in any subsequent non-terminal might
942 be. So we calculate the `first` set for every non-terminal and store
943 them in an array. We don't bother recording the "first" set for
944 terminals as they are trivial.
946 As the "first" for one symbol might depend on the "first" of another, we
947 repeat the calculations until no changes happen, just like with
948 `set_nullable`. This makes use of the fact that `symset_union` reports
949 if any change happens.
951 The core of this, which finds the "first" of part of a production body,
952 will be reused for computing the "follow" sets, so we split that out
953 into a separate function, `add_first`, which also reports if it got all
954 the way to the end of the production without finding a non-nullable
957 ###### grammar fields
958 struct symset *first;
962 static int add_first(struct production *pr, int start,
963 struct symset *target, struct grammar *g,
968 for (s = start; s < pr->body_size; s++) {
969 struct symbol *bs = pr->body[s];
970 if (bs->type == Terminal) {
971 if (symset_find(target, bs->num) < 0) {
972 symset_add(target, bs->num, 0);
976 } else if (symset_union(target, &g->first[bs->num]))
982 *to_end = (s == pr->body_size);
986 static void build_first(struct grammar *g)
990 g->first = calloc(g->num_syms, sizeof(g->first[0]));
991 for (s = 0; s < g->num_syms; s++)
992 g->first[s] = INIT_SYMSET;
994 while (check_again) {
997 for (p = 0; p < g->production_count; p++) {
998 struct production *pr = g->productions[p];
999 struct symset *head = &g->first[pr->head->num];
1001 if (add_first(pr, 0, head, g, NULL))
1007 ### Building the `follow` sets.
1009 There are two different situations when we will want to generate
1010 "follow" sets. If we are doing an SLR analysis, we want to generate a
1011 single "follow" set for each non-terminal in the grammar. That is what
1012 is happening here. If we are doing an LALR or LR analysis we will want
1013 to generate a separate "LA" set for each item. We do that later in
1016 There are two parts to generating a "follow" set. Firstly we look at
1017 every place that any non-terminal appears in the body of any production,
1018 and we find the set of possible "first" symbols after there. This is
1019 done using `add_first` above and only needs to be done once as the
1020 "first" sets are now stable and will not change.
1022 ###### grammar fields
1023 struct symset *follow;
1027 for (p = 0; p < g->production_count; p++) {
1028 struct production *pr = g->productions[p];
1031 for (b = 0; b < pr->body_size - 1; b++) {
1032 struct symbol *bs = pr->body[b];
1033 if (bs->type == Terminal)
1035 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1039 The second part is to add the "follow" set of the head of a production
1040 to the "follow" sets of the final symbol in the production, and any
1041 other symbol which is followed only by `nullable` symbols. As this
1042 depends on "follow" itself we need to repeatedly perform the process
1043 until no further changes happen.
1045 Rather than 2 nested loops, one that repeats the whole process until
1046 there is no change, and one that iterates over the set of productions, we
1047 combine these two functions into a single loop.
1051 for (check_again = 0, p = 0;
1052 p < g->production_count;
1053 p < g->production_count-1
1054 ? p++ : check_again ? (check_again = 0, p = 0)
1056 struct production *pr = g->productions[p];
1059 for (b = pr->body_size - 1; b >= 0; b--) {
1060 struct symbol *bs = pr->body[b];
1061 if (bs->type == Terminal)
1063 if (symset_union(&g->follow[bs->num],
1064 &g->follow[pr->head->num]))
1071 We now just need to create and initialise the `follow` list to get a
1075 static void build_follow(struct grammar *g)
1077 int s, check_again, p;
1078 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1079 for (s = 0; s < g->num_syms; s++)
1080 g->follow[s] = INIT_SYMSET;
1084 ### Building itemsets and states
1086 There are three different levels of detail that can be chosen for
1087 building the itemsets and states for the LR grammar. They are:
1089 1. LR(0), LR(0.5), or SLR(1), where no look-ahead is included in the
1090 itemsets - look-ahead, if used at all, is simply the 'follow' sets
1092 2. LALR(1) where we build look-ahead sets with each item and merge
1093 the LA sets when we find two paths to the same "kernel" set of items.
1094 3. LR(1) where different look-ahead for any item in the set means
1095 a different item set must be created.
1097 ###### forward declarations
// Level of look-ahead detail used when building itemsets and states,
// in increasing order of precision (LR0 .. LR1).
1098 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1100 We need to be able to look through existing states to see if a newly
1101 generated state already exists. For now we use a simple sorted linked
1104 An item is a pair of numbers: the production number and the position of
1105 "DOT", which is an index into the body of the production. As the
1106 numbers are not enormous we can combine them into a single "short" and
1107 store them in a `symset` - 5 bits for "DOT" and 11 bits for the
1108 production number (so 2000 productions with maximum of 31 symbols in the
1111 Comparing the itemsets will be a little different to comparing symsets
1112 as we want to do the lookup after generating the "kernel" of an
1113 itemset, so we need to ignore the offset=zero items which are added during
1116 To facilitate this, we modify the "DOT" number so that "0" sorts to
1117 the end of the list in the symset, and then only compare items before
// Pack a production number (low 11 bits) and DOT position (high 5
// bits) into one short. DOT is stored as 31-dot so that dot==0 items
// sort to the end of a symset, letting kernel comparisons stop early.
1121 static inline unsigned short item_num(int production, int dot)
1123 return production | ((31-dot) << 11);
// Recover the production number from a packed item.
1125 static inline int item_prod(unsigned short item)
1127 return item & 0x7ff;
// Recover the DOT position from a packed item.
1129 static inline int item_dot(unsigned short item)
1131 return (31-(item >> 11)) & 0x1f;
1134 For LR(1) analysis we need to compare not just the itemset in a state
1135 but also the LA sets. As we assign each unique LA set a number, we
1136 can just compare the symset and the data values together.
1139 static int itemset_cmp(struct symset a, struct symset b,
1140 enum grammar_type type)
1146 i < a.cnt && i < b.cnt &&
1147 item_dot(a.syms[i]) > 0 &&
1148 item_dot(b.syms[i]) > 0;
1150 int diff = a.syms[i] - b.syms[i];
1154 diff = a.data[i] - b.data[i];
1159 if (i == a.cnt || item_dot(a.syms[i]) == 0)
1163 if (i == b.cnt || item_dot(b.syms[i]) == 0)
1169 if (type < LR1 || av == -1)
1172 a.data[i] - b.data[i];
1175 It will be helpful to know if an itemset has been "completed" or not,
1176 particularly for LALR where itemsets get merged, at which point they
1177 need to be considered for completion again. So a `completed` flag is
1180 And now we can build the list of itemsets. The lookup routine returns
1181 both a success flag and a pointer to where in the list an insert should
1182 happen, so we don't need to search a second time.
1186 struct itemset *next;
1188 struct symset items;
1189 struct symset go_to;
1191 unsigned short precedence;
1195 ###### grammar fields
1196 struct itemset *items;
1200 static int itemset_find(struct grammar *g, struct itemset ***where,
1201 struct symset kernel, enum grammar_type type)
1203 struct itemset **ip;
1205 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1206 struct itemset *i = *ip;
1208 diff = itemset_cmp(i->items, kernel, type);
1221 Adding an itemset may require merging the LA sets if LALR analysis is
1222 happening. If any new LA set adds any symbols that weren't in the old
1223 LA set, we clear the `completed` flag so that the dependants of this
1224 itemset will be recalculated and their LA sets updated.
1226 `add_itemset` must consume the symsets it is passed, either by adding
1227 them to a data structure, or freeing them.
1229 static int add_itemset(struct grammar *g, struct symset ss,
1230 enum assoc assoc, unsigned short precedence,
1231 enum grammar_type type)
1233 struct itemset **where, *is;
1235 int found = itemset_find(g, &where, ss, type);
1237 is = calloc(1, sizeof(*is));
1238 is->state = g->states;
1242 is->precedence = precedence;
1244 is->go_to = INIT_DATASET;
1253 for (i = 0; i < ss.cnt; i++) {
1254 struct symset temp = INIT_SYMSET, s;
1255 if (ss.data[i] == is->items.data[i])
1257 s = set_find(g, is->items.data[i]);
1258 symset_union(&temp, &s);
1259 s = set_find(g, ss.data[i]);
1260 if (symset_union(&temp, &s)) {
1261 is->items.data[i] = save_set(g, temp);
1272 To build all the itemsets, we first insert the initial itemset made from
1273 production zero, complete each itemset, and then generate new itemsets
1274 from old until no new ones can be made.
1276 Completing an itemset means finding all the items where "DOT" is
1277 followed by a nonterminal and adding "DOT=0" items for every production
1278 from that non-terminal - providing each item hasn't already been added.
1279 When we add such an item it might get inserted before the current
1280 one, so to make sure we process it we reset the loop counter to the
1283 If LA sets are needed, the LA set for each new item is found using
1284 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This
1285 may be supplemented by the LA set for the item which produced the new
1288 We also collect a set of all symbols which follow "DOT" (in `done`) as
1289 this is used in the next stage.
1291 When itemsets are created we assign a precedence to the itemset from the
1292 complete item, if there is one. We ignore the possibility of there
1293 being two and don't (currently) handle precedence in such grammars.
1294 When completing an itemset we ignore any item where DOT is followed by a
1295 terminal with a precedence lower than that for the itemset. Unless the
1296 terminal has right associativity, we also ignore items where the
1297 terminal has the same precedence. The result is that unwanted items are
1298 still in the itemset, but the terminal doesn't get into the go to set,
1299 so the item is ineffective.
1301 ###### complete itemset
1302 for (i = 0; i < is->items.cnt; i++) {
1303 int p = item_prod(is->items.syms[i]);
1304 int bs = item_dot(is->items.syms[i]);
1305 struct production *pr = g->productions[p];
1308 struct symset LA = INIT_SYMSET;
1309 unsigned short sn = 0;
1311 if (bs == pr->body_size)
1314 if (s->precedence && is->precedence &&
1315 is->precedence > s->precedence)
1316 /* This terminal has a low precedence and
1317 * shouldn't be shifted
1320 if (s->precedence && is->precedence &&
1321 is->precedence == s->precedence && s->assoc != Right)
1322 /* This terminal has a matching precedence and is
1323 * not Right-associative, so we mustn't shift it.
1326 if (symset_find(&done, s->num) < 0)
1327 symset_add(&done, s->num, 0);
1329 if (s->type != Nonterminal)
1335 add_first(pr, bs+1, &LA, g, &to_end);
1337 struct symset ss = set_find(g, is->items.data[i]);
1338 symset_union(&LA, &ss);
1340 sn = save_set(g, LA);
1341 LA = set_find(g, sn);
1344 /* Add productions for this symbol */
1345 for (p2 = s->first_production;
1346 p2 < g->production_count &&
1347 g->productions[p2]->head == s;
1349 int itm = item_num(p2, 0);
1350 int pos = symset_find(&is->items, itm);
1352 symset_add(&is->items, itm, sn);
1353 /* Will have re-ordered, so start
1354 * from beginning again */
1356 } else if (type >= LALR) {
1357 struct symset ss = set_find(g, is->items.data[pos]);
1358 struct symset tmp = INIT_SYMSET;
1359 struct symset *la = &LA;
1361 symset_union(&tmp, &ss);
1362 if (symset_union(&tmp, la)) {
1363 is->items.data[pos] = save_set(g, tmp);
1371 For each symbol we found (and placed in `done`) we collect all the items
1372 for which this symbol is next, and create a new itemset with "DOT"
1373 advanced over the symbol. This is then added to the collection of
1374 itemsets (or merged with a pre-existing itemset).
1376 ###### derive itemsets
1377 // Now we have a completed itemset, so we need to
1378 // compute all the 'next' itemsets and create them
1379 // if they don't exist.
1380 for (i = 0; i < done.cnt; i++) {
1382 unsigned short state;
1383 struct symbol *sym = g->symtab[done.syms[i]];
1384 enum assoc assoc = Non;
1385 unsigned short precedence = 0;
1386 struct symset newitemset = INIT_SYMSET;
1388 newitemset = INIT_DATASET;
1390 for (j = 0; j < is->items.cnt; j++) {
1391 int itm = is->items.syms[j];
1392 int p = item_prod(itm);
1393 int bp = item_dot(itm);
1394 struct production *pr = g->productions[p];
1395 unsigned short la = 0;
1398 if (bp == pr->body_size)
1400 if (pr->body[bp] != sym)
1405 la = is->items.data[j];
1406 if (bp == pr->body_size &&
1407 pr->precedence > 0 &&
1408 pr->precedence > precedence) {
1409 // new itemset is reducible and has a precedence.
1410 precedence = pr->precedence;
1413 pos = symset_find(&newitemset, item_num(p, bp));
1416 symset_add(&newitemset, item_num(p, bp), la);
1417 else if (type >= LALR) {
1418 // Need to merge la set.
1419 int la2 = newitemset.data[pos];
1421 struct symset ss = set_find(g, la2);
1422 struct symset LA = INIT_SYMSET;
1423 symset_union(&LA, &ss);
1424 ss = set_find(g, la);
1425 if (symset_union(&LA, &ss))
1426 newitemset.data[pos] = save_set(g, LA);
1432 state = add_itemset(g, newitemset, assoc, precedence, type);
1433 if (symset_find(&is->go_to, done.syms[i]) < 0)
1434 symset_add(&is->go_to, done.syms[i], state);
1437 All that is left is to create the initial itemset from production zero, and
1438 with `TK_eof` as the LA set.
1441 static void build_itemsets(struct grammar *g, enum grammar_type type)
1443 struct symset first = INIT_SYMSET;
1446 unsigned short la = 0;
1448 // LA set just has eof
1449 struct symset eof = INIT_SYMSET;
1450 symset_add(&eof, TK_eof, 0);
1451 la = save_set(g, eof);
1452 first = INIT_DATASET;
1454 // production 0, offset 0 (with no data)
1455 symset_add(&first, item_num(0, 0), la);
1456 add_itemset(g, first, Non, 0, type);
1457 for (check_again = 0, is = g->items;
1459 is = is->next ?: check_again ? (check_again = 0, g->items) : NULL) {
1461 struct symset done = INIT_SYMSET;
1472 ### Completing the analysis.
1474 The exact process of analysis depends on which level was requested. For
1475 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1476 `LR1` we need first, but not follow. For `SLR` we need both.
1478 We don't build the "action" tables that you might expect as the parser
1479 doesn't use them. They will be calculated to some extent if needed for
1482 Once we have built everything we allocate arrays for the two lists:
1483 symbols and itemsets. This allows more efficient access during
1484 reporting. The symbols are grouped as terminals and then non-terminals,
1485 and we record the changeover point in `first_nonterm`.
1487 ###### grammar fields
1488 struct symbol **symtab;
1489 struct itemset **statetab;
1492 ###### grammar_analyse
1494 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1498 int snum = TK_reserved;
1499 for (s = g->syms; s; s = s->next)
1500 if (s->num < 0 && s->type == Terminal) {
1504 g->first_nonterm = snum;
1505 for (s = g->syms; s; s = s->next)
1506 if (s->num < 0 && s->type != Virtual) {
1510 for (s = g->syms; s; s = s->next)
1516 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1517 for (s = g->syms; s; s = s->next)
1518 g->symtab[s->num] = s;
1527 build_itemsets(g, type);
1529 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1530 for (is = g->items; is ; is = is->next)
1531 g->statetab[is->state] = is;
1534 ## Reporting on the Grammar
1536 The purpose of the report is to give the grammar developer insight into
1537 how the grammar parser will work. It is basically a structured dump of
1538 all the tables that have been generated, plus a description of any conflicts.
1540 ###### grammar_report
1541 static int grammar_report(struct grammar *g, enum grammar_type type)
1547 return report_conflicts(g, type);
1550 Firstly we have the complete list of symbols, together with the
1551 "FIRST" set if that was generated. We add a mark to each symbol to
1552 show if it is nullable (`.`).
1556 static void report_symbols(struct grammar *g)
1560 printf("SYMBOLS + FIRST:\n");
1562 printf("SYMBOLS:\n");
1564 for (n = 0; n < g->num_syms; n++) {
1565 struct symbol *s = g->symtab[n];
1569 printf(" %c%3d%c: ",
1570 s->nullable ? '.':' ',
1571 s->num, symtypes[s->type]);
1574 printf(" (%d%s)", s->precedence,
1575 assoc_names[s->assoc]);
1577 if (g->first && s->type == Nonterminal) {
1580 for (j = 0; j < g->first[n].cnt; j++) {
1583 prtxt(g->symtab[g->first[n].syms[j]]->name);
1590 Then we have the follow sets if they were computed.
1592 static void report_follow(struct grammar *g)
1595 printf("FOLLOW:\n");
1596 for (n = 0; n < g->num_syms; n++)
1597 if (g->follow[n].cnt) {
1601 prtxt(g->symtab[n]->name);
1602 for (j = 0; j < g->follow[n].cnt; j++) {
1605 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1611 And finally the item sets. These include the GO TO tables and, for
1612 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1613 it up a bit. First the items, with production number and associativity.
1615 static void report_item(struct grammar *g, int itm)
1617 int p = item_prod(itm);
1618 int dot = item_dot(itm);
1619 struct production *pr = g->productions[p];
1623 prtxt(pr->head->name);
1625 for (i = 0; i < pr->body_size; i++) {
1626 printf(" %s", (dot == i ? ". ": ""));
1627 prtxt(pr->body[i]->name);
1629 if (dot == pr->body_size)
1632 if (pr->precedence && dot == pr->body_size)
1633 printf(" (%d%s)", pr->precedence,
1634 assoc_names[pr->assoc]);
1635 if (dot < pr->body_size &&
1636 pr->body[dot]->precedence) {
1637 struct symbol *s = pr->body[dot];
1638 printf(" [%d%s]", s->precedence,
1639 assoc_names[s->assoc]);
1644 The LA sets which are (possibly) reported with each item:
1646 static void report_la(struct grammar *g, int lanum)
1648 struct symset la = set_find(g, lanum);
1652 printf(" LOOK AHEAD(%d)", lanum);
1653 for (i = 0; i < la.cnt; i++) {
1656 prtxt(g->symtab[la.syms[i]]->name);
1661 Then the go to sets:
1663 static void report_goto(struct grammar *g, struct symset gt)
1668 for (i = 0; i < gt.cnt; i++) {
1670 prtxt(g->symtab[gt.syms[i]]->name);
1671 printf(" -> %d\n", gt.data[i]);
1675 Now we can report all the item sets complete with items, LA sets, and GO TO.
1677 static void report_itemsets(struct grammar *g)
1680 printf("ITEM SETS(%d)\n", g->states);
1681 for (s = 0; s < g->states; s++) {
1683 struct itemset *is = g->statetab[s];
1684 printf(" Itemset %d:", s);
1686 printf(" %d%s", is->precedence, assoc_names[is->assoc]);
1688 for (j = 0; j < is->items.cnt; j++) {
1689 report_item(g, is->items.syms[j]);
1690 if (is->items.data != NO_DATA)
1691 report_la(g, is->items.data[j]);
1693 report_goto(g, is->go_to);
1697 ### Reporting conflicts
1699 Conflict detection varies a lot among different analysis levels. However
1700 LR0 and LR0.5 are quite similar - having no follow sets - and SLR, LALR and
1701 LR1 are also similar as they have FOLLOW or LA sets.
1705 ## conflict functions
1707 static int report_conflicts(struct grammar *g, enum grammar_type type)
1710 printf("Conflicts:\n");
1712 cnt = conflicts_lr0(g, type);
1714 cnt = conflicts_slr(g, type);
1716 printf(" - no conflicts\n");
1720 LR0 conflicts occur in any state which has both a reducible item and
1721 a shiftable item, or two reducible items.
1723 LR05 conflicts only occur if two possible reductions exist,
1724 as shifts always over-ride reductions.
1726 ###### conflict functions
1727 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1731 for (i = 0; i < g->states; i++) {
1732 struct itemset *is = g->statetab[i];
1733 int last_reduce = -1;
1734 int prev_reduce = -1;
1735 int last_shift = -1;
1739 for (j = 0; j < is->items.cnt; j++) {
1740 int itm = is->items.syms[j];
1741 int p = item_prod(itm);
1742 int bp = item_dot(itm);
1743 struct production *pr = g->productions[p];
1745 if (bp == pr->body_size) {
1746 prev_reduce = last_reduce;
1750 if (pr->body[bp]->type == Terminal)
1753 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1754 printf(" State %d has both SHIFT and REDUCE:\n", i);
1755 report_item(g, is->items.syms[last_shift]);
1756 report_item(g, is->items.syms[last_reduce]);
1759 if (prev_reduce >= 0) {
1760 printf(" State %d has 2 (or more) reducible items\n", i);
1761 report_item(g, is->items.syms[prev_reduce]);
1762 report_item(g, is->items.syms[last_reduce]);
1769 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1770 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1771 in the source of the look ahead set.
1773 We build two datasets to reflect the "action" table: one which maps
1774 terminals to items where that terminal could be shifted and another
1775 which maps terminals to items that could be reduced when the terminal
1776 is in look-ahead. We report when we get conflicts between the two.
1778 As a special case, if we find a SHIFT/REDUCE conflict on the NEWLINE
1779 terminal, we ignore it. NEWLINE is handled specially with its own
1780 rules for when to shift and when to reduce. Conflicts are expected,
1781 but handled internally.
1783 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1788 for (i = 0; i < g->states; i++) {
1789 struct itemset *is = g->statetab[i];
1790 struct symset shifts = INIT_DATASET;
1791 struct symset reduce = INIT_DATASET;
1795 /* First collect the shifts */
1796 for (j = 0; j < is->items.cnt; j++) {
1797 unsigned short itm = is->items.syms[j];
1798 int p = item_prod(itm);
1799 int bp = item_dot(itm);
1800 struct production *pr = g->productions[p];
1803 if (bp >= pr->body_size ||
1804 pr->body[bp]->type != Terminal)
1809 if (s->precedence && is->precedence)
1810 /* Precedence resolves this, so no conflict */
1813 if (symset_find(&shifts, s->num) < 0)
1814 symset_add(&shifts, s->num, itm);
1816 /* Now look for reductions and conflicts */
1817 for (j = 0; j < is->items.cnt; j++) {
1818 unsigned short itm = is->items.syms[j];
1819 int p = item_prod(itm);
1820 int bp = item_dot(itm);
1821 struct production *pr = g->productions[p];
1823 if (bp < pr->body_size)
1828 la = g->follow[pr->head->num];
1830 la = set_find(g, is->items.data[j]);
1832 for (k = 0; k < la.cnt; k++) {
1833 int pos = symset_find(&shifts, la.syms[k]);
1834 if (pos >= 0 && la.syms[k] != TK_newline) {
1835 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1837 prtxt(g->symtab[la.syms[k]]->name);
1839 report_item(g, shifts.data[pos]);
1840 report_item(g, itm);
1842 pos = symset_find(&reduce, la.syms[k]);
1844 symset_add(&reduce, la.syms[k], itm);
1847 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1848 prtxt(g->symtab[la.syms[k]]->name);
1850 report_item(g, itm);
1851 report_item(g, reduce.data[pos]);
1855 symset_free(shifts);
1856 symset_free(reduce);
1862 ## Generating the parser
1864 The exported part of the parser is the `parse_XX` function, where the name
1865 `XX` is based on the name of the parser files.
1867 This takes a `code_node`, a partially initialized `token_config`, and an
1868 optional `FILE` to send tracing to. The `token_config` gets the list of
1869 known words added and then is used with the `code_node` to initialize the
1872 `parse_XX` then calls the library function `parser_run` to actually complete
1873 the parse. This needs the `states` table and functions to call the various
1874 pieces of code provided in the grammar file, so they are generated first.
1876 ###### parser_generate
1878 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
1879 struct code_node *pre_reduce)
1885 gen_reduce(f, g, file, pre_reduce);
1888 fprintf(f, "#line 0 \"gen_parser\"\n");
1889 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1892 fprintf(f, "\tstruct token_state *tokens;\n");
1893 fprintf(f, "\tconfig->words_marks = known;\n");
1894 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1895 fprintf(f, "\ttokens = token_open(code, config);\n");
1896 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1897 fprintf(f, "\ttoken_close(tokens);\n");
1898 fprintf(f, "\treturn rv;\n");
1899 fprintf(f, "}\n\n");
1902 ### Known words table
1904 The known words table is simply an array of terminal symbols.
1905 The table of nonterminals used for tracing is a similar array.
1909 static void gen_known(FILE *f, struct grammar *g)
1912 fprintf(f, "#line 0 \"gen_known\"\n");
1913 fprintf(f, "static const char *known[] = {\n");
1914 for (i = TK_reserved;
1915 i < g->num_syms && g->symtab[i]->type == Terminal;
1917 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1918 g->symtab[i]->name.txt);
1919 fprintf(f, "};\n\n");
1922 static void gen_non_term(FILE *f, struct grammar *g)
1925 fprintf(f, "#line 0 \"gen_non_term\"\n");
1926 fprintf(f, "static const char *non_term[] = {\n");
1927 for (i = TK_reserved;
1930 if (g->symtab[i]->type == Nonterminal)
1931 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1932 g->symtab[i]->name.txt);
1933 fprintf(f, "};\n\n");
1936 ### States and the goto tables.
1938 For each state we record the goto table and details of the reducible
1939 production if there is one.
1940 Some of the details of the reducible production are stored in the
1941 `do_reduce` function to come later. Here we store the production
1942 number, the body size (useful for stack management), and the resulting
1943 symbol (useful for knowing how to free data later).
1945 The go to table is stored in a simple array of `sym` and corresponding
1948 ###### exported types
1956 const struct lookup * go_to;
1966 static void gen_goto(FILE *f, struct grammar *g)
1969 fprintf(f, "#line 0 \"gen_goto\"\n");
1970 for (i = 0; i < g->states; i++) {
1971 struct symset gt = g->statetab[i]->go_to;
1976 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1978 for (j = 0; j < gt.cnt; j++)
1979 fprintf(f, "\t{ %d, %d },\n",
1980 gt.syms[j], gt.data[j]);
1985 static void gen_states(FILE *f, struct grammar *g)
1988 fprintf(f, "#line 0 \"gen_states\"\n");
1989 fprintf(f, "static const struct state states[] = {\n");
1990 for (i = 0; i < g->states; i++) {
1991 struct itemset *is = g->statetab[i];
1992 int j, prod = -1, prod_len;
1994 for (j = 0; j < is->items.cnt; j++) {
1995 int itm = is->items.syms[j];
1996 int p = item_prod(itm);
1997 int bp = item_dot(itm);
1998 struct production *pr = g->productions[p];
2000 if (bp < pr->body_size)
2002 /* This is what we reduce - choose longest */
2003 if (prod < 0 || prod_len < pr->body_size) {
2005 prod_len = pr->body_size;
2009 fprintf(f, "\t[%d] = { %d, goto_%d, ",
2010 i, is->go_to.cnt, i);
2012 fprintf(f, "\t[%d] = { 0, NULL, ", i);
2014 struct production *pr = g->productions[prod];
2015 struct symbol *hd = pr->head;
2016 fprintf(f, "%d, %d, %d, ",
2017 prod, pr->body_size, pr->head->num);
2018 if (hd->struct_name.txt == NULL)
2019 fprintf(f, "0 },\n");
2021 fprintf(f, "sizeof(struct %.*s%s) },\n",
2022 hd->struct_name.len,
2023 hd->struct_name.txt,
2024 hd->isref ? "*" : "");
2026 fprintf(f, "-1, -1, -1, -1 },\n");
2028 fprintf(f, "};\n\n");
2031 ### The `do_reduce` function and the code
2033 When the parser engine decides to reduce a production, it calls
2034 `do_reduce` which runs the code that was included with the production,
2037 This code needs to be able to store data somewhere. Rather than
2038 requiring `do_reduce` to `malloc` that "somewhere", we pass in a large
2039 buffer and have `do_reduce` return the size to be saved.
2041 In order for the code to access "global" context, we pass in the
2042 "config" pointer that was passed to the parser function. If the `struct
2043 token_config` is embedded in some larger structure, the reducing code
2044 can access the larger structure using pointer manipulation. Performing
2045 that pointer manipulation, and any other common code or variables for
2046 all reduce actions, can be provided in code node called "reduce" which
2047 is passed around in `pre_reduce` which you might have already noticed.
2049 The code fragments require translation when written out. Any `$N` needs
2050 to be converted to a reference either to that buffer (if `$0`) or to the
2051 structure returned by a previous reduction. These pointers need to be
2052 cast to the appropriate type for each access. All this is handled in
2055 `gen_code` also allows symbol references to contain a '`<`' as in
2056 '`$<2`'. This is particularly useful for references (or pointers), but
2057 can be used with structures too. The `<` implies that the value is
2058 being moved out, so the object will not be automatically freed. It is
2059 equivalent to assigning `NULL` to the pointer or filling a structure
2062 Instead of a number `N`, the `$` or `$<` can be followed by some letters
2063 and possibly a number. A number by itself (other than zero) selects a
2064 symbol from the body of the production. A sequence of letters selects
2065 the shortest symbol in the body which contains those letters in the
2066 given order. If a number follows the letters, then a later occurrence
2067 of that symbol is chosen. So "`$AB2`" will refer to the structure
2068 attached to the second occurrence of the shortest symbol which contains
2069 an `A` followed by a `B`. If there is no unique shortest symbol, or if
2070 the number given is too large, then the symbol reference is not
2071 transformed, and will cause an error when the code is compiled.
2075 static int textchr(struct text t, char c, int s)
2078 for (i = s; i < t.len; i++)
2084 static int subseq_match(char *seq, int slen, struct text name)
2088 st = textchr(name, *seq, st);
2098 static int choose_sym(char **namep, int len, struct production *p)
2100 char *name = *namep;
2109 ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
2115 while (len > 0 && (c >= '0' && c <= '9')) {
2118 n = n * 10 + (c - '0');
2122 if (name == *namep || n > p->body_size)
2128 for (i = 0; i < p->body_size; i++) {
2129 if (!subseq_match(nam, namlen, p->body[i]->name))
2131 if (slen == 0 || p->body[i]->name.len < slen) {
2133 slen = p->body[i]->name.len;
2135 if (s >= 0 && p->body[i] != p->body[s] &&
2136 p->body[i]->name.len == p->body[s]->name.len)
2137 /* not unique, so s cannot be used */
2144 for (i = 0; i < p->body_size; i++)
2145 if (p->body[i] == p->body[s]) {
2150 if (n > 0 || i == p->body_size)
2156 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2159 char *used = calloc(1, p->body_size);
2162 fprintf(f, "\t\t\t");
2163 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2177 n = choose_sym(&c, p->code.txt + p->code.len - c, p);
2186 fprintf(f, "(*(struct %.*s*%s)ret)",
2187 p->head->struct_name.len,
2188 p->head->struct_name.txt,
2189 p->head->isref ? "*":"");
2190 else if (p->body[n-1]->type == Terminal)
2191 fprintf(f, "(*(struct token *)body[%d])",
2193 else if (p->body[n-1]->struct_name.txt == NULL)
2194 fprintf(f, "$%d", n);
2196 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2197 p->body[n-1]->struct_name.len,
2198 p->body[n-1]->struct_name.txt,
2199 p->body[n-1]->isref ? "*":"", n-1);
2205 for (i = 0; i < p->body_size; i++) {
2206 if (p->body[i]->struct_name.txt &&
2208 // assume this has been copied out
2209 if (p->body[i]->isref)
2210 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2212 fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
2220 static void gen_reduce(FILE *f, struct grammar *g, char *file,
2221 struct code_node *pre_reduce)
2224 fprintf(f, "#line 1 \"gen_reduce\"\n");
2225 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2227 fprintf(f, "\tint ret_size = 0;\n");
2229 code_node_print(f, pre_reduce, file);
2231 fprintf(f, "#line 4 \"gen_reduce\"\n");
2232 fprintf(f, "\tswitch(prod) {\n");
2233 for (i = 0; i < g->production_count; i++) {
2234 struct production *p = g->productions[i];
2235 fprintf(f, "\tcase %d:\n", i);
2238 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2242 if (p->head->struct_name.txt)
2243 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2244 p->head->struct_name.len,
2245 p->head->struct_name.txt,
2246 p->head->isref ? "*":"");
2248 fprintf(f, "\t\tbreak;\n");
2250 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2255 As each non-terminal can potentially cause a different type of data
2256 structure to be allocated and filled in, we need to be able to free it when
2259 It is particularly important to have fine control over freeing during error
2260 recovery where individual stack frames might need to be freed.
2262 For this, the grammar author is required to define a `free_XX` function for
2263 each structure that is used by a non-terminal. `do_free` will call whichever
2264 is appropriate given a symbol number, and will call `free` (as is
2265 appropriate for tokens) on any terminal symbol.
2269 static void gen_free(FILE *f, struct grammar *g)
2273 fprintf(f, "#line 0 \"gen_free\"\n");
2274 fprintf(f, "static void do_free(short sym, void *asn)\n");
2276 fprintf(f, "\tif (!asn) return;\n");
2277 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2278 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2279 fprintf(f, "\tswitch(sym) {\n");
2281 for (i = 0; i < g->num_syms; i++) {
2282 struct symbol *s = g->symtab[i];
2284 s->type != Nonterminal ||
2285 s->struct_name.txt == NULL)
2288 fprintf(f, "\tcase %d:\n", s->num);
2290 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2292 s->struct_name.txt);
2293 fprintf(f, "\t\tfree(asn);\n");
2295 fprintf(f, "\t\tfree_%.*s(asn);\n",
2297 s->struct_name.txt);
2298 fprintf(f, "\t\tbreak;\n");
2300 fprintf(f, "\t}\n}\n\n");
2303 ## The main routine.
2305 There are three key parts to the "main" function of parsergen: processing
2306 the arguments, loading the grammar file, and dealing with the grammar.
2308 ### Argument processing.
2310 Command line options allow the selection of analysis mode, name of output
2311 file, and whether or not a report should be generated. By default we create
2312 a report only if no code output was requested.
2314 The `parse_XX` function name uses the basename of the output file which
2315 should not have a suffix (`.c`). `.c` is added to the given name for the
2316 code, and `.h` is added for the header (if header text is specified in the
2323 static const struct option long_options[] = {
2324 { "LR0", 0, NULL, '0' },
2325 { "LR05", 0, NULL, '5' },
2326 { "SLR", 0, NULL, 'S' },
2327 { "LALR", 0, NULL, 'L' },
2328 { "LR1", 0, NULL, '1' },
2329 { "tag", 1, NULL, 't' },
2330 { "report", 0, NULL, 'R' },
2331 { "output", 1, NULL, 'o' },
2332 { NULL, 0, NULL, 0 }
2334 const char *options = "05SL1t:Ro:";
2336 ###### process arguments
2338 char *outfile = NULL;
2343 enum grammar_type type = LR05;
2344 while ((opt = getopt_long(argc, argv, options,
2345 long_options, NULL)) != -1) {
2360 outfile = optarg; break;
2362 tag = optarg; break;
2364 fprintf(stderr, "Usage: parsergen -[05SL1R] [-t tag] [-o output] input\n");
2369 infile = argv[optind++];
2371 fprintf(stderr, "No input file given\n");
2374 if (outfile && report == 1)
2377 if (name && strchr(name, '/'))
2378 name = strrchr(name, '/')+1;
2380 if (optind < argc) {
2381 fprintf(stderr, "Excess command line arguments\n");
2385 ### Loading the grammar file
2387 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2390 Once we have extracted the code (with `mdcode`) we expect to find three
2391 or four sections: header, code, grammar, reduce.
2392 Anything else that is not excluded by the `--tag` option is an error.
2394 "header", "code", and "reduce" are optional, though it is hard to build
2395 a working parser without the first two. "grammar" must be provided.
2399 #include <sys/mman.h>
2404 static void pr_err(char *msg)
2407 fprintf(stderr, "%s\n", msg);
2411 struct section *table;
2415 fd = open(infile, O_RDONLY);
2417 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2418 infile, strerror(errno));
2421 len = lseek(fd, 0, 2);
2422 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2423 table = code_extract(file, file+len, pr_err);
2425 struct code_node *hdr = NULL;
2426 struct code_node *code = NULL;
2427 struct code_node *gram = NULL;
2428 struct code_node *pre_reduce = NULL;
2429 for (s = table; s; s = s->next) {
2430 struct text sec = s->section;
2431 if (tag && !strip_tag(&sec, tag))
2433 if (text_is(sec, "header"))
2435 else if (text_is(sec, "code"))
2437 else if (text_is(sec, "grammar"))
2439 else if (text_is(sec, "reduce"))
2440 pre_reduce = s->code;
2442 fprintf(stderr, "Unknown content section: %.*s\n",
2443 s->section.len, s->section.txt);
2448 ### Processing the input
2450 As we need to append an extension to a filename and then open it for
2451 writing, and we need to do this twice, it helps to have a separate function.
2455 static FILE *open_ext(char *base, char *ext)
2457 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2459 strcat(strcpy(fn, base), ext);
2465 If we can read the grammar, then we analyse and optionally report on it. If
2466 the report finds conflicts we will exit with an error status.
2468 ###### process input
2469 struct grammar *g = NULL;
2471 fprintf(stderr, "No grammar section provided\n");
2474 g = grammar_read(gram);
2476 fprintf(stderr, "Failure to parse grammar\n");
2481 grammar_analyse(g, type);
2483 if (grammar_report(g, type))
2487 If a "headers" section is defined, we write it out and include a declaration
2488 for the `parse_XX` function so it can be used from separate code.
2490 if (rv == 0 && hdr && outfile) {
2491 FILE *f = open_ext(outfile, ".h");
2493 code_node_print(f, hdr, infile);
2494 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2498 fprintf(stderr, "Cannot create %s.h\n",
2504 And if all goes well and an output file was provided, we create the `.c`
2505 file with the code section (if any) and the parser tables and function.
2507 if (rv == 0 && outfile) {
2508 FILE *f = open_ext(outfile, ".c");
2511 code_node_print(f, code, infile);
2512 gen_parser(f, g, infile, name, pre_reduce);
2515 fprintf(stderr, "Cannot create %s.c\n",
2521 And that about wraps it up. We need to set the locale so that UTF-8 is
2522 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2524 ###### File: parsergen.mk
2525 parsergen : parsergen.o libscanner.o libmdcode.o
2526 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2533 int main(int argc, char *argv[])
2538 setlocale(LC_ALL,"");
2540 ## process arguments
2547 ## The SHIFT/REDUCE parser
2549 Having analysed the grammar and generated all the tables, we only need
2550 the shift/reduce engine to bring it all together.
2552 ### Goto table lookup
2554 The parser generator has nicely provided us with goto tables sorted by
2555 symbol number. We need a binary search function to find a symbol in the
2558 ###### parser functions
2560 static int search(const struct state *l, int sym)
2563 int hi = l->go_to_cnt;
2567 while (lo + 1 < hi) {
2568 int mid = (lo + hi) / 2;
2569 if (l->go_to[mid].sym <= sym)
2574 if (l->go_to[lo].sym == sym)
2575 return l->go_to[lo].state;
2580 ### Memory allocation
2582 The `scanner` returns tokens in a local variable - we want them in allocated
2583 memory so they can live in the `asn_stack`. So we provide `tok_copy` to
2584 make an allocated copy as required.
2586 ###### parser includes
2589 ###### parser functions
2591 static struct token *tok_copy(struct token tk)
2593 struct token *new = malloc(sizeof(*new));
2598 ### The state stack.
2600 The core data structure for the parser is the stack. This tracks all
2601 the symbols that have been recognised or partially recognised.
2603 The stack usually won't grow very large - maybe a few tens of entries.
2604 So we dynamically grow an array as required but never bother to
2605 shrink it down again.
2607 We keep the stack as two separate allocations. One, `asn_stack` stores
2608 the "abstract syntax nodes" which are created by each reduction. When
2609 we call `do_reduce` we need to pass an array of the `asn`s of the body
2610 of the production, and by keeping a separate `asn` stack, we can just
2611 pass a pointer into this stack.
2613 The other allocation stores all other stack fields of which there are
2614 several. The `state` is the most important one and guides the parsing
2615 process. The `sym` is nearly unnecessary as it is implicit in the
2616 state. However when we want to free entries from the `asn_stack`, it
2617 helps to know what type they are so we can call the right freeing
2618 function. The symbol leads us to the right free function through
2621 The `indents` count tracks the line indents within the symbol or
2622 immediately following it. These are used to allow indent information to
2623 guide parsing and error recovery.
2625 `since_newline` tracks how many stack frames since the last
2626 start-of-line (whether indented or not). So if `since_newline` is
2627 zero, then this symbol is at the start of a line. Similarly
2628 `since_indent` counts the number of states since an indent, it is zero
2629 precisely when `indents` is not zero.
2631 `newline_permitted` keeps track of whether newlines should be ignored
2634 The stack is most properly seen as alternating states and symbols -
2635 states, like the 'DOT' in items, are between symbols. Each frame in
2636 our stack holds a state and the symbol that was before it. The
2637 bottom of stack holds the start state but no symbol, as nothing came
2638 before the beginning. As we need to store some value, `TK_eof` is used
2639 to mark the beginning of the file as well as the end.
2641 ###### parser functions
2646 short newline_permitted;
2650 short since_newline;
2660 Two operations are needed on the stack - shift (which is like push) and pop.
2662 Shift applies not only to terminals but also to non-terminals. When we
2663 reduce a production we will pop off frames corresponding to the body
2664 symbols, then push on a frame for the head of the production. This last
2665 is exactly the same process as shifting in a terminal so we use the same
2666 function for both. In both cases we provide the symbol, the number of
2667 indents the symbol contains (which will be zero for a terminal symbol)
2668 and a flag indicating that the symbol was at (or was reduced from a
2669 symbol which was at) the start of a line. The state is deduced from the
2670 current top-of-stack state and the new symbol.
2672 To simplify other code we arrange for `shift` to fail if there is no `goto`
2673 state for the symbol. This is useful in basic parsing due to our design
2674 that we shift when we can, and reduce when we cannot. So the `shift`
2675 function reports if it could.
2677 `shift` is also used to push state zero onto the stack, so if the
2678 stack is empty, it always chooses zero as the next state.
2680 So `shift` finds the next state. If that succeeds it extends the
2681 allocations if needed and pushes all the information onto the stacks.
2683 ###### parser functions
2685 static int shift(struct parser *p,
2686 short sym, short indents, short start_of_line,
2688 const struct state states[])
2690 // Push an entry onto the stack
2691 struct frame next = {0};
2692 int newstate = p->tos
2693 ? search(&states[p->stack[p->tos-1].state],
2698 if (p->tos >= p->stack_size) {
2699 p->stack_size += 10;
2700 p->stack = realloc(p->stack, p->stack_size
2701 * sizeof(p->stack[0]));
2702 p->asn_stack = realloc(p->asn_stack, p->stack_size
2703 * sizeof(p->asn_stack[0]));
2706 next.indents = indents;
2707 next.state = newstate;
2709 if (!start_of_line) {
2711 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2713 next.since_newline = 1;
2716 next.since_indent = 0;
2718 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2720 next.since_indent = 1;
2722 p->stack[p->tos] = next;
2723 p->asn_stack[p->tos] = asn;
2728 `pop` primarily moves the top of stack (`tos`) back down the required
2729 amount and frees any `asn` entries that need to be freed. It also
2730 collects a summary of the indents and line starts in the symbols that
2731 are being removed. It is called _after_ we reduce a production, just
2732 before we `shift` the nonterminal in.
2734 ###### parser functions
2736 static int pop(struct parser *p, int num,
2737 short *start_of_line,
2738 void(*do_free)(short sym, void *asn))
2745 for (i = 0; i < num; i++) {
2746 sol |= !p->stack[p->tos+i].since_newline;
2747 indents += p->stack[p->tos+i].indents;
2748 do_free(p->stack[p->tos+i].sym,
2749 p->asn_stack[p->tos+i]);
2752 *start_of_line = sol;
2756 ### The heart of the parser.
2758 Now we have the parser. For each token we might shift it, trigger a
2759 reduction, or start error handling. 2D tokens (IN, OUT, EOL) also need
2762 We return whatever `asn` was returned by reducing production zero.
2764 If we can neither shift nor reduce we have an error to handle. We pop
2765 single entries off the stack until we can shift the `TK_error` symbol,
2766 then drop input tokens until we find one we can shift into the new error
2767 state. We need to ensure that something is dropped or shifted after an
2768 error, or we could get into an infinite loop, only shifting in
2769 `TK_error`, then reducing. So we track if there has been a shift since
2770 the last error, and if not the next error always discards one token.
2772 When we find `TK_in` and `TK_out` tokens which report indents we need
2773 to handle them directly as the grammar cannot express what we want to
2776 `TK_in` tokens are easy: we simply update the indent count in the top stack frame to
2777 record how many indents there are following the previous token.
2779 `TK_out` tokens must be canceled against an indent count
2780 within the stack. If we can reduce some symbols that are all since
2781 the most recent indent, then we do that first. If the minimum prefix
2782 of the current state then extends back before the most recent indent,
2783 that indent can be cancelled. If the minimum prefix is shorter then
2784 the indent had ended prematurely and we must start error handling, which
2785 is still a work-in-progress.
2787 `TK_newline` tokens are ignored unless the top stack frame records
2788 that they are permitted. In that case they will not be considered for
2789 shifting if it is possible to reduce some symbols that are all since
2790 the most recent start of line. This is how a newline forcibly
2791 terminates any line-like structure - we try to reduce down to at most
2792 one symbol for each line where newlines are allowed.
2793 A consequence of this is that a rule like
2795 ###### Example: newlines - broken
2799 IfStatement -> Newlines if ....
2801 cannot work, as the NEWLINE will never be shifted as the empty string
2802 will be reduced first.  Optional sets of newlines need to be included
2803 in the thing that precedes:
2805 ###### Example: newlines - works
2809 IfStatement -> If ....
2811 Here the NEWLINE will be shifted because nothing can be reduced until
2814 When during error handling we discard tokens read in, we want to keep
2815 discarding until we see one that is recognised. If we had a full set
2816 of LR(1) grammar states, this would mean looking in the look-ahead set,
2817 but we don't keep a full look-ahead set. We only record the subset
2818 that leads to SHIFT. We can, however, deduce the look-ahead set by
2819 looking at the SHIFT subsets for all states that we can get to by
2820 reducing zero or more times. So we need a little function which
2821 checks if a given token is in any of these look-ahead sets.
2823 ###### parser includes
2828 static int in_lookahead(struct token *tk, const struct state *states, int state)
2830 while (state >= 0) {
2831 if (search(&states[state], tk->num) >= 0)
2833 if (states[state].reduce_prod < 0)
2835 state = search(&states[state], states[state].reduce_sym);
2840 void *parser_run(struct token_state *tokens,
2841 const struct state states[],
2842 int (*do_reduce)(int, void**, struct token_config*, void*),
2843 void (*do_free)(short, void*),
2844 FILE *trace, const char *non_term[],
2845 struct token_config *config)
2847 struct parser p = { 0 };
2848 struct token *tk = NULL;
2850 int shift_since_err = 1;
2853 shift(&p, TK_eof, 0, 1, NULL, states);
2854 while (!accepted && p.tos > 0) {
2855 struct token *err_tk;
2856 struct frame *tos = &p.stack[p.tos-1];
2858 tk = tok_copy(token_next(tokens));
2859 parser_trace(trace, &p,
2860 tk, states, non_term, config->known_count);
2862 if (tk->num == TK_in) {
2864 tos->since_newline = 0;
2865 tos->since_indent = 0;
2868 parser_trace_action(trace, "Record");
2871 if (tk->num == TK_out) {
2872 if (states[tos->state].reduce_size >= 0 &&
2873 states[tos->state].reduce_size <= tos->since_indent)
2877 struct frame *in = tos - tos->since_indent;
2879 if (in->indents == 0) {
2880 /* Reassess since_indent and newline_permitted */
2882 in->since_indent = in[-1].since_indent + 1;
2883 in->newline_permitted = in[-1].newline_permitted;
2885 in->since_indent = 0;
2886 in->newline_permitted = 0;
2890 in->since_indent = in[-1].since_indent + 1;
2895 parser_trace_action(trace, "Cancel");
2898 // fall through to error handling as both SHIFT and REDUCE
2901 if (tk->num == TK_newline) {
2902 if (!tos->newline_permitted) {
2905 parser_trace_action(trace, "Discard");
2908 if (tos->since_newline > 1 &&
2909 states[tos->state].reduce_size >= 0 &&
2910 states[tos->state].reduce_size <= tos->since_newline)
2913 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2914 shift_since_err = 1;
2916 parser_trace_action(trace, "Shift");
2920 if (states[tos->state].reduce_prod >= 0 &&
2921 states[tos->state].newline_only &&
2922 !(tk->num == TK_newline ||
2923 tk->num == TK_eof ||
2924 tk->num == TK_out ||
2925 (tos->indents == 0 && tos->since_newline == 0))) {
2926 /* Anything other than newline or out or eof
2927 * is an error unless we are already at start
2928 * of line, as this production must end at EOL.
2930 } else if (states[tos->state].reduce_prod >= 0) {
2933 const struct state *nextstate = &states[tos->state];
2934 int prod = nextstate->reduce_prod;
2935 int size = nextstate->reduce_size;
2936 int res_size = nextstate->result_size;
2937 short indents, start_of_line;
2939 body = p.asn_stack + (p.tos - size);
2940 res = res_size ? calloc(1, res_size) : NULL;
2941 res_size = do_reduce(prod, body, config, res);
2942 if (res_size != nextstate->result_size)
2945 indents = pop(&p, size, &start_of_line,
2948 if (!shift(&p, nextstate->reduce_sym,
2949 indents, start_of_line,
2951 if (prod != 0) abort();
2955 parser_trace_action(trace, "Reduce");
2958 /* Error. We walk up the stack until we
2959 * find a state which will accept TK_error.
2960 * We then shift in TK_error and see what state
2961 * that takes us to.
2962 * Then we discard input tokens until
2963 * we find one that is acceptable.
2965 parser_trace_action(trace, "ERROR");
2966 short indents = 0, start_of_line;
2968 err_tk = tok_copy(*tk);
2970 shift(&p, TK_error, 0, 0,
2971 err_tk, states) == 0)
2972 // discard this state
2973 indents += pop(&p, 1, &start_of_line, do_free);
2976 // no state accepted TK_error
2979 if (!shift_since_err) {
2980 /* must discard at least one token to avoid
2983 if (tk->num == TK_eof)
2986 tk = tok_copy(token_next(tokens));
2988 shift_since_err = 0;
2989 tos = &p.stack[p.tos-1];
2990 while (!in_lookahead(tk, states, tos->state) &&
2991 tk->num != TK_eof) {
2993 tk = tok_copy(token_next(tokens));
2994 shift_since_err = 1;
2995 if (tk->num == TK_in)
2997 if (tk->num == TK_out) {
3001 // FIXME update since_indent here
3004 tos->indents += indents;
3007 pop(&p, p.tos, NULL, do_free);
3013 ###### exported functions
3014 void *parser_run(struct token_state *tokens,
3015 const struct state states[],
3016 int (*do_reduce)(int, void**, struct token_config*, void*),
3017 void (*do_free)(short, void*),
3018 FILE *trace, const char *non_term[],
3019 struct token_config *config);
3023 Being able to visualize the parser in action can be invaluable when
3024 debugging the parser code, or trying to understand how the parse of a
3025 particular grammar progresses. The stack contains all the important
3026 state, so just printing out the stack every time around the parse loop
3027 can make it possible to see exactly what is happening.
3029 This doesn't explicitly show each SHIFT and REDUCE action. However they
3030 are easily deduced from the change between consecutive lines, and the
3031 details of each state can be found by cross referencing the states list
3032 in the stack with the "report" that parsergen can generate.
3034 For terminal symbols, we just dump the token. For non-terminals we
3035 print the name of the symbol. The look ahead token is reported at the
3036 end inside square brackets.
3038 ###### parser functions
3040 static char *reserved_words[] = {
3041 [TK_error] = "ERROR",
3044 [TK_newline] = "NEWLINE",
3048 void parser_trace(FILE *trace, struct parser *p,
3049 struct token *tk, const struct state states[],
3050 const char *non_term[], int knowns)
3055 for (i = 0; i < p->tos; i++) {
3056 struct frame *f = &p->stack[i];
3059 if (sym < TK_reserved &&
3060 reserved_words[sym] != NULL)
3061 fputs(reserved_words[sym], trace);
3062 else if (sym < TK_reserved + knowns) {
3063 struct token *t = p->asn_stack[i];
3064 text_dump(trace, t->txt, 20);
3066 fputs(non_term[sym - TK_reserved - knowns],
3069 fprintf(trace, ".%d", f->indents);
3070 if (f->since_newline == 0)
3074 fprintf(trace, "(%d) ", f->state);
3076 fprintf(trace, "[");
3077 if (tk->num < TK_reserved &&
3078 reserved_words[tk->num] != NULL)
3079 fputs(reserved_words[tk->num], trace);
3081 text_dump(trace, tk->txt, 20);
3082 fprintf(trace, ":%d:%d]", tk->line, tk->col);
3085 void parser_trace_action(FILE *trace, char *action)
3088 fprintf(trace, " - %s\n", action);
3093 The obvious example for a parser is a calculator.
3095 As `scanner` provides number parsing function using `libgmp` it is not much
3096 work to perform arbitrary rational number calculations.
3098 This calculator takes one expression, or an equality test, per line.
3099 The results are printed and if any equality test fails, the program
3100 exits with an error.
3102 ###### File: parsergen.mk
3103 calc.c calc.h : parsergen parsergen.mdc
3104 ./parsergen --tag calc -o calc parsergen.mdc
3105 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
3106 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
3108 ./calc parsergen.mdc
3113 #include "parse_number.h"
3114 // what do we use for a demo-grammar? A calculator of course.
3126 #include <sys/mman.h>
3132 #include "scanner.h"
3137 static void free_number(struct number *n)
3143 static int text_is(struct text t, char *s)
3145 return (strlen(s) == t.len &&
3146 strncmp(s, t.txt, t.len) == 0);
3149 int main(int argc, char *argv[])
3151 int fd = open(argv[1], O_RDONLY);
3152 int len = lseek(fd, 0, 2);
3153 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
3154 struct section *table = code_extract(file, file+len, NULL);
3156 struct token_config config = {
3157 .ignored = (1 << TK_line_comment)
3160 .number_chars = ".,_+-",
3164 for (s = table; s; s = s->next)
3165 if (text_is(s->section, "example: input"))
3166 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
3168 struct section *t = table->next;
3169 code_free(table->code);
3181 Session -> Session Line
3184 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3185 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3186 gmp_printf(" or as a decimal: %Fg\n", fl);
3190 | Expression = Expression NEWLINE ${
3191 if (mpq_equal($1.val, $3.val))
3192 gmp_printf("Both equal %Qd\n", $1.val);
3194 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3199 | NEWLINE ${ printf("Blank line\n"); }$
3200 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3203 Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3204 | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3205 | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3206 | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3207 | Expression // Expression ${ {
3210 mpz_init(z0); mpz_init(z1); mpz_init(z2);
3211 mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
3212 mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
3213 mpz_tdiv_q(z0, z1, z2);
3214 mpq_set_z($0.val, z0);
3215 mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
3217 | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3218 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
3223 3.1415926535 - 355/113
3225 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
3227 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1