1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
25 ###### File: parsergen.c
30 ## forward declarations
41 ###### File: libparser.c
48 ###### File: parsergen.mk
51 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
54 ## Reading the grammar
56 The grammar must be stored in a markdown literate code file as parsed
57 by "[mdcode][]". It must have three top level (i.e. unreferenced)
58 sections: `header`, `code`, and `grammar`. The first two will be
59 literally copied into the generated `.c` and `.h` files. The last
60 contains the grammar. This is tokenised with "[scanner][]".
62 If the `--tag` option is given, then any top level header that doesn't
63 start with the tag is ignored, and the tag is stripped from the rest. So
65 means that the three needed sections must be `Foo: header`, `Foo: code`,
69 [scanner]: scanner.html
75 ###### parser includes
79 The grammar contains production sets, precedence/associativity
80 declarations, and data type declarations. These are all parsed with
81 _ad hoc_ parsing as we don't have a parser generator yet.
83 The precedence and associativity can be set for each production, but
84 can be inherited from symbols. The data type (either structure or a
85 reference to a structure) is potentially defined for each non-terminal
86 and describes what C structure is used to store information about each
90 enum assoc {Left, Right, Non};
91 char *assoc_names[] = {"Left","Right","Non"};
94 struct text struct_name;
97 unsigned short precedence;
101 unsigned short precedence;
109 The strings reported by `mdcode` and `scanner` are `struct text` which have
110 length rather than being null terminated. To help with printing and
111 comparing we define `text_is` and `prtxt`, which should possibly go in
112 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
113 which might contain control characters.
115 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
116 because that is what we need to detect tags.
119 static int text_is(struct text t, char *s)
121 return (strlen(s) == t.len &&
122 strncmp(s, t.txt, t.len) == 0);
124 static void prtxt(struct text t)
126 printf("%.*s", t.len, t.txt);
129 static int strip_tag(struct text *t, char *tag)
131 int skip = strlen(tag) + 1;
132 if (skip >= t->len ||
133 strncmp(t->txt, tag, skip-1) != 0 ||
134 t->txt[skip-1] != ':')
136 while (skip < t->len && t->txt[skip] == ' ')
145 Productions are comprised primarily of symbols - terminal and
146 non-terminal. We do not make a syntactic distinction between the two,
147 though non-terminals must be identifiers. Non-terminal symbols are
148 those which appear in the head of a production, terminal symbols are
149 those which don't. There are also "virtual" symbols used for precedence
150 marking discussed later, and sometimes we won't know what type a symbol
153 ###### forward declarations
154 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
155 char *symtypes = "UVTN";
159 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
160 table of known symbols and the resulting parser will report them as
161 `TK_reserved + N`. A small set of identifiers are reserved for the
162 different token types that `scanner` can report.
165 static char *reserved_words[] = {
166 [TK_error] = "ERROR",
167 [TK_number] = "NUMBER",
168 [TK_ident] = "IDENTIFIER",
170 [TK_string] = "STRING",
171 [TK_multi_string] = "MULTI_STRING",
174 [TK_newline] = "NEWLINE",
180 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
181 recognised. The former is automatically expected at the end of the text
182 being parsed. The latter are always ignored by the parser.
184 All of these symbols are stored in a simple symbol table. We use the
185 `struct text` from `mdcode` to store the name and link them together
186 into a sorted list using an insertion sort.
188 We don't have separate `find` and `insert` functions as any symbol we
189 find needs to be remembered. We simply expect `find` to always return a
190 symbol, but its type might be `Unknown`.
199 ###### grammar fields
204 static struct symbol *sym_find(struct grammar *g, struct text s)
206 struct symbol **l = &g->syms;
211 (cmp = text_cmp((*l)->name, s)) < 0)
215 n = calloc(1, sizeof(*n));
224 static void symbols_init(struct grammar *g)
226 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
228 for (i = 0; i < entries; i++) {
231 t.txt = reserved_words[i];
234 t.len = strlen(t.txt);
241 ### Data types and precedence.
243 Data type specification and precedence specification are both
244 introduced by a dollar sign at the start of the line. If the next
245 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
246 precedence, otherwise it specifies a data type.
248 The data type name is simply stored and applied to the head of all
249 subsequent productions. It must be the name of a structure optionally
250 preceded by an asterisk which means a reference or "pointer". So
251 `$expression` maps to `struct expression` and `$*statement` maps to
252 `struct statement *`.
254 Any productions given before the first data type declaration will have
255 no data type associated with them and can carry no information. In
256 order to allow other non-terminals to have no type, the data type
257 `$void` can be given. This does *not* mean that `struct void` will be
258 used, but rather that no type will be associated with future
261 The precedence line must contain a list of symbols - typically
262 terminal symbols, but not necessarily. It can only contain symbols
263 that have not been seen yet, so precedence declaration must precede
264 the productions that they affect.
266 A precedence line may also contain a "virtual" symbol which is an
267 identifier preceded by `$$`. Virtual terminals carry precedence
268 information but are not included in the grammar. A production can
269 declare that it inherits the precedence of a given virtual symbol.
271 This use for `$$` precludes it from being used as a symbol in the
272 described language. Two other symbols: `${` and `}$` are also
275 Each new precedence line introduces a new precedence level and
276 declares how it associates. This level is stored in each symbol
277 listed and may be inherited by any production which uses the symbol. A
278 production inherits from the last symbol which has a precedence.
280 ###### grammar fields
281 struct text current_type;
286 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
287 static const char *known[] = { "$$", "${", "}$" };
290 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
292 struct token t = token_next(ts);
297 if (t.num != TK_ident) {
298 err = "type or assoc expected after '$'";
301 if (text_is(t.txt, "LEFT"))
303 else if (text_is(t.txt, "RIGHT"))
305 else if (text_is(t.txt, "NON"))
308 g->current_type = t.txt;
309 g->type_isref = isref;
310 if (text_is(t.txt, "void"))
311 g->current_type.txt = NULL;
313 if (t.num != TK_newline) {
314 err = "Extra tokens after type name";
321 err = "$* cannot be followed by a precedence";
325 // This is a precedence line, need some symbols.
329 while (t.num != TK_newline) {
330 enum symtype type = Terminal;
332 if (t.num == TK_virtual) {
335 if (t.num != TK_ident) {
336 err = "$$ must be followed by a word";
339 } else if (t.num != TK_ident &&
341 err = "Illegal token in precedence line";
344 s = sym_find(g, t.txt);
345 if (s->type != Unknown) {
346 err = "Symbols in precedence line must not already be known.";
350 s->precedence = g->prec_levels;
355 err = "No symbols given on precedence line";
359 while (t.num != TK_newline && t.num != TK_eof)
366 A production either starts with an identifier which is the head
367 non-terminal, or a vertical bar (`|`) in which case this production
368 uses the same head as the previous one. The identifier must be
369 followed by a `->` mark. All productions for a given non-terminal must
370 be grouped together with the `nonterminal ->` given only once.
372 After this a (possibly empty) sequence of identifiers and marks form
373 the body of the production. A virtual symbol may be given after the
374 body by preceding it with `$$`. If a virtual symbol is given, the
375 precedence of the production is that for the virtual symbol. If none
376 is given, the precedence is inherited from the last symbol in the
377 production which has a precedence specified.
379 After the optional precedence may come the `${` mark. This indicates
380 the start of a code fragment. If present, this must be on the same
381 line as the start of the production.
383 All of the text from the `${` through to the matching `}$` is
384 collected and forms the code-fragment for the production. It must all
385 be in one `code_node` of the literate code. The `}$` must be
386 at the end of a line.
388 Text in the code fragment will undergo substitutions where `$N` or
389 `$<N`, for some numeric `N`, will be replaced with a variable holding
390 the parse information for the particular symbol in the production.
391 `$0` is the head of the production, `$1` is the first symbol of the
392 body, etc. The type of `$N` for a terminal symbol is `struct token`.
393 For a non-terminal, it is whatever has been declared for that symbol.
394 The `<` may be included for symbols declared as storing a reference
395 (not a structure) and means that the reference is being moved out, so
396 it will not automatically be freed.
398 While building productions we will need to add to an array which needs to
402 static void array_add(void *varray, int *cnt, void *new)
404 void ***array = varray;
407 current = ((*cnt-1) | (step-1))+1;
408 if (*cnt == current) {
411 *array = realloc(*array, current * sizeof(void*));
413 (*array)[*cnt] = new;
417 Collecting the code fragment simply involves reading tokens until we
418 hit the end token `}$`, and noting the character position of the start and
422 static struct text collect_code(struct token_state *state,
427 code.txt = start.txt.txt + start.txt.len;
429 t = token_next(state);
430 while (t.node == start.node &&
431 t.num != TK_close && t.num != TK_error &&
433 if (t.num == TK_close && t.node == start.node)
434 code.len = t.txt.txt - code.txt;
440 Now we have all the bits we need to parse a full production.
445 ###### grammar fields
446 struct production **productions;
447 int production_count;
449 ###### production fields
451 struct symbol **body;
456 int first_production;
459 static char *parse_production(struct grammar *g,
461 struct token_state *state)
463 /* Head has already been parsed. */
466 struct production p, *pp;
468 memset(&p, 0, sizeof(p));
470 tk = token_next(state);
471 while (tk.num == TK_ident || tk.num == TK_mark) {
472 struct symbol *bs = sym_find(g, tk.txt);
473 if (bs->type == Unknown)
475 if (bs->type == Virtual) {
476 err = "Virtual symbol not permitted in production";
479 if (bs->precedence) {
480 p.precedence = bs->precedence;
483 array_add(&p.body, &p.body_size, bs);
484 tk = token_next(state);
486 if (tk.num == TK_virtual) {
488 tk = token_next(state);
489 if (tk.num != TK_ident) {
490 err = "word required after $$";
493 vs = sym_find(g, tk.txt);
494 if (vs->type != Virtual) {
495 err = "symbol after $$ must be virtual";
498 p.precedence = vs->precedence;
500 tk = token_next(state);
502 if (tk.num == TK_open) {
503 p.code = collect_code(state, tk);
504 if (p.code.txt == NULL) {
505 err = "code fragment not closed properly";
508 tk = token_next(state);
510 if (tk.num != TK_newline && tk.num != TK_eof) {
511 err = "stray tokens at end of line";
514 pp = malloc(sizeof(*pp));
516 array_add(&g->productions, &g->production_count, pp);
519 while (tk.num != TK_newline && tk.num != TK_eof)
520 tk = token_next(state);
524 With the ability to parse production and dollar-lines, we have nearly all
525 that we need to parse a grammar from a `code_node`.
527 The head of the first production will effectively be the `start` symbol of
528 the grammar. However it won't _actually_ be so. Processing the grammar is
529 greatly simplified if the real start symbol only has a single production,
530 and expects `$eof` as the final terminal. So when we find the first
531 explicit production we insert an extra production as production zero which
534 ###### Example: production 0
537 where `START` is the first non-terminal given.
539 ###### create production zero
540 struct production *p = calloc(1,sizeof(*p));
541 struct text start = {"$start",6};
542 struct text eof = {"$eof",4};
543 struct text code = {"$0 = $<1;", 9};
544 p->head = sym_find(g, start);
545 p->head->type = Nonterminal;
546 p->head->struct_name = g->current_type;
547 p->head->isref = g->type_isref;
548 if (g->current_type.txt)
550 array_add(&p->body, &p->body_size, head);
551 array_add(&p->body, &p->body_size, sym_find(g, eof));
552 p->head->first_production = g->production_count;
553 array_add(&g->productions, &g->production_count, p);
555 Now we are ready to read in the grammar.
558 static struct grammar *grammar_read(struct code_node *code)
560 struct token_config conf = {
563 .words_marks = known,
564 .known_count = sizeof(known)/sizeof(known[0]),
566 .ignored = (1 << TK_line_comment)
567 | (1 << TK_block_comment)
570 | (1 << TK_multi_string)
575 struct token_state *state = token_open(code, &conf);
577 struct symbol *head = NULL;
581 g = calloc(1, sizeof(*g));
584 for (tk = token_next(state); tk.num != TK_eof;
585 tk = token_next(state)) {
586 if (tk.num == TK_newline)
588 if (tk.num == TK_ident) {
590 head = sym_find(g, tk.txt);
591 if (head->type == Nonterminal)
592 err = "This non-terminal has already be used.";
593 else if (head->type == Virtual)
594 err = "Virtual symbol not permitted in head of production";
596 head->type = Nonterminal;
597 head->struct_name = g->current_type;
598 head->isref = g->type_isref;
599 if (g->production_count == 0) {
600 ## create production zero
602 head->first_production = g->production_count;
603 tk = token_next(state);
604 if (tk.num == TK_mark &&
605 text_is(tk.txt, "->"))
606 err = parse_production(g, head, state);
608 err = "'->' missing in production";
610 } else if (tk.num == TK_mark
611 && text_is(tk.txt, "|")) {
612 // another production for same non-term
614 err = parse_production(g, head, state);
616 err = "First production must have a head";
617 } else if (tk.num == TK_mark
618 && text_is(tk.txt, "$")) {
619 err = dollar_line(state, g, 0);
620 } else if (tk.num == TK_mark
621 && text_is(tk.txt, "$*")) {
622 err = dollar_line(state, g, 1);
624 err = "Unrecognised token at start of line.";
632 fprintf(stderr, "Error at line %d: %s\n",
639 ## Analysing the grammar
641 The central task in analysing the grammar is to determine a set of
642 states to drive the parsing process. This will require finding
643 various sets of symbols and of "items". Some of these sets will need
644 to attach information to each element in the set, so they are more
647 Each "item" may have a 'look-ahead' or `LA` set associated with
648 it. Many of these will be the same as each other. To avoid memory
649 wastage, and to simplify some comparisons of sets, these sets will be
650 stored in a list of unique sets, each assigned a number.
652 Once we have the data structures in hand to manage these sets and
653 lists, we can start setting the 'nullable' flag, build the 'FIRST'
654 sets, and then create the item sets which define the various states.
658 Though we don't only store symbols in these sets, they are the main
659 things we store, so they are called symbol sets or "symsets".
661 A symset has a size, an array of shorts, and an optional array of data
662 values, which are also shorts. If the array of data is not present,
663 we store an impossible pointer, as `NULL` is used to indicate that no
664 memory has been allocated yet.
669 unsigned short *syms, *data;
671 #define NO_DATA ((unsigned short *)1)
672 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
673 const struct symset INIT_DATASET = { 0, NULL, NULL };
675 The arrays of shorts are allocated in blocks of 8 and are kept sorted
676 by using an insertion sort. We don't explicitly record the amount of
677 allocated space as it can be derived directly from the current `cnt` using
678 `((cnt - 1) | 7) + 1`.
681 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
684 int current = ((s->cnt-1) | 7) + 1;
685 if (current == s->cnt) {
687 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
688 if (s->data != NO_DATA)
689 s->data = realloc(s->data, sizeof(*s->data) * current);
692 while (i > 0 && s->syms[i-1] > key) {
693 s->syms[i] = s->syms[i-1];
694 if (s->data != NO_DATA)
695 s->data[i] = s->data[i-1];
699 if (s->data != NO_DATA)
704 Finding a symbol (or item) in a `symset` uses a simple binary search.
705 We return the index where the value was found (so data can be accessed),
706 or `-1` to indicate failure.
708 static int symset_find(struct symset *ss, unsigned short key)
715 while (lo + 1 < hi) {
716 int mid = (lo + hi) / 2;
717 if (ss->syms[mid] <= key)
722 if (ss->syms[lo] == key)
727 We will often want to form the union of two symsets. When we do, we
728 will often want to know if anything changed (as they might mean there
729 is more work to do). So `symset_union` reports whether anything was
730 added to the first set. We use a slow quadratic approach as these
731 sets don't really get very big. If profiling shows this to be a problem it
732 can be optimised later.
734 static int symset_union(struct symset *a, struct symset *b)
738 for (i = 0; i < b->cnt; i++)
739 if (symset_find(a, b->syms[i]) < 0) {
740 unsigned short data = 0;
741 if (b->data != NO_DATA)
743 symset_add(a, b->syms[i], data);
749 And of course we must be able to free a symset.
751 static void symset_free(struct symset ss)
754 if (ss.data != NO_DATA)
760 Some symsets are simply stored somewhere appropriate in the `grammar`
761 data structure, others need a bit of indirection. This applies
762 particularly to "LA" sets. These will be paired with "items" in an
763 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
764 stored in the `data` field, so we need a mapping from a small (short)
765 number to an LA `symset`.
767 As mentioned earlier this involves creating a list of unique symsets.
769 For now, we just use a linear list sorted by insertion. A skiplist
770 would be more efficient and may be added later.
777 struct setlist *next;
780 ###### grammar fields
781 struct setlist *sets;
786 static int ss_cmp(struct symset a, struct symset b)
789 int diff = a.cnt - b.cnt;
792 for (i = 0; i < a.cnt; i++) {
793 diff = (int)a.syms[i] - (int)b.syms[i];
800 static int save_set(struct grammar *g, struct symset ss)
802 struct setlist **sl = &g->sets;
806 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
813 s = malloc(sizeof(*s));
822 Finding a set by number is currently performed by a simple linear search.
823 If this turns out to hurt performance, we can store the sets in a dynamic
824 array like the productions.
826 static struct symset set_find(struct grammar *g, int num)
828 struct setlist *sl = g->sets;
829 while (sl && sl->num != num)
835 ### Setting `nullable`
837 We set `nullable` on the head symbol for any production for which all
838 the body symbols (if any) are nullable. As this is a recursive
839 definition, any change in the `nullable` setting means that we need to
840 re-evaluate where it needs to be set.
842 We simply loop around performing the same calculations until no more
849 static void set_nullable(struct grammar *g)
852 while (check_again) {
855 for (p = 0; p < g->production_count; p++) {
856 struct production *pr = g->productions[p];
859 if (pr->head->nullable)
861 for (s = 0; s < pr->body_size; s++)
862 if (! pr->body[s]->nullable)
864 if (s == pr->body_size) {
865 pr->head->nullable = 1;
872 ### Setting `can_eol` and `line_like`
874 In order to be able to ignore newline tokens when not relevant, but
875 still include them in the parse when needed, we will need to know
876 which states can start a "line-like" section of code. We ignore
877 newlines when there is an indent since the most recent start of a
880 To know which symbols are line-like, we first need to know which
881 symbols start with a NEWLINE token. Any symbol which is followed by a
882 NEWLINE, or anything that starts with a NEWLINE, is deemed to be a line-like symbol.
883 Certainly when trying to parse one of these we must take note of NEWLINEs.
885 Clearly the `TK_newline` token can start with a NEWLINE. Any symbol
886 which is the head of a production that contains a starts-with-NEWLINE
887 symbol preceded only by nullable symbols is also a
888 starts-with-NEWLINE symbol. We use a new field `can_eol` to record
889 this attribute of symbols, and compute it in a repetitive manner
890 similar to `set_nullable`.
892 Once we have that, we can determine which symbols are `line_like` by
893 seeing which are followed by a `can_eol` symbol in any production.
900 static void set_can_eol(struct grammar *g)
903 g->symtab[TK_newline]->can_eol = 1;
904 while (check_again) {
907 for (p = 0; p < g->production_count; p++) {
908 struct production *pr = g->productions[p];
911 if (pr->head->can_eol)
914 for (s = 0 ; s < pr->body_size; s++) {
915 if (pr->body[s]->can_eol) {
916 pr->head->can_eol = 1;
920 if (!pr->body[s]->nullable)
927 static void set_line_like(struct grammar *g)
930 for (p = 0; p < g->production_count; p++) {
931 struct production *pr = g->productions[p];
934 for (s = 1; s < pr->body_size; s++)
935 if (pr->body[s]->can_eol)
936 pr->body[s-1]->line_like = 1;
940 ### Building the `first` sets
942 When calculating what can follow a particular non-terminal, we will need to
943 know what the "first" terminal in any subsequent non-terminal might be. So
944 we calculate the `first` set for every non-terminal and store them in an
945 array. We don't bother recording the "first" set for terminals as they are
948 As the "first" for one symbol might depend on the "first" of another,
949 we repeat the calculations until no changes happen, just like with
950 `set_nullable`. This makes use of the fact that `symset_union`
951 reports if any change happens.
953 The core of this which finds the "first" of part of a production body
954 will be reused for computing the "follow" sets, so we split it out
955 into a separate function.
957 ###### grammar fields
958 struct symset *first;
962 static int add_first(struct production *pr, int start,
963 struct symset *target, struct grammar *g,
968 for (s = start; s < pr->body_size; s++) {
969 struct symbol *bs = pr->body[s];
970 if (bs->type == Terminal) {
971 if (symset_find(target, bs->num) < 0) {
972 symset_add(target, bs->num, 0);
976 } else if (symset_union(target, &g->first[bs->num]))
982 *to_end = (s == pr->body_size);
986 static void build_first(struct grammar *g)
990 g->first = calloc(g->num_syms, sizeof(g->first[0]));
991 for (s = 0; s < g->num_syms; s++)
992 g->first[s] = INIT_SYMSET;
994 while (check_again) {
997 for (p = 0; p < g->production_count; p++) {
998 struct production *pr = g->productions[p];
999 struct symset *head = &g->first[pr->head->num];
1001 if (add_first(pr, 0, head, g, NULL))
1007 ### Building the `follow` sets.
1009 There are two different situations when we will want to generate "follow"
1010 sets. If we are doing an SLR analysis, we want to generate a single
1011 "follow" set for each non-terminal in the grammar. That is what is
1012 happening here. If we are doing an LALR or LR analysis we will want
1013 to generate a separate "LA" set for each item. We do that later
1014 in state generation.
1016 There are two parts to generating a "follow" set. Firstly we look at
1017 every place that any non-terminal appears in the body of any
1018 production, and we find the set of possible "first" symbols after
1019 there. This is done using `add_first` above and only needs to be done
1020 once as the "first" sets are now stable and will not change.
1024 for (p = 0; p < g->production_count; p++) {
1025 struct production *pr = g->productions[p];
1028 for (b = 0; b < pr->body_size - 1; b++) {
1029 struct symbol *bs = pr->body[b];
1030 if (bs->type == Terminal)
1032 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1036 The second part is to add the "follow" set of the head of a production
1037 to the "follow" sets of the final symbol in the production, and any
1038 other symbol which is followed only by `nullable` symbols. As this
1039 depends on "follow" itself we need to repeatedly perform the process
1040 until no further changes happen.
1044 for (again = 0, p = 0;
1045 p < g->production_count;
1046 p < g->production_count-1
1047 ? p++ : again ? (again = 0, p = 0)
1049 struct production *pr = g->productions[p];
1052 for (b = pr->body_size - 1; b >= 0; b--) {
1053 struct symbol *bs = pr->body[b];
1054 if (bs->type == Terminal)
1056 if (symset_union(&g->follow[bs->num],
1057 &g->follow[pr->head->num]))
1064 We now just need to create and initialise the `follow` list to get a
1067 ###### grammar fields
1068 struct symset *follow;
1071 static void build_follow(struct grammar *g)
1074 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1075 for (s = 0; s < g->num_syms; s++)
1076 g->follow[s] = INIT_SYMSET;
1080 ### Building itemsets and states
1082 There are three different levels of detail that can be chosen for
1083 building the itemsets and states for the LR grammar. They are:
1085 1. LR(0) or SLR(1), where no look-ahead is considered.
1086 2. LALR(1) where we build look-ahead sets with each item and merge
1087 the LA sets when we find two paths to the same "kernel" set of items.
1088 3. LR(1) where different look-ahead for any item in the set means
1089 a different state must be created.
1091 ###### forward declarations
1092 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1094 We need to be able to look through existing states to see if a newly
1095 generated state already exists. For now we use a simple sorted linked
1098 An item is a pair of numbers: the production number and the position of
1099 "DOT", which is an index into the body of the production.
1100 As the numbers are not enormous we can combine them into a single "short"
1101 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1102 production number (so 4000 productions with maximum of 15 symbols in the
1105 Comparing the itemsets will be a little different to comparing symsets
1106 as we want to do the lookup after generating the "kernel" of an
1107 itemset, so we need to ignore the offset=zero items which are added during
1110 To facilitate this, we modify the "DOT" number so that "0" sorts to
1111 the end of the list in the symset, and then only compare items before
1115 static inline unsigned short item_num(int production, int index)
1117 return production | ((31-index) << 11);
1119 static inline int item_prod(unsigned short item)
1121 return item & 0x7ff;
1123 static inline int item_index(unsigned short item)
1125 return (31-(item >> 11)) & 0x1f;
1128 For LR(1) analysis we need to compare not just the itemset in a state
1129 but also the LA sets. As we assign each unique LA set a number, we
1130 can just compare the symset and the data values together.
1133 static int itemset_cmp(struct symset a, struct symset b,
1134 enum grammar_type type)
1140 i < a.cnt && i < b.cnt &&
1141 item_index(a.syms[i]) > 0 &&
1142 item_index(b.syms[i]) > 0;
1144 int diff = a.syms[i] - b.syms[i];
1148 diff = a.data[i] - b.data[i];
1153 if (i == a.cnt || item_index(a.syms[i]) == 0)
1157 if (i == b.cnt || item_index(b.syms[i]) == 0)
1163 if (type < LR1 || av == -1)
1166 a.data[i] - b.data[i];
1169 And now we can build the list of itemsets. The lookup routine returns
1170 both a success flag and a pointer to where in the list an insert
1171 should happen, so we don't need to search a second time.
1175 struct itemset *next;
1177 struct symset items;
1178 struct symset go_to;
1184 ###### grammar fields
1185 struct itemset *items;
1189 static int itemset_find(struct grammar *g, struct itemset ***where,
1190 struct symset kernel, enum grammar_type type)
1192 struct itemset **ip;
1194 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1195 struct itemset *i = *ip;
1197 diff = itemset_cmp(i->items, kernel, type);
1210 Adding an itemset may require merging the LA sets if LALR analysis is
1211 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1212 clear the `completed` flag so that the dependants of this itemset will be
1213 recalculated and their LA sets updated.
1215 `add_itemset` must consume the symsets it is passed, either by adding
1216 them to a data structure, or freeing them.
1218 static int add_itemset(struct grammar *g, struct symset ss,
1219 enum grammar_type type)
1221 struct itemset **where, *is;
1223 int found = itemset_find(g, &where, ss, type);
1225 is = calloc(1, sizeof(*is));
1226 is->state = g->states;
1230 is->go_to = INIT_DATASET;
1239 for (i = 0; i < ss.cnt; i++) {
1240 struct symset temp = INIT_SYMSET, s;
1241 if (ss.data[i] == is->items.data[i])
1243 s = set_find(g, is->items.data[i]);
1244 symset_union(&temp, &s);
1245 s = set_find(g, ss.data[i]);
1246 if (symset_union(&temp, &s)) {
1247 is->items.data[i] = save_set(g, temp);
1258 To build all the itemsets, we first insert the initial itemset made
1259 from production zero, complete each itemset, and then generate new
1260 itemsets from old until no new ones can be made.
1262 Completing an itemset means finding all the items where "DOT" is followed by
1263 a nonterminal and adding "DOT=0" items for every production from that
1264 non-terminal - providing each item hasn't already been added.
1266 If LA sets are needed, the LA set for each new item is found using
1267 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1268 be supplemented by the LA set for the item which produces the new item.
1270 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1271 is used in the next stage.
1272 If any of these symbols are flagged as starting a line, then this
1273 state must be a `starts_line` state so now is a good time to record that.
1275 NOTE: precedence handling should happen here - I haven't written this yet
1278 ###### complete itemset
1279 for (i = 0; i < is->items.cnt; i++) {
1280 int p = item_prod(is->items.syms[i]);
1281 int bs = item_index(is->items.syms[i]);
1282 struct production *pr = g->productions[p];
1285 struct symset LA = INIT_SYMSET;
1286 unsigned short sn = 0;
1288 if (is->min_prefix == 0 ||
1289 (bs > 0 && bs < is->min_prefix))
1290 is->min_prefix = bs;
1291 if (bs == pr->body_size)
1294 if (symset_find(&done, s->num) < 0) {
1295 symset_add(&done, s->num, 0);
1297 is->starts_line = 1;
1299 if (s->type != Nonterminal)
1305 add_first(pr, bs+1, &LA, g, &to_end);
1307 struct symset ss = set_find(g, is->items.data[i]);
1308 symset_union(&LA, &ss);
1310 sn = save_set(g, LA);
1311 LA = set_find(g, sn);
1314 /* Add productions for this symbol */
1315 for (p2 = s->first_production;
1316 p2 < g->production_count &&
1317 g->productions[p2]->head == s;
1319 int itm = item_num(p2, 0);
1320 int pos = symset_find(&is->items, itm);
1322 symset_add(&is->items, itm, sn);
1323 /* Will have re-ordered, so start
1324 * from beginning again */
1326 } else if (type >= LALR) {
1327 struct symset ss = set_find(g, is->items.data[pos]);
1328 struct symset tmp = INIT_SYMSET;
1330 symset_union(&tmp, &ss);
1331 if (symset_union(&tmp, &LA)) {
1332 is->items.data[pos] = save_set(g, tmp);
1340 For each symbol we found (and placed in `done`) we collect all the items for
1341 which this symbol is next, and create a new itemset with "DOT" advanced over
1342 the symbol. This is then added to the collection of itemsets (or merged
1343 with a pre-existing itemset).
1345 ###### derive itemsets
1346 // Now we have a completed itemset, so we need to
1347 // compute all the 'next' itemsets and create them
1348 // if they don't exist.
1349 for (i = 0; i < done.cnt; i++) {
1351 unsigned short state;
1352 struct symbol *sym = g->symtab[done.syms[i]];
1353 struct symset newitemset = INIT_SYMSET;
1355 newitemset = INIT_DATASET;
1357 for (j = 0; j < is->items.cnt; j++) {
1358 int itm = is->items.syms[j];
1359 int p = item_prod(itm);
1360 int bp = item_index(itm);
1361 struct production *pr = g->productions[p];
1362 unsigned short la = 0;
1365 if (bp == pr->body_size)
1367 if (pr->body[bp] != sym)
1370 la = is->items.data[j];
1371 pos = symset_find(&newitemset, pr->head->num);
1373 symset_add(&newitemset, item_num(p, bp+1), la);
1374 else if (type >= LALR) {
1375 // Need to merge la set.
1376 int la2 = newitemset.data[pos];
1378 struct symset ss = set_find(g, la2);
1379 struct symset LA = INIT_SYMSET;
1380 symset_union(&LA, &ss);
1381 ss = set_find(g, la);
1382 if (symset_union(&LA, &ss))
1383 newitemset.data[pos] = save_set(g, LA);
1389 state = add_itemset(g, newitemset, type);
1390 if (symset_find(&is->go_to, done.syms[i]) < 0)
1391 symset_add(&is->go_to, done.syms[i], state);
1394 All that is left is to create the initial itemset from production zero, and
1395 with `TK_eof` as the LA set.
1398 static void build_itemsets(struct grammar *g, enum grammar_type type)
1400 struct symset first = INIT_SYMSET;
1403 unsigned short la = 0;
1405 // LA set just has eof
1406 struct symset eof = INIT_SYMSET;
1407 symset_add(&eof, TK_eof, 0);
1408 la = save_set(g, eof);
1409 first = INIT_DATASET;
1411 // production 0, offset 0 (with no data)
1412 symset_add(&first, item_num(0, 0), la);
1413 add_itemset(g, first, type);
1414 for (again = 0, is = g->items;
1416 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1418 struct symset done = INIT_SYMSET;
1429 ### Completing the analysis.
1431 The exact process of analysis depends on which level was requested. For
1432 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1433 `LR1` we need first, but not follow. For `SLR` we need both.
1435 We don't build the "action" tables that you might expect as the parser
1436 doesn't use them. They will be calculated to some extent if needed for
1439 Once we have built everything we allocate arrays for the two lists:
1440 symbols and itemsets. This allows more efficient access during reporting.
1441 The symbols are grouped as terminals and non-terminals and we record the
1442 changeover point in `first_nonterm`.
1444 ###### grammar fields
1445 struct symbol **symtab;
1446 struct itemset **statetab;
1449 ###### grammar_analyse
1451 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1455 int snum = TK_reserved;
1456 for (s = g->syms; s; s = s->next)
1457 if (s->num < 0 && s->type == Terminal) {
1461 g->first_nonterm = snum;
1462 for (s = g->syms; s; s = s->next)
1468 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1469 for (s = g->syms; s; s = s->next)
1470 g->symtab[s->num] = s;
1481 build_itemsets(g, type);
1483 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1484 for (is = g->items; is ; is = is->next)
1485 g->statetab[is->state] = is;
1488 ## Reporting on the Grammar
1490 The purpose of the report is to give the grammar developer insight into
1491 how the grammar parser will work. It is basically a structured dump of
1492 all the tables that have been generated, plus a description of any conflicts.
1494 ###### grammar_report
1495 static int grammar_report(struct grammar *g, enum grammar_type type)
1501 return report_conflicts(g, type);
1504 Firstly we have the complete list of symbols, together with the
1505 "FIRST" set if that was generated. We add a mark to each symbol to
1506 show if it can end in a newline (`>`), if it is considered to be
1507 "line-like" (`<`), or if it is nullable (`.`).
1511 static void report_symbols(struct grammar *g)
1515 printf("SYMBOLS + FIRST:\n");
1517 printf("SYMBOLS:\n");
1519 for (n = 0; n < g->num_syms; n++) {
1520 struct symbol *s = g->symtab[n];
1524 printf(" %c%c%c%3d%c: ",
1525 s->nullable ? '.':' ',
1526 s->can_eol ? '>':' ',
1527 s->line_like ? '<':' ',
1528 s->num, symtypes[s->type]);
1531 printf(" (%d%s)", s->precedence,
1532 assoc_names[s->assoc]);
1534 if (g->first && s->type == Nonterminal) {
1537 for (j = 0; j < g->first[n].cnt; j++) {
1540 prtxt(g->symtab[g->first[n].syms[j]]->name);
1547 Then we have the follow sets if they were computed.
1549 static void report_follow(struct grammar *g)
1552 printf("FOLLOW:\n");
1553 for (n = 0; n < g->num_syms; n++)
1554 if (g->follow[n].cnt) {
1558 prtxt(g->symtab[n]->name);
1559 for (j = 0; j < g->follow[n].cnt; j++) {
1562 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1568 And finally the item sets. These include the GO TO tables and, for
1569 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1570 it up a bit. First the items, with production number and associativity.
1572 static void report_item(struct grammar *g, int itm)
1574 int p = item_prod(itm);
1575 int dot = item_index(itm);
1576 struct production *pr = g->productions[p];
1580 prtxt(pr->head->name);
1582 for (i = 0; i < pr->body_size; i++) {
1583 printf(" %s", (dot == i ? ". ": ""));
1584 prtxt(pr->body[i]->name);
1586 if (dot == pr->body_size)
1590 printf(" (%d%s)", pr->precedence,
1591 assoc_names[pr->assoc]);
1595 The LA sets which are (possibly) reported with each item:
1597 static void report_la(struct grammar *g, int lanum)
1599 struct symset la = set_find(g, lanum);
1603 printf(" LOOK AHEAD(%d)", lanum);
1604 for (i = 0; i < la.cnt; i++) {
1607 prtxt(g->symtab[la.syms[i]]->name);
1612 Then the go to sets:
1615 static void report_goto(struct grammar *g, struct symset gt)
1620 for (i = 0; i < gt.cnt; i++) {
1622 prtxt(g->symtab[gt.syms[i]]->name);
1623 printf(" -> %d\n", gt.data[i]);
1627 Now we can report all the item sets complete with items, LA sets, and GO TO.
1629 static void report_itemsets(struct grammar *g)
1632 printf("ITEM SETS(%d)\n", g->states);
1633 for (s = 0; s < g->states; s++) {
1635 struct itemset *is = g->statetab[s];
1636 printf(" Itemset %d:%s min prefix=%d\n",
1637 s, is->starts_line?" (startsline)":"", is->min_prefix);
1638 for (j = 0; j < is->items.cnt; j++) {
1639 report_item(g, is->items.syms[j]);
1640 if (is->items.data != NO_DATA)
1641 report_la(g, is->items.data[j]);
1643 report_goto(g, is->go_to);
1647 ### Reporting conflicts
1649 Conflict detection varies a lot among different analysis levels. However
1650 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1651 LR1 are also similar as they have FOLLOW or LA sets.
1655 ## conflict functions
1657 static int report_conflicts(struct grammar *g, enum grammar_type type)
1660 printf("Conflicts:\n");
1662 cnt = conflicts_lr0(g, type);
1664 cnt = conflicts_slr(g, type);
1666 printf(" - no conflicts\n");
1670 LR0 conflicts are any state which have both a reducible item and
1671 a shiftable item, or two reducible items.
1673 LR05 conflicts only occur if two possible reductions exist,
1674 as shifts always over-ride reductions.
1676 ###### conflict functions
1677 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1681 for (i = 0; i < g->states; i++) {
1682 struct itemset *is = g->statetab[i];
1683 int last_reduce = -1;
1684 int prev_reduce = -1;
1685 int last_shift = -1;
1689 for (j = 0; j < is->items.cnt; j++) {
1690 int itm = is->items.syms[j];
1691 int p = item_prod(itm);
1692 int bp = item_index(itm);
1693 struct production *pr = g->productions[p];
1695 if (bp == pr->body_size) {
1696 prev_reduce = last_reduce;
1700 if (pr->body[bp]->type == Terminal)
1703 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1704 printf(" State %d has both SHIFT and REDUCE:\n", i);
1705 report_item(g, is->items.syms[last_shift]);
1706 report_item(g, is->items.syms[last_reduce]);
1709 if (prev_reduce >= 0) {
1710 printf(" State %d has 2 (or more) reducible items\n", i);
1711 report_item(g, is->items.syms[prev_reduce]);
1712 report_item(g, is->items.syms[last_reduce]);
1719 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1720 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1721 in the source of the look ahead set.
1723 We build two datasets to reflect the "action" table: one which maps
1724 terminals to items where that terminal could be shifted and another
1725 which maps terminals to items that could be reduced when the terminal
1726 is in look-ahead. We report when we get conflicts between the two.
1728 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1733 for (i = 0; i < g->states; i++) {
1734 struct itemset *is = g->statetab[i];
1735 struct symset shifts = INIT_DATASET;
1736 struct symset reduce = INIT_DATASET;
1740 /* First collect the shifts */
1741 for (j = 0; j < is->items.cnt; j++) {
1742 unsigned short itm = is->items.syms[j];
1743 int p = item_prod(itm);
1744 int bp = item_index(itm);
1745 struct production *pr = g->productions[p];
1747 if (bp < pr->body_size &&
1748 pr->body[bp]->type == Terminal) {
1750 int sym = pr->body[bp]->num;
1751 if (symset_find(&shifts, sym) < 0)
1752 symset_add(&shifts, sym, itm);
1755 /* Now look for reduction and conflicts */
1756 for (j = 0; j < is->items.cnt; j++) {
1757 unsigned short itm = is->items.syms[j];
1758 int p = item_prod(itm);
1759 int bp = item_index(itm);
1760 struct production *pr = g->productions[p];
1762 if (bp < pr->body_size)
1767 la = g->follow[pr->head->num];
1769 la = set_find(g, is->items.data[j]);
1771 for (k = 0; k < la.cnt; k++) {
1772 int pos = symset_find(&shifts, la.syms[k]);
1774 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1775 prtxt(g->symtab[la.syms[k]]->name);
1777 report_item(g, shifts.data[pos]);
1778 report_item(g, itm);
1781 pos = symset_find(&reduce, la.syms[k]);
1783 symset_add(&reduce, la.syms[k], itm);
1786 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1787 prtxt(g->symtab[la.syms[k]]->name);
1789 report_item(g, itm);
1790 report_item(g, reduce.data[pos]);
1794 symset_free(shifts);
1795 symset_free(reduce);
1801 ## Generating the parser
1803 The exported part of the parser is the `parse_XX` function, where the name
1804 `XX` is based on the name of the parser files.
1806 This takes a `code_node`, a partially initialized `token_config`, and an
1807 optional `FILE` to send tracing to. The `token_config` gets the list of
1808 known words added and then is used with the `code_node` to initialize the
1811 `parse_XX` then calls the library function `parser_run` to actually complete
1812 the parse. This needs the `states` table and function to call the various
1813 pieces of code provided in the grammar file, so they are generated first.
1815 ###### parser_generate
1817 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
1823 gen_reduce(f, g, file);
1826 fprintf(f, "#line 0 \"gen_parser\"\n");
1827 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1830 fprintf(f, "\tstruct token_state *tokens;\n");
1831 fprintf(f, "\tconfig->words_marks = known;\n");
1832 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1833 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1834 fprintf(f, "\ttokens = token_open(code, config);\n");
1835 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1836 fprintf(f, "\ttoken_close(tokens);\n");
1837 fprintf(f, "\treturn rv;\n");
1838 fprintf(f, "}\n\n");
1841 ### Known words table
1843 The known words table is simply an array of terminal symbols.
1844 The table of nonterminals used for tracing is a similar array.
1848 static void gen_known(FILE *f, struct grammar *g)
1851 fprintf(f, "#line 0 \"gen_known\"\n");
1852 fprintf(f, "static const char *known[] = {\n");
1853 for (i = TK_reserved;
1854 i < g->num_syms && g->symtab[i]->type == Terminal;
1856 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1857 g->symtab[i]->name.txt);
1858 fprintf(f, "};\n\n");
1861 static void gen_non_term(FILE *f, struct grammar *g)
1864 fprintf(f, "#line 0 \"gen_non_term\"\n");
1865 fprintf(f, "static const char *non_term[] = {\n");
1866 for (i = TK_reserved;
1869 if (g->symtab[i]->type == Nonterminal)
1870 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1871 g->symtab[i]->name.txt);
1872 fprintf(f, "};\n\n");
1875 ### States and the goto tables.
1877 For each state we record the goto table, the reducible production if
1878 there is one, or a symbol to shift for error recovery.
1879 Some of the details of the reducible production are stored in the
1880 `do_reduce` function to come later. Here we store the production number,
1881 the body size (useful for stack management) and the resulting symbol (useful
1882 for knowing how to free data later).
1884 The go to table is stored in a simple array of `sym` and corresponding
1887 ###### exported types
1895 const struct lookup * go_to;
1907 static void gen_goto(FILE *f, struct grammar *g)
1910 fprintf(f, "#line 0 \"gen_goto\"\n");
1911 for (i = 0; i < g->states; i++) {
1913 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1915 struct symset gt = g->statetab[i]->go_to;
1916 for (j = 0; j < gt.cnt; j++)
1917 fprintf(f, "\t{ %d, %d },\n",
1918 gt.syms[j], gt.data[j]);
1925 static void gen_states(FILE *f, struct grammar *g)
1928 fprintf(f, "#line 0 \"gen_states\"\n");
1929 fprintf(f, "static const struct state states[] = {\n");
1930 for (i = 0; i < g->states; i++) {
1931 struct itemset *is = g->statetab[i];
1932 int j, prod = -1, prod_len;
1934 int shift_len = 0, shift_remain = 0;
1935 for (j = 0; j < is->items.cnt; j++) {
1936 int itm = is->items.syms[j];
1937 int p = item_prod(itm);
1938 int bp = item_index(itm);
1939 struct production *pr = g->productions[p];
1941 if (bp < pr->body_size) {
1942 if (shift_sym < 0 ||
1943 (shift_len == bp && shift_remain > pr->body_size - bp)) {
1944 shift_sym = pr->body[bp]->num;
1946 shift_remain = pr->body_size - bp;
1950 /* This is what we reduce */
1951 if (prod < 0 || prod_len < pr->body_size) {
1953 prod_len = pr->body_size;
1958 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d, %d },\n",
1959 i, is->go_to.cnt, i, prod,
1960 g->productions[prod]->body_size,
1961 g->productions[prod]->head->num,
1962 is->starts_line, is->min_prefix);
1964 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d, %d },\n",
1965 i, is->go_to.cnt, i, shift_sym,
1966 is->starts_line, is->min_prefix);
1968 fprintf(f, "};\n\n");
1971 ### The `do_reduce` function and the code
1973 When the parser engine decides to reduce a production, it calls `do_reduce`.
1974 This has two functions.
1976 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
1977 production being reduced. Secondly it runs the code that was included with
1978 the production if any.
1980 This code needs to be able to store data somewhere. Rather than requiring
1981 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
1982 `do_reduce` return the size to be saved.
1984 In order for the code to access "global" context, we pass in the
1985 "config" pointer that was passed to parser function. If the `struct
1986 token_config` is embedded in some larger structure, the reducing code
1987 can access the larger structure using pointer manipulation.
1989 The code fragment requires translation when written out. Any `$N` needs to
1990 be converted to a reference either to that buffer (if `$0`) or to the
1991 structure returned by a previous reduction. These pointers need to be cast
1992 to the appropriate type for each access. All this is handled in
1995 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
1996 This applies only to symbols with references (or pointers), not those with structures.
1997 The `<` implies that the reference is being moved out, so the object will not be
1998 automatically freed. This is equivalent to assigning `NULL` to the pointer.
2002 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2005 char *used = calloc(1, p->body_size);
2008 fprintf(f, "\t\t\t");
2009 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2023 if (*c < '0' || *c > '9') {
2030 while (c[1] >= '0' && c[1] <= '9') {
2032 n = n * 10 + *c - '0';
2035 fprintf(f, "(*(struct %.*s*%s)ret)",
2036 p->head->struct_name.len,
2037 p->head->struct_name.txt,
2038 p->head->isref ? "*":"");
2039 else if (n > p->body_size)
2040 fprintf(f, "$%d", n);
2041 else if (p->body[n-1]->type == Terminal)
2042 fprintf(f, "(*(struct token *)body[%d])",
2044 else if (p->body[n-1]->struct_name.txt == NULL)
2045 fprintf(f, "$%d", n);
2047 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2048 p->body[n-1]->struct_name.len,
2049 p->body[n-1]->struct_name.txt,
2050 p->body[n-1]->isref ? "*":"", n-1);
2055 for (i = 0; i < p->body_size; i++) {
2056 if (p->body[i]->struct_name.txt &&
2057 p->body[i]->isref &&
2059 // assume this has been copied out
2060 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2067 static void gen_reduce(FILE *f, struct grammar *g, char *file)
2070 fprintf(f, "#line 0 \"gen_reduce\"\n");
2071 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2073 fprintf(f, "\tint ret_size = 0;\n");
2075 fprintf(f, "\tswitch(prod) {\n");
2076 for (i = 0; i < g->production_count; i++) {
2077 struct production *p = g->productions[i];
2078 fprintf(f, "\tcase %d:\n", i);
2083 if (p->head->struct_name.txt)
2084 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2085 p->head->struct_name.len,
2086 p->head->struct_name.txt,
2087 p->head->isref ? "*":"");
2089 fprintf(f, "\t\tbreak;\n");
2091 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2096 As each non-terminal can potentially cause a different type of data
2097 structure to be allocated and filled in, we need to be able to free it when
2100 It is particularly important to have fine control over freeing during error
2101 recovery where individual stack frames might need to be freed.
2103 For this, the grammar author is required to define a `free_XX` function for
2104 each structure that is used by a non-terminal. `do_free` will call whichever
2105 is appropriate given a symbol number, and will call `free` (as is
2106 appropriate for tokens) on any terminal symbol.
2110 static void gen_free(FILE *f, struct grammar *g)
2114 fprintf(f, "#line 0 \"gen_free\"\n");
2115 fprintf(f, "static void do_free(short sym, void *asn)\n");
2117 fprintf(f, "\tif (!asn) return;\n");
2118 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2119 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2120 fprintf(f, "\tswitch(sym) {\n");
2122 for (i = 0; i < g->num_syms; i++) {
2123 struct symbol *s = g->symtab[i];
2125 s->type != Nonterminal ||
2126 s->struct_name.txt == NULL)
2129 fprintf(f, "\tcase %d:\n", s->num);
2131 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2133 s->struct_name.txt);
2134 fprintf(f, "\t\tfree(asn);\n");
2136 fprintf(f, "\t\tfree_%.*s(asn);\n",
2138 s->struct_name.txt);
2139 fprintf(f, "\t\tbreak;\n");
2141 fprintf(f, "\t}\n}\n\n");
2144 ## The main routine.
2146 There are three key parts to the "main" function of parsergen: processing
2147 the arguments, loading the grammar file, and dealing with the grammar.
2149 ### Argument processing.
2151 Command line options allow the selection of analysis mode, name of output
2152 file, and whether or not a report should be generated. By default we create
2153 a report only if no code output was requested.
2155 The `parse_XX` function name uses the basename of the output file which
2156 should not have a suffix (`.c`). `.c` is added to the given name for the
2157 code, and `.h` is added for the header (if header text is specified in the
2164 static const struct option long_options[] = {
2165 { "LR0", 0, NULL, '0' },
2166 { "LR05", 0, NULL, '5' },
2167 { "SLR", 0, NULL, 'S' },
2168 { "LALR", 0, NULL, 'L' },
2169 { "LR1", 0, NULL, '1' },
2170 { "tag", 1, NULL, 't' },
2171 { "report", 0, NULL, 'R' },
2172 { "output", 1, NULL, 'o' },
2173 { NULL, 0, NULL, 0 }
2175 const char *options = "05SL1t:Ro:";
2177 ###### process arguments
2179 char *outfile = NULL;
2184 enum grammar_type type = LR05;
2185 while ((opt = getopt_long(argc, argv, options,
2186 long_options, NULL)) != -1) {
2201 outfile = optarg; break;
2203 tag = optarg; break;
2205 fprintf(stderr, "Usage: parsergen ...\n");
2210 infile = argv[optind++];
2212 fprintf(stderr, "No input file given\n");
2215 if (outfile && report == 1)
2218 if (name && strchr(name, '/'))
2219 name = strrchr(name, '/')+1;
2221 if (optind < argc) {
2222 fprintf(stderr, "Excess command line arguments\n");
2226 ### Loading the grammar file
2228 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2231 Once we have extracted the code (with `mdcode`) we expect to find three
2232 sections: header, code, and grammar. Anything else that is not
2233 excluded by the `--tag` option is an error.
2235 "header" and "code" are optional, though it is hard to build a working
2236 parser with neither. "grammar" must be provided.
2240 #include <sys/mman.h>
2245 static void pr_err(char *msg)
2248 fprintf(stderr, "%s\n", msg);
2252 struct section *table;
2256 fd = open(infile, O_RDONLY);
2258 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2259 infile, strerror(errno));
2262 len = lseek(fd, 0, 2);
2263 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2264 table = code_extract(file, file+len, pr_err);
2266 struct code_node *hdr = NULL;
2267 struct code_node *code = NULL;
2268 struct code_node *gram = NULL;
2269 for (s = table; s; s = s->next) {
2270 struct text sec = s->section;
2271 if (tag && !strip_tag(&sec, tag))
2273 if (text_is(sec, "header"))
2275 else if (text_is(sec, "code"))
2277 else if (text_is(sec, "grammar"))
2280 fprintf(stderr, "Unknown content section: %.*s\n",
2281 s->section.len, s->section.txt);
2286 ### Processing the input
2288 As we need to append an extension to a filename and then open it for
2289 writing, and we need to do this twice, it helps to have a separate function.
2293 static FILE *open_ext(char *base, char *ext)
2295 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2297 strcat(strcpy(fn, base), ext);
2303 If we can read the grammar, then we analyse and optionally report on it. If
2304 the report finds conflicts we will exit with an error status.
2306 ###### process input
2307 struct grammar *g = NULL;
2309 fprintf(stderr, "No grammar section provided\n");
2312 g = grammar_read(gram);
2314 fprintf(stderr, "Failure to parse grammar\n");
2319 grammar_analyse(g, type);
2321 if (grammar_report(g, type))
2325 If a headers section is defined, we write it out and include a declaration
2326 for the `parse_XX` function so it can be used from separate code.
2328 if (rv == 0 && hdr && outfile) {
2329 FILE *f = open_ext(outfile, ".h");
2331 code_node_print(f, hdr, infile);
2332 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2336 fprintf(stderr, "Cannot create %s.h\n",
2342 And if all goes well and an output file was provided, we create the `.c`
2343 file with the code section (if any) and the parser tables and function.
2345 if (rv == 0 && outfile) {
2346 FILE *f = open_ext(outfile, ".c");
2349 code_node_print(f, code, infile);
2350 gen_parser(f, g, infile, name);
2353 fprintf(stderr, "Cannot create %s.c\n",
2359 And that about wraps it up. We need to set the locale so that UTF-8 is
2360 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2362 ###### File: parsergen.mk
2363 parsergen : parsergen.o libscanner.o libmdcode.o
2364 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2371 int main(int argc, char *argv[])
2376 setlocale(LC_ALL,"");
2378 ## process arguments
2385 ## The SHIFT/REDUCE parser
2387 Having analysed the grammar and generated all the tables, we only need the
2388 shift/reduce engine to bring it all together.
2390 ### Goto table lookup
2392 The parser generator has nicely provided us with goto tables sorted by
2393 symbol number. We need a binary search function to find a symbol in the
2396 ###### parser functions
2398 static int search(const struct state *l, int sym)
2401 int hi = l->go_to_cnt;
2405 while (lo + 1 < hi) {
2406 int mid = (lo + hi) / 2;
2407 if (l->go_to[mid].sym <= sym)
2412 if (l->go_to[lo].sym == sym)
2413 return l->go_to[lo].state;
2418 ### The state stack.
2420 The core data structure for the parser is the stack. This tracks all the
2421 symbols that have been recognised or partially recognised.
2423 The stack usually won't grow very large - maybe a few tens of entries. So
2424 we dynamically resize the array as required but never bother to shrink it
2427 We keep the stack as two separate allocations. One, `asn_stack` stores the
2428 "abstract syntax nodes" which are created by each reduction. When we call
2429 `do_reduce` we need to pass an array of the `asn`s of the body of the
2430 production, and by keeping a separate `asn` stack, we can just pass a
2431 pointer into this stack.
2433 The other allocation stores all other stack fields of which there are six.
2434 The `state` is the most important one and guides the parsing process. The
2435 `sym` is nearly unnecessary. However when we want to free entries from the
2436 `asn_stack`, it helps to know what type they are so we can call the right
2437 freeing function. The symbol leads us to the right free function through
2440 The `indents` count tracks the line indents within the symbol or
2441 immediately following it. These are used to allow indent information to
2442 guide parsing and error recovery.
2444 `since_newline` tracks how many stack frames since the last
2445 start-of-line (whether indented or not). So if `since_newline` is
2446 zero, then this symbol is at the start of a line. Similarly
2447 `since_indent` counts the number of states since an indent, it is zero
2448 precisely when `indents` is not zero.
2450 `newline_permitted` keeps track of whether newlines should be ignored
2453 The stack is most properly seen as alternating states and symbols -
2454 states, like the 'DOT' in items, are between symbols. Each frame in
2455 our stack holds a state and the symbol that was before it. The
2456 bottom of stack holds the start state but no symbol, as nothing came
2457 before the beginning.
2459 ###### parser functions
2464 short newline_permitted;
2468 short since_newline;
2478 Two operations are needed on the stack - shift (which is like push) and pop.
2480 Shift applies not only to terminals but also to non-terminals. When
2481 we reduce a production we will pop off entries corresponding to the
2482 body symbols, then push on an item for the head of the production.
2483 This last is exactly the same process as shifting in a terminal so we
2484 use the same function for both. In both cases we provide the symbol,
2485 the number of indents the symbol contains (which will be zero for a
2486 terminal symbol) and a flag indicating that the symbol was at (or was
2487 reduced from a symbol which was at) the start of a line. The state is
2488 deduced from the current top-of-stack state and the new symbol.
2490 To simplify other code we arrange for `shift` to fail if there is no `goto`
2491 state for the symbol. This is useful in basic parsing due to our design
2492 that we shift when we can, and reduce when we cannot. So the `shift`
2493 function reports if it could.
2495 `shift` is also used to push state zero onto the stack, so if the
2496 stack is empty, it always chooses zero as the next state.
2498 So `shift` finds the next state. If that succeeds it extends the
2499 allocations if needed and pushes all the information onto the stacks.
2501 Newlines are permitted after a `starts_line` state until an internal
2502 indent. If the new frame has neither a `starts_line` state nor an
2503 indent, newlines are permitted if the previous stack frame permitted
2506 ###### parser functions
2508 static int shift(struct parser *p,
2509 short sym, short indents, short start_of_line,
2511 const struct state states[])
2513 // Push an entry onto the stack
2514 struct frame next = {0};
2515 int newstate = p->tos
2516 ? search(&states[p->stack[p->tos-1].state],
2521 if (p->tos >= p->stack_size) {
2522 p->stack_size += 10;
2523 p->stack = realloc(p->stack, p->stack_size
2524 * sizeof(p->stack[0]));
2525 p->asn_stack = realloc(p->asn_stack, p->stack_size
2526 * sizeof(p->asn_stack[0]));
2529 next.indents = indents;
2530 next.state = newstate;
2531 if (states[newstate].starts_line)
2532 next.newline_permitted = 1;
2534 next.newline_permitted = 0;
2536 next.newline_permitted =
2537 p->stack[p->tos-1].newline_permitted;
2539 next.newline_permitted = 0;
2541 if (!start_of_line) {
2543 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2545 next.since_newline = 1;
2548 next.since_indent = 0;
2550 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2552 next.since_indent = 1;
2554 p->stack[p->tos] = next;
2555 p->asn_stack[p->tos] = asn;
2560 `pop` primarily moves the top of stack (`tos`) back down the required
2561 amount and frees any `asn` entries that need to be freed. It also
2562 collects a summary of the indents and line starts in the symbols that
2563 are being removed. It is called _after_ we reduce a production, just
2564 before we `shift` the nonterminal in.
2566 ###### parser functions
2568 static int pop(struct parser *p, int num,
2569 short *start_of_line,
2570 void(*do_free)(short sym, void *asn))
2577 for (i = 0; i < num; i++) {
2578 sol |= !p->stack[p->tos+1].since_newline;
2579 indents += p->stack[p->tos+i].indents;
2580 do_free(p->stack[p->tos+i].sym,
2581 p->asn_stack[p->tos+i]);
2584 *start_of_line = sol;
2588 ### Memory allocation
2590 The `scanner` returns tokens in a local variable - we want them in allocated
2591 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2592 a reduce is in a large buffer. Both of these require some allocation and
2593 copying, hence `memdup` and `tokcopy`.
2595 ###### parser includes
2598 ###### parser functions
2600 void *memdup(void *m, int len)
2606 memcpy(ret, m, len);
2610 static struct token *tok_copy(struct token tk)
2612 struct token *new = malloc(sizeof(*new));
2617 ### The heart of the parser.
2619 Now we have the parser. If we can shift we do, though newlines and
2620 reducing indenting may block that. If not and we can reduce we do
2621 that. If the production we reduced was production zero, then we have
2622 accepted the input and can finish.
2624 We return whatever `asn` was returned by reducing production zero.
2626 If we can neither shift nor reduce we have an error to handle. We pop
2627 single entries off the stack until we can shift the `TK_error` symbol, then
2628 drop input tokens until we find one we can shift into the new error state.
2630 When we find `TK_in` and `TK_out` tokens which report indents we need
2631 to handle them directly as the grammar cannot express what we want to
2634 `TK_in` tokens are easy: we simply update indent count in the top stack frame to
2635 record how many indents there are following the previous token.
2637 `TK_out` tokens must be canceled against an indent count
2638 within the stack. If we can reduce some symbols that are all since
2639 the most recent indent, then we do that first. If the minimum prefix
2640 of the current state then extends back before the most recent indent,
2641 that indent can be cancelled. If the minimum prefix is shorter, then
2642 the indent is premature and we must start error handling, which
2643 currently doesn't work at all.
2645 `TK_newline` tokens are ignored unless the top stack frame records
2646 that they are permitted. In that case they will not be considered for
2647 shifting if it is possible to reduce some symbols that are all since
2648 the most recent start of line. This is how a newline forcibly
2649 terminates any line-like structure - we try to reduce down to at most
2650 one symbol for each line where newlines are allowed.
2652 ###### parser includes
2655 void *parser_run(struct token_state *tokens,
2656 const struct state states[],
2657 int (*do_reduce)(int, void**, struct token_config*, void*),
2658 void (*do_free)(short, void*),
2659 FILE *trace, const char *non_term[],
2660 struct token_config *config)
/* Main parse loop: shift when possible, otherwise reduce; accepting
 * when production zero is reduced, and falling back to TK_error
 * recovery when neither shift nor reduce applies.  Returns whatever
 * asn was produced by reducing production zero.
 * NOTE(review): many interior lines (braces, declarations, `continue`
 * statements) fall outside this extract; comments annotate only the
 * visible statements. */
2662 struct parser p = { 0 };
2663 struct token *tk = NULL;
/* Prime the stack: the bottom frame notionally holds TK_eof and is
 * treated as starting a line. */
2667 shift(&p, TK_eof, 0, 1, NULL, states);
2669 struct token *err_tk;
2670 struct frame *tos = &p.stack[p.tos-1];
2672 tk = tok_copy(token_next(tokens));
2673 parser_trace(trace, &p,
2674 tk, states, non_term, config->known_count);
/* TK_in: record the indent on the top frame instead of shifting it -
 * the grammar itself cannot express indent handling. */
2676 if (tk->num == TK_in) {
2678 tos->since_newline = 0;
2679 tos->since_indent = 0;
2680 if (!states[tos->state].starts_line)
2681 tos->newline_permitted = 0;
2684 parser_trace_action(trace, "Record");
/* TK_out: first reduce anything lying wholly inside the indent, then
 * try to cancel the indent against a TK_in recorded in the stack. */
2687 if (tk->num == TK_out) {
2688 if (states[tos->state].reduce_size >= 0 &&
2689 states[tos->state].reduce_size <= tos->since_indent)
2691 if (states[tos->state].min_prefix >= tos->since_indent) {
/* Cancel: walk back to the frame that recorded the indent. */
2693 struct frame *in = tos - tos->since_indent;
2695 if (in->indents == 0) {
2696 /* Reassess since_indent and newline_permitted */
2698 in->since_indent = in[-1].since_indent + 1;
2699 in->newline_permitted = in[-1].newline_permitted;
2701 in->since_indent = 0;
2702 in->newline_permitted = 0;
2704 if (states[in->state].starts_line)
2705 in->newline_permitted = 1;
/* Propagate the adjusted counters to the frames above `in`. */
2708 in->since_indent = in[-1].since_indent + 1;
2709 if (states[in->state].starts_line)
2710 in->newline_permitted = 1;
2712 in->newline_permitted = in[-1].newline_permitted;
2717 parser_trace_action(trace, "Cancel");
2720 // fall through and force a REDUCE (as 'shift'
/* TK_newline: discard it unless the top frame permits newlines, and
 * prefer reducing the completed line before considering a shift. */
2723 if (tk->num == TK_newline) {
2724 if (!tos->newline_permitted) {
2727 parser_trace_action(trace, "Discard");
2730 if (tos->since_newline > 1 &&
2731 states[tos->state].reduce_size >= 0 &&
2732 states[tos->state].reduce_size <= tos->since_newline)
/* Plain shift; a shifted newline marks the start of a new line. */
2735 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2737 parser_trace_action(trace, "Shift");
/* Cannot shift: reduce if this state has a default production. */
2741 if (states[tos->state].reduce_prod >= 0) {
2744 const struct state *nextstate = &states[tos->state];
2745 int prod = nextstate->reduce_prod;
2746 int size = nextstate->reduce_size;
2748 static char buf[16*1024];
2749 short indents, start_of_line;
/* The production body is the top `size` asn entries on the stack. */
2751 body = p.asn_stack + (p.tos - size);
2753 bufsize = do_reduce(prod, body, config, buf);
2755 indents = pop(&p, size, &start_of_line,
2757 res = memdup(buf, bufsize);
2758 memset(buf, 0, bufsize);
2759 if (!shift(&p, nextstate->reduce_sym,
2760 indents, start_of_line,
/* A failed shift of the reduced nonterminal is only legitimate for
 * production zero, which signals acceptance of the input. */
2762 if (prod != 0) abort();
2766 parser_trace_action(trace, "Reduce");
/* Premature TK_out: synthesise the symbol this state expects so
 * parsing can continue - see the FIXME below about reporting. */
2769 if (tk->num == TK_out) {
2770 // Indent problem - synthesise tokens to get us
2772 fprintf(stderr, "Synthesize %d to handle indent problem\n", states[tos->state].shift_sym);
2773 shift(&p, states[tos->state].shift_sym,
2774 0, 1, tok_copy(*tk), states);
2775 // FIXME need to report this error somehow
2776 parser_trace_action(trace, "Synthesize");
2779 /* Error. We walk up the stack until we
2780 * find a state which will accept TK_error.
2781 * We then shift in TK_error and see what state
2782 * that takes us to.
2783 * Then we discard input tokens until
2784 * we find one that is acceptable.
2786 parser_trace_action(trace, "ERROR");
2787 short indents = 0, start_of_line;
2789 err_tk = tok_copy(*tk);
2790 while (shift(&p, TK_error, 0, 0,
2791 err_tk, states) == 0
2793 // discard this state
2794 indents += pop(&p, 1, &start_of_line, do_free);
2797 // no state accepted TK_error
/* Skip input until a token acceptable in the error state arrives. */
2800 tos = &p.stack[p.tos-1];
2801 while (search(&states[tos->state], tk->num) < 0 &&
2802 tk->num != TK_eof) {
2804 tk = tok_copy(token_next(tokens));
2805 if (tk->num == TK_in)
2807 if (tk->num == TK_out) {
2811 // FIXME update since_indent here
2814 if (p.tos == 0 && tk->num == TK_eof)
2816 tos = &p.stack[p.tos-1];
/* Fold indents collected while popping into the surviving frame. */
2817 tos->indents += indents;
/* Final cleanup: free everything left on the stack. */
2820 pop(&p, p.tos, NULL, do_free);
2826 ###### exported functions
2827 void *parser_run(struct token_state *tokens,
2828 const struct state states[],
2829 int (*do_reduce)(int, void**, struct token_config*, void*),
2830 void (*do_free)(short, void*),
2831 FILE *trace, const char *non_term[],
2832 struct token_config *config);
2836 Being able to visualize the parser in action can be invaluable when
2837 debugging the parser code, or trying to understand how the parse of a
2838 particular grammar progresses. The stack contains all the important
2839 state, so just printing out the stack every time around the parse loop
2840 can make it possible to see exactly what is happening.
2842 This doesn't explicitly show each SHIFT and REDUCE action. However they
2843 are easily deduced from the change between consecutive lines, and the
2844 details of each state can be found by cross referencing the states list
2845 in the stack with the "report" that parsergen can generate.
2847 For terminal symbols, we just dump the token. For non-terminals we
2848 print the name of the symbol. The look ahead token is reported at the
2849 end inside square brackets.
2851 ###### parser functions
/* Printable names for the scanner's reserved token numbers, used by
 * the trace routines below. */
2853 static char *reserved_words[] = {
2854 [TK_error] = "ERROR",
/* NOTE(review): entries between TK_error and TK_newline (presumably
 * TK_in/TK_out) fall outside this extract - confirm upstream. */
2857 [TK_newline] = "NEWLINE",
2860 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
/* Print one frame's state as "(Nsn.M) ": 's' marks a state that can
 * start a line, and "n<M>" appears when newlines are permitted, with
 * M = frames since the line started. */
2862 fprintf(trace, "(%d", f->state);
2863 if (states[f->state].starts_line)
2864 fprintf(trace, "s");
2865 if (f->newline_permitted)
2866 fprintf(trace, "n%d", f->since_newline);
2867 fprintf(trace, ") ");
2870 void parser_trace(FILE *trace, struct parser *p,
2871 struct token *tk, const struct state states[],
2872 const char *non_term[], int knowns)
/* Dump the whole parse stack, one entry per frame, followed by the
 * look-ahead token in square brackets.
 * NOTE(review): some interior lines (braces, a few statements) fall
 * outside this extract. */
2877 for (i = 0; i < p->tos; i++) {
2878 struct frame *f = &p->stack[i];
/* Reserved symbols print their fixed name, known terminals print the
 * token text (truncated to 20), non-terminals print their grammar
 * name from non_term[]. */
2881 if (sym < TK_reserved &&
2882 reserved_words[sym] != NULL)
2883 fputs(reserved_words[sym], trace);
2884 else if (sym < TK_reserved + knowns) {
2885 struct token *t = p->asn_stack[i];
2886 text_dump(trace, t->txt, 20);
2888 fputs(non_term[sym - TK_reserved - knowns],
/* Annotate each frame with its indent count and state details. */
2891 fprintf(trace, ".%d", f->indents);
2892 if (f->since_newline == 0)
2896 parser_trace_state(trace, f, states);
/* Finally the look-ahead token, inside square brackets. */
2898 fprintf(trace, "[");
2899 if (tk->num < TK_reserved &&
2900 reserved_words[tk->num] != NULL)
2901 fputs(reserved_words[tk->num], trace);
2903 text_dump(trace, tk->txt, 20);
2907 void parser_trace_action(FILE *trace, char *action)
/* Append the action taken (Shift/Reduce/Record/...) to the current
 * trace line. */
2910 fprintf(trace, " - %s\n", action);
2915 The obvious example for a parser is a calculator.
2917 As `scanner` provides a number parsing function using `libgmp`, it is not much
2918 work to perform arbitrary rational number calculations.
2920 This calculator takes one expression, or an equality test per line. The
2921 results are printed and if any equality test fails, the program exits with
2924 ###### File: parsergen.mk
2925 calc.c calc.h : parsergen parsergen.mdc
2926 ./parsergen --tag calc -o calc parsergen.mdc
2927 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
2928 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
2933 // what do we use for a demo-grammar? A calculator of course.
2945 #include <sys/mman.h>
2950 #include "scanner.h"
2956 static void free_number(struct number *n)
2962 int main(int argc, char *argv[])
/* Demo driver: map the file named in argv[1], extract its literate
 * code sections, and run the calculator parser over the code section.
 * NOTE(review): argv[1], open(), lseek() and mmap() results are used
 * unchecked, and the whence value 2 is the magic number for SEEK_END.
 * Acceptable for a test harness; confirm before reusing elsewhere. */
2964 int fd = open(argv[1], O_RDONLY);
2965 int len = lseek(fd, 0, 2);
2966 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2967 struct section *s = code_extract(file, file+len, NULL);
2968 struct token_config config = {
/* Comments are irrelevant to the calculator grammar, so ignore them. */
2969 .ignored = (1 << TK_line_comment)
2970 | (1 << TK_block_comment)
2973 .number_chars = ".,_+-",
/* Any extra command-line argument turns on tracing to stderr. */
2977 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
2979 struct section *t = s->next;
2989 Session -> Session Line
2992 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
2993 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
2994 gmp_printf(" or as a decimal: %Fg\n", fl);
2998 | Expression = Expression NEWLINE ${
2999 if (mpq_equal($1.val, $3.val))
3000 gmp_printf("Both equal %Qd\n", $1.val);
3002 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3007 | NEWLINE ${ printf("Blank line\n"); }$
3008 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3011 Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3012 | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3013 | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
3015 Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3016 | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3017 | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
3019 Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3020 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$