1 # LR(1) Parser Generator #
3 This parser generator takes a grammar description combined with code
4 fragments, analyses it, and can report details about the analysis and
5 write out C code files which can be compiled to make a parser.
7 There are several distinct sections.
9 1. `grammar_read` will read a grammar from a literate-code file and
10 store the productions, symbols, and code fragments.
11 2. `grammar_analyse` will take that grammar and build LR parsing
12 states and look-ahead tables.
13 3. `grammar_report` will present the details of the analysis
14 in a readable format and will report any conflicts.
15 4. `parser_generate` will write out C code files with various
16 tables and with the code fragments arranged in useful places.
17 5. `parser_run` is a library function intended to be linked together
18 with the generated parser tables to complete the implementation of
20 6. Finally `calc` is a test grammar for a simple calculator. The
21 `parsergen` program built from the C code in this file can extract
22 that grammar directly from this file and process it.
25 ###### File: parsergen.c
30 ## forward declarations
41 ###### File: libparser.c
48 ###### File: parsergen.mk
51 parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
54 ## Reading the grammar
56 The grammar must be stored in a markdown literate code file as parsed
57 by "[mdcode][]". It must have three top level (i.e. unreferenced)
58 sections: `header`, `code`, and `grammar`. The first two will be
59 literally copied into the generated `.c` and `.h` files. The last
60 contains the grammar. This is tokenised with "[scanner][]".
62 If the `--tag` option is given, then any top level header that doesn't
63 start with the tag is ignored, and the tag is stripped from the rest. So
65 means that the three needed sections must be `Foo: header`, `Foo: code`,
66 and `Foo: grammar`. The tag `calc` is used to extract the same calculator
70 [scanner]: scanner.html
76 ###### parser includes
80 The grammar contains production sets, precedence/associativity
81 declarations, and data type declarations. These are all parsed with
82 _ad hoc_ parsing as we don't have a parser generator yet.
84 The precedence and associativity can be set for each production, but
85 can be inherited from symbols. The data type (either structure or a
86 reference to a structure) is potentially defined for each non-terminal
87 and describes what C structure is used to store information about each
91 enum assoc {Left, Right, Non};
92 char *assoc_names[] = {"Left","Right","Non"};
95 struct text struct_name;
98 unsigned short precedence;
102 unsigned short precedence;
110 The strings reported by `mdcode` and `scanner` are `struct text` which have
111 length rather than being null terminated. To help with printing and
112 comparing we define `text_is` and `prtxt`, which should possibly go in
113 `mdcode`. `scanner` does provide `text_dump` which is useful for strings
114 which might contain control characters.
116 `strip_tag` is a bit like `strncmp`, but adds a test for a colon,
117 because that is what we need to detect tags.
120 static int text_is(struct text t, char *s)
122 return (strlen(s) == t.len &&
123 strncmp(s, t.txt, t.len) == 0);
125 static void prtxt(struct text t)
127 printf("%.*s", t.len, t.txt);
130 static int strip_tag(struct text *t, char *tag)
132 int skip = strlen(tag) + 1;
133 if (skip >= t->len ||
134 strncmp(t->txt, tag, skip-1) != 0 ||
135 t->txt[skip-1] != ':')
137 while (skip < t->len && t->txt[skip] == ' ')
146 Productions are comprised primarily of symbols - terminal and
147 non-terminal. We do not make a syntactic distinction between the two,
148 though non-terminals must be identifiers. Non-terminal symbols are
149 those which appear in the head of a production, terminal symbols are
150 those which don't. There are also "virtual" symbols used for precedence
151 marking discussed later, and sometimes we won't know what type a symbol
154 ###### forward declarations
155 enum symtype { Unknown, Virtual, Terminal, Nonterminal };
156 char *symtypes = "UVTN";
160 Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
161 table of known symbols and the resulting parser will report them as
162 `TK_reserved + N`. A small set of identifiers are reserved for the
163 different token types that `scanner` can report.
166 static char *reserved_words[] = {
167 [TK_error] = "ERROR",
168 [TK_number] = "NUMBER",
169 [TK_ident] = "IDENTIFIER",
171 [TK_string] = "STRING",
172 [TK_multi_string] = "MULTI_STRING",
175 [TK_newline] = "NEWLINE",
181 Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
182 recognised. The former is automatically expected at the end of the text
183 being parsed. The latter are always ignored by the parser.
185 All of these symbols are stored in a simple symbol table. We use the
186 `struct text` from `mdcode` to store the name and link them together
187 into a sorted list using an insertion sort.
189 We don't have separate `find` and `insert` functions as any symbol we
190 find needs to be remembered. We simply expect `find` to always return a
191 symbol, but its type might be `Unknown`.
200 ###### grammar fields
205 static struct symbol *sym_find(struct grammar *g, struct text s)
207 struct symbol **l = &g->syms;
212 (cmp = text_cmp((*l)->name, s)) < 0)
216 n = calloc(1, sizeof(*n));
225 static void symbols_init(struct grammar *g)
227 int entries = sizeof(reserved_words)/sizeof(reserved_words[0]);
229 for (i = 0; i < entries; i++) {
232 t.txt = reserved_words[i];
235 t.len = strlen(t.txt);
242 ### Data types and precedence.
244 Data type specification and precedence specification are both
245 introduced by a dollar sign at the start of the line. If the next
246 word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
247 precedence, otherwise it specifies a data type.
249 The data type name is simply stored and applied to the head of all
250 subsequent productions. It must be the name of a structure optionally
251 preceded by an asterisk which means a reference or "pointer". So
252 `$expression` maps to `struct expression` and `$*statement` maps to
253 `struct statement *`.
255 Any productions given before the first data type declaration will have
256 no data type associated with them and can carry no information. In
257 order to allow other non-terminals to have no type, the data type
258 `$void` can be given. This does *not* mean that `struct void` will be
259 used, but rather that no type will be associated with future
262 The precedence line must contain a list of symbols - typically
263 terminal symbols, but not necessarily. It can only contain symbols
264 that have not been seen yet, so precedence declaration must precede
265 the productions that they affect.
267 A precedence line may also contain a "virtual" symbol which is an
268 identifier preceded by `$$`. Virtual terminals carry precedence
269 information but are not included in the grammar. A production can
270 declare that it inherits the precedence of a given virtual symbol.
272 This use for `$$` precludes it from being used as a symbol in the
273 described language. Two other symbols: `${` and `}$` are also
276 Each new precedence line introduces a new precedence level and
277 declares how it associates. This level is stored in each symbol
278 listed and may be inherited by any production which uses the symbol. A
279 production inherits from the last symbol which has a precedence.
281 ###### grammar fields
282 struct text current_type;
287 enum symbols { TK_virtual = TK_reserved, TK_open, TK_close };
288 static const char *known[] = { "$$", "${", "}$" };
291 static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
293 struct token t = token_next(ts);
298 if (t.num != TK_ident) {
299 err = "type or assoc expected after '$'";
302 if (text_is(t.txt, "LEFT"))
304 else if (text_is(t.txt, "RIGHT"))
306 else if (text_is(t.txt, "NON"))
309 g->current_type = t.txt;
310 g->type_isref = isref;
311 if (text_is(t.txt, "void"))
312 g->current_type.txt = NULL;
314 if (t.num != TK_newline) {
315 err = "Extra tokens after type name";
322 err = "$* cannot be followed by a precedence";
326 // This is a precedence line, need some symbols.
330 while (t.num != TK_newline) {
331 enum symtype type = Terminal;
333 if (t.num == TK_virtual) {
336 if (t.num != TK_ident) {
337 err = "$$ must be followed by a word";
340 } else if (t.num != TK_ident &&
342 err = "Illegal token in precedence line";
345 s = sym_find(g, t.txt);
346 if (s->type != Unknown) {
347 err = "Symbols in precedence line must not already be known.";
351 s->precedence = g->prec_levels;
356 err = "No symbols given on precedence line";
360 while (t.num != TK_newline && t.num != TK_eof)
367 A production either starts with an identifier which is the head
368 non-terminal, or a vertical bar (`|`) in which case this production
369 uses the same head as the previous one. The identifier must be
370 followed by a `->` mark. All productions for a given non-terminal must
371 be grouped together with the `nonterminal ->` given only once.
373 After this a (possibly empty) sequence of identifiers and marks form
374 the body of the production. A virtual symbol may be given after the
375 body by preceding it with `$$`. If a virtual symbol is given, the
376 precedence of the production is that for the virtual symbol. If none
377 is given, the precedence is inherited from the last symbol in the
378 production which has a precedence specified.
380 After the optional precedence may come the `${` mark. This indicates
381 the start of a code fragment. If present, this must be on the same
382 line as the start of the production.
384 All of the text from the `${` through to the matching `}$` is
385 collected and forms the code-fragment for the production. It must all
386 be in one `code_node` of the literate code. The `}$` must be
387 at the end of a line.
389 Text in the code fragment will undergo substitutions where `$N` or
390 `$<N`, for some numeric `N`, will be replaced with a variable holding
391 the parse information for the particular symbol in the production.
392 `$0` is the head of the production, `$1` is the first symbol of the
393 body, etc. The type of `$N` for a terminal symbol is `struct token`.
394 For a non-terminal, it is whatever has been declared for that symbol.
395 The `<` may be included for symbols declared as storing a reference
396 (not a structure) and means that the reference is being moved out, so
397 it will not automatically be freed.
399 While building productions we will need to add to an array which needs to
403 static void array_add(void *varray, int *cnt, void *new)
405 void ***array = varray;
408 current = ((*cnt-1) | (step-1))+1;
409 if (*cnt == current) {
412 *array = realloc(*array, current * sizeof(void*));
414 (*array)[*cnt] = new;
418 Collecting the code fragment simply involves reading tokens until we
419 hit the end token `}$`, and noting the character position of the start and
423 static struct text collect_code(struct token_state *state,
428 code.txt = start.txt.txt + start.txt.len;
430 t = token_next(state);
431 while (t.node == start.node &&
432 t.num != TK_close && t.num != TK_error &&
434 if (t.num == TK_close && t.node == start.node)
435 code.len = t.txt.txt - code.txt;
441 Now we have all the bits we need to parse a full production.
446 ###### grammar fields
447 struct production **productions;
448 int production_count;
450 ###### production fields
452 struct symbol **body;
458 int first_production;
461 static char *parse_production(struct grammar *g,
463 struct token_state *state)
465 /* Head has already been parsed. */
468 struct production p, *pp;
470 memset(&p, 0, sizeof(p));
472 tk = token_next(state);
473 while (tk.num == TK_ident || tk.num == TK_mark) {
474 struct symbol *bs = sym_find(g, tk.txt);
475 if (bs->type == Unknown)
477 if (bs->type == Virtual) {
478 err = "Virtual symbol not permitted in production";
481 if (bs->precedence) {
482 p.precedence = bs->precedence;
485 array_add(&p.body, &p.body_size, bs);
486 tk = token_next(state);
488 if (tk.num == TK_virtual) {
490 tk = token_next(state);
491 if (tk.num != TK_ident) {
492 err = "word required after $$";
495 vs = sym_find(g, tk.txt);
496 if (vs->type != Virtual) {
497 err = "symbol after $$ must be virtual";
500 p.precedence = vs->precedence;
502 tk = token_next(state);
504 if (tk.num == TK_open) {
505 p.code_line = tk.line;
506 p.code = collect_code(state, tk);
507 if (p.code.txt == NULL) {
508 err = "code fragment not closed properly";
511 tk = token_next(state);
513 if (tk.num != TK_newline && tk.num != TK_eof) {
514 err = "stray tokens at end of line";
517 pp = malloc(sizeof(*pp));
519 array_add(&g->productions, &g->production_count, pp);
522 while (tk.num != TK_newline && tk.num != TK_eof)
523 tk = token_next(state);
527 With the ability to parse production and dollar-lines, we have nearly all
528 that we need to parse a grammar from a `code_node`.
530 The head of the first production will effectively be the `start` symbol of
531 the grammar. However it won't _actually_ be so. Processing the grammar is
532 greatly simplified if the real start symbol only has a single production,
533 and expects `$eof` as the final terminal. So when we find the first
534 explicit production we insert an extra production as production zero which
537 ###### Example: production 0
540 where `START` is the first non-terminal given.
542 ###### create production zero
543 struct production *p = calloc(1,sizeof(*p));
544 struct text start = {"$start",6};
545 struct text eof = {"$eof",4};
546 struct text code = {"$0 = $<1;", 9};
547 p->head = sym_find(g, start);
548 p->head->type = Nonterminal;
549 p->head->struct_name = g->current_type;
550 p->head->isref = g->type_isref;
551 if (g->current_type.txt)
553 array_add(&p->body, &p->body_size, head);
554 array_add(&p->body, &p->body_size, sym_find(g, eof));
555 p->head->first_production = g->production_count;
556 array_add(&g->productions, &g->production_count, p);
558 Now we are ready to read in the grammar. We ignore comments
559 and strings so that the marks which introduce them can be
560 used as terminals in the grammar. We don't ignore numbers
561 even though we don't allow them as that causes the scanner
562 to produce errors that the parser is better positioned to handle.
565 static struct grammar *grammar_read(struct code_node *code)
567 struct token_config conf = {
570 .words_marks = known,
571 .known_count = sizeof(known)/sizeof(known[0]),
573 .ignored = (1 << TK_line_comment)
574 | (1 << TK_block_comment)
577 | (1 << TK_multi_string)
582 struct token_state *state = token_open(code, &conf);
584 struct symbol *head = NULL;
588 g = calloc(1, sizeof(*g));
591 for (tk = token_next(state); tk.num != TK_eof;
592 tk = token_next(state)) {
593 if (tk.num == TK_newline)
595 if (tk.num == TK_ident) {
597 head = sym_find(g, tk.txt);
598 if (head->type == Nonterminal)
599 err = "This non-terminal has already be used.";
600 else if (head->type == Virtual)
601 err = "Virtual symbol not permitted in head of production";
603 head->type = Nonterminal;
604 head->struct_name = g->current_type;
605 head->isref = g->type_isref;
606 if (g->production_count == 0) {
607 ## create production zero
609 head->first_production = g->production_count;
610 tk = token_next(state);
611 if (tk.num == TK_mark &&
612 text_is(tk.txt, "->"))
613 err = parse_production(g, head, state);
615 err = "'->' missing in production";
617 } else if (tk.num == TK_mark
618 && text_is(tk.txt, "|")) {
619 // another production for same non-term
621 err = parse_production(g, head, state);
623 err = "First production must have a head";
624 } else if (tk.num == TK_mark
625 && text_is(tk.txt, "$")) {
626 err = dollar_line(state, g, 0);
627 } else if (tk.num == TK_mark
628 && text_is(tk.txt, "$*")) {
629 err = dollar_line(state, g, 1);
631 err = "Unrecognised token at start of line.";
639 fprintf(stderr, "Error at line %d: %s\n",
646 ## Analysing the grammar
648 The central task in analysing the grammar is to determine a set of
649 states to drive the parsing process. This will require finding
650 various sets of symbols and of "items". Some of these sets will need
651 to attach information to each element in the set, so they are more
654 Each "item" may have a 'look-ahead' or `LA` set associated with
655 it. Many of these will be the same as each other. To avoid memory
656 wastage, and to simplify some comparisons of sets, these sets will be
657 stored in a list of unique sets, each assigned a number.
659 Once we have the data structures in hand to manage these sets and
660 lists, we can start setting the 'nullable' flag, build the 'FIRST'
661 sets, and then create the item sets which define the various states.
665 Though we don't only store symbols in these sets, they are the main
666 things we store, so they are called symbol sets or "symsets".
668 A symset has a size, an array of shorts, and an optional array of data
669 values, which are also shorts. If the array of data is not present,
670 we store an impossible pointer, as `NULL` is used to indicate that no
671 memory has been allocated yet.
676 unsigned short *syms, *data;
678 #define NO_DATA ((unsigned short *)1)
679 const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
680 const struct symset INIT_DATASET = { 0, NULL, NULL };
682 The arrays of shorts are allocated in blocks of 8 and are kept sorted
683 by using an insertion sort. We don't explicitly record the amount of
684 allocated space as it can be derived directly from the current `cnt` using
685 `((cnt - 1) | 7) + 1`.
688 static void symset_add(struct symset *s, unsigned short key, unsigned short val)
691 int current = ((s->cnt-1) | 7) + 1;
692 if (current == s->cnt) {
694 s->syms = realloc(s->syms, sizeof(*s->syms) * current);
695 if (s->data != NO_DATA)
696 s->data = realloc(s->data, sizeof(*s->data) * current);
699 while (i > 0 && s->syms[i-1] > key) {
700 s->syms[i] = s->syms[i-1];
701 if (s->data != NO_DATA)
702 s->data[i] = s->data[i-1];
706 if (s->data != NO_DATA)
711 Finding a symbol (or item) in a `symset` uses a simple binary search.
712 We return the index where the value was found (so data can be accessed),
713 or `-1` to indicate failure.
715 static int symset_find(struct symset *ss, unsigned short key)
722 while (lo + 1 < hi) {
723 int mid = (lo + hi) / 2;
724 if (ss->syms[mid] <= key)
729 if (ss->syms[lo] == key)
734 We will often want to form the union of two symsets. When we do, we
735 will often want to know if anything changed (as that might mean there
736 is more work to do). So `symset_union` reports whether anything was
737 added to the first set. We use a slow quadratic approach as these
738 sets don't really get very big. If profiling shows this to be a problem it
739 can be optimised later.
741 static int symset_union(struct symset *a, struct symset *b)
745 for (i = 0; i < b->cnt; i++)
746 if (symset_find(a, b->syms[i]) < 0) {
747 unsigned short data = 0;
748 if (b->data != NO_DATA)
750 symset_add(a, b->syms[i], data);
756 And of course we must be able to free a symset.
758 static void symset_free(struct symset ss)
761 if (ss.data != NO_DATA)
767 Some symsets are simply stored somewhere appropriate in the `grammar`
768 data structure, others need a bit of indirection. This applies
769 particularly to "LA" sets. These will be paired with "items" in an
770 "itemset". As itemsets will be stored in a symset, the "LA" set needs to be
771 stored in the `data` field, so we need a mapping from a small (short)
772 number to an LA `symset`.
774 As mentioned earlier this involves creating a list of unique symsets.
776 For now, we just use a linear list sorted by insertion. A skiplist
777 would be more efficient and may be added later.
784 struct setlist *next;
787 ###### grammar fields
788 struct setlist *sets;
793 static int ss_cmp(struct symset a, struct symset b)
796 int diff = a.cnt - b.cnt;
799 for (i = 0; i < a.cnt; i++) {
800 diff = (int)a.syms[i] - (int)b.syms[i];
807 static int save_set(struct grammar *g, struct symset ss)
809 struct setlist **sl = &g->sets;
813 while (*sl && (cmp = ss_cmp((*sl)->ss, ss)) < 0)
820 s = malloc(sizeof(*s));
829 Finding a set by number is currently performed by a simple linear search.
830 If this turns out to hurt performance, we can store the sets in a dynamic
831 array like the productions.
833 static struct symset set_find(struct grammar *g, int num)
835 struct setlist *sl = g->sets;
836 while (sl && sl->num != num)
842 ### Setting `nullable`
844 We set `nullable` on the head symbol for any production for which all
845 the body symbols (if any) are nullable. As this is a recursive
846 definition, any change in the `nullable` setting means that we need to
847 re-evaluate where it needs to be set.
849 We simply loop around performing the same calculations until no more
856 static void set_nullable(struct grammar *g)
859 while (check_again) {
862 for (p = 0; p < g->production_count; p++) {
863 struct production *pr = g->productions[p];
866 if (pr->head->nullable)
868 for (s = 0; s < pr->body_size; s++)
869 if (! pr->body[s]->nullable)
871 if (s == pr->body_size) {
872 pr->head->nullable = 1;
879 ### Setting `can_eol` and `line_like`
881 In order to be able to ignore newline tokens when not relevant, but
882 still include them in the parse when needed, we will need to know
883 which states can start a "line-like" section of code. We ignore
884 newlines when there is an indent since the most recent start of a
887 To know which symbols are line-like, we first need to know which
888 symbols start with a NEWLINE token. Any symbol which is followed by a
889 NEWLINE, or anything that starts with a NEWLINE, is deemed to be a line-like symbol.
890 Certainly when trying to parse one of these we must take note of NEWLINEs.
892 Clearly the `TK_newline` token can start with a NEWLINE. Any symbol
893 which is the head of a production that contains a starts-with-NEWLINE
894 symbol preceded only by nullable symbols is also a
895 starts-with-NEWLINE symbol. We use a new field `can_eol` to record
896 this attribute of symbols, and compute it in a repetitive manner
897 similar to `set_nullable`.
899 Once we have that, we can determine which symbols are `line_like` by
900 seeing which are followed by a `can_eol` symbol in any production.
907 static void set_can_eol(struct grammar *g)
910 g->symtab[TK_newline]->can_eol = 1;
911 while (check_again) {
914 for (p = 0; p < g->production_count; p++) {
915 struct production *pr = g->productions[p];
918 if (pr->head->can_eol)
921 for (s = 0 ; s < pr->body_size; s++) {
922 if (pr->body[s]->can_eol) {
923 pr->head->can_eol = 1;
927 if (!pr->body[s]->nullable)
934 static void set_line_like(struct grammar *g)
937 for (p = 0; p < g->production_count; p++) {
938 struct production *pr = g->productions[p];
941 for (s = 1; s < pr->body_size; s++)
942 if (pr->body[s]->can_eol)
943 pr->body[s-1]->line_like = 1;
947 ### Building the `first` sets
949 When calculating what can follow a particular non-terminal, we will need to
950 know what the "first" terminal in any subsequent non-terminal might be. So
951 we calculate the `first` set for every non-terminal and store them in an
952 array. We don't bother recording the "first" set for terminals as they are
955 As the "first" for one symbol might depend on the "first" of another,
956 we repeat the calculations until no changes happen, just like with
957 `set_nullable`. This makes use of the fact that `symset_union`
958 reports if any change happens.
960 The core of this, which finds the "first" of part of a production body,
961 will be reused for computing the "follow" sets, so we split it out
962 into a separate function.
964 ###### grammar fields
965 struct symset *first;
969 static int add_first(struct production *pr, int start,
970 struct symset *target, struct grammar *g,
975 for (s = start; s < pr->body_size; s++) {
976 struct symbol *bs = pr->body[s];
977 if (bs->type == Terminal) {
978 if (symset_find(target, bs->num) < 0) {
979 symset_add(target, bs->num, 0);
983 } else if (symset_union(target, &g->first[bs->num]))
989 *to_end = (s == pr->body_size);
993 static void build_first(struct grammar *g)
997 g->first = calloc(g->num_syms, sizeof(g->first[0]));
998 for (s = 0; s < g->num_syms; s++)
999 g->first[s] = INIT_SYMSET;
1001 while (check_again) {
1004 for (p = 0; p < g->production_count; p++) {
1005 struct production *pr = g->productions[p];
1006 struct symset *head = &g->first[pr->head->num];
1008 if (add_first(pr, 0, head, g, NULL))
1014 ### Building the `follow` sets.
1016 There are two different situations when we will want to generate "follow"
1017 sets. If we are doing an SLR analysis, we want to generate a single
1018 "follow" set for each non-terminal in the grammar. That is what is
1019 happening here. If we are doing an LALR or LR analysis we will want
1020 to generate a separate "LA" set for each item. We do that later
1021 in state generation.
1023 There are two parts to generating a "follow" set. Firstly we look at
1024 every place that any non-terminal appears in the body of any
1025 production, and we find the set of possible "first" symbols after
1026 there. This is done using `add_first` above and only needs to be done
1027 once as the "first" sets are now stable and will not change.
1031 for (p = 0; p < g->production_count; p++) {
1032 struct production *pr = g->productions[p];
1035 for (b = 0; b < pr->body_size - 1; b++) {
1036 struct symbol *bs = pr->body[b];
1037 if (bs->type == Terminal)
1039 add_first(pr, b+1, &g->follow[bs->num], g, NULL);
1043 The second part is to add the "follow" set of the head of a production
1044 to the "follow" sets of the final symbol in the production, and any
1045 other symbol which is followed only by `nullable` symbols. As this
1046 depends on "follow" itself we need to repeatedly perform the process
1047 until no further changes happen.
1051 for (again = 0, p = 0;
1052 p < g->production_count;
1053 p < g->production_count-1
1054 ? p++ : again ? (again = 0, p = 0)
1056 struct production *pr = g->productions[p];
1059 for (b = pr->body_size - 1; b >= 0; b--) {
1060 struct symbol *bs = pr->body[b];
1061 if (bs->type == Terminal)
1063 if (symset_union(&g->follow[bs->num],
1064 &g->follow[pr->head->num]))
1071 We now just need to create and initialise the `follow` list to get a
1074 ###### grammar fields
1075 struct symset *follow;
1078 static void build_follow(struct grammar *g)
1081 g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
1082 for (s = 0; s < g->num_syms; s++)
1083 g->follow[s] = INIT_SYMSET;
1087 ### Building itemsets and states
1089 There are three different levels of detail that can be chosen for
1090 building the itemsets and states for the LR grammar. They are:
1092 1. LR(0) or SLR(1), where no look-ahead is considered.
1093 2. LALR(1) where we build look-ahead sets with each item and merge
1094 the LA sets when we find two paths to the same "kernel" set of items.
1095 3. LR(1) where different look-ahead for any item in the set means
1096 a different state must be created.
1098 ###### forward declarations
1099 enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
1101 We need to be able to look through existing states to see if a newly
1102 generated state already exists. For now we use a simple sorted linked
1105 An item is a pair of numbers: the production number and the position of
1106 "DOT", which is an index into the body of the production.
1107 As the numbers are not enormous we can combine them into a single "short"
1108 and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
1109 production number (so 4000 productions with maximum of 15 symbols in the
1112 Comparing the itemsets will be a little different to comparing symsets
1113 as we want to do the lookup after generating the "kernel" of an
1114 itemset, so we need to ignore the offset=zero items which are added during
1117 To facilitate this, we modify the "DOT" number so that "0" sorts to
1118 the end of the list in the symset, and then only compare items before
1122 static inline unsigned short item_num(int production, int index)
1124 return production | ((31-index) << 11);
1126 static inline int item_prod(unsigned short item)
1128 return item & 0x7ff;
1130 static inline int item_index(unsigned short item)
1132 return (31-(item >> 11)) & 0x1f;
1135 For LR(1) analysis we need to compare not just the itemset in a state
1136 but also the LA sets. As we assign each unique LA set a number, we
1137 can just compare the symset and the data values together.
1140 static int itemset_cmp(struct symset a, struct symset b,
1141 enum grammar_type type)
1147 i < a.cnt && i < b.cnt &&
1148 item_index(a.syms[i]) > 0 &&
1149 item_index(b.syms[i]) > 0;
1151 int diff = a.syms[i] - b.syms[i];
1155 diff = a.data[i] - b.data[i];
1160 if (i == a.cnt || item_index(a.syms[i]) == 0)
1164 if (i == b.cnt || item_index(b.syms[i]) == 0)
1170 if (type < LR1 || av == -1)
1173 a.data[i] - b.data[i];
1176 And now we can build the list of itemsets. The lookup routine returns
1177 both a success flag and a pointer to where in the list an insert
1178 should happen, so we don't need to search a second time.
1180 FIXME: document min_prefix
1184 struct itemset *next;
1186 struct symset items;
1187 struct symset go_to;
1193 ###### grammar fields
1194 struct itemset *items;
1198 static int itemset_find(struct grammar *g, struct itemset ***where,
1199 struct symset kernel, enum grammar_type type)
1201 struct itemset **ip;
1203 for (ip = &g->items; *ip ; ip = & (*ip)->next) {
1204 struct itemset *i = *ip;
1206 diff = itemset_cmp(i->items, kernel, type);
1219 Adding an itemset may require merging the LA sets if LALR analysis is
1220 happening. If any new LA set adds any symbols that weren't in the old LA set, we
1221 clear the `completed` flag so that the dependants of this itemset will be
1222 recalculated and their LA sets updated.
1224 `add_itemset` must consume the symsets it is passed, either by adding
1225 them to a data structure, or freeing them.
1227 static int add_itemset(struct grammar *g, struct symset ss,
1228 enum grammar_type type)
1230 struct itemset **where, *is;
1232 int found = itemset_find(g, &where, ss, type);
1234 is = calloc(1, sizeof(*is));
1235 is->state = g->states;
1239 is->go_to = INIT_DATASET;
1248 for (i = 0; i < ss.cnt; i++) {
1249 struct symset temp = INIT_SYMSET, s;
1250 if (ss.data[i] == is->items.data[i])
1252 s = set_find(g, is->items.data[i]);
1253 symset_union(&temp, &s);
1254 s = set_find(g, ss.data[i]);
1255 if (symset_union(&temp, &s)) {
1256 is->items.data[i] = save_set(g, temp);
1267 To build all the itemsets, we first insert the initial itemset made
1268 from production zero, complete each itemset, and then generate new
1269 itemsets from old until no new ones can be made.
1271 Completing an itemset means finding all the items where "DOT" is followed by
1272 a nonterminal and adding "DOT=0" items for every production from that
1273 non-terminal - providing each item hasn't already been added.
1275 If LA sets are needed, the LA set for each new item is found using
1276 `add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
1277 be supplemented by the LA set for the item which produces the new item.
1279 We also collect a set of all symbols which follow "DOT" (in `done`) as this
1280 is used in the next stage.
1281 If any of these symbols are flagged as starting a line, then this
1282 state must be a `starts_line` state so now is a good time to record that.
1284 NOTE: precedence handling should happen here - I haven't written this yet
1287 ###### complete itemset
1288 for (i = 0; i < is->items.cnt; i++) {
1289 int p = item_prod(is->items.syms[i]);
1290 int bs = item_index(is->items.syms[i]);
1291 struct production *pr = g->productions[p];
1294 struct symset LA = INIT_SYMSET;
1295 unsigned short sn = 0;
1297 if (is->min_prefix == 0 ||
1298 (bs > 0 && bs < is->min_prefix))
1299 is->min_prefix = bs;
1300 if (bs == pr->body_size)
1303 if (symset_find(&done, s->num) < 0) {
1304 symset_add(&done, s->num, 0);
1306 is->starts_line = 1;
1308 if (s->type != Nonterminal)
1314 add_first(pr, bs+1, &LA, g, &to_end);
1316 struct symset ss = set_find(g, is->items.data[i]);
1317 symset_union(&LA, &ss);
1319 sn = save_set(g, LA);
1320 LA = set_find(g, sn);
1323 /* Add productions for this symbol */
1324 for (p2 = s->first_production;
1325 p2 < g->production_count &&
1326 g->productions[p2]->head == s;
1328 int itm = item_num(p2, 0);
1329 int pos = symset_find(&is->items, itm);
1331 symset_add(&is->items, itm, sn);
1332 /* Will have re-ordered, so start
1333 * from beginning again */
1335 } else if (type >= LALR) {
1336 struct symset ss = set_find(g, is->items.data[pos]);
1337 struct symset tmp = INIT_SYMSET;
1339 symset_union(&tmp, &ss);
1340 if (symset_union(&tmp, &LA)) {
1341 is->items.data[pos] = save_set(g, tmp);
1349 For each symbol we found (and placed in `done`) we collect all the items for
1350 which this symbol is next, and create a new itemset with "DOT" advanced over
1351 the symbol. This is then added to the collection of itemsets (or merged
1352 with a pre-existing itemset).
1354 ###### derive itemsets
1355 // Now we have a completed itemset, so we need to
1356 // compute all the 'next' itemsets and create them
1357 // if they don't exist.
1358 for (i = 0; i < done.cnt; i++) {
1360 unsigned short state;
1361 struct symbol *sym = g->symtab[done.syms[i]];
1362 struct symset newitemset = INIT_SYMSET;
1364 newitemset = INIT_DATASET;
1366 for (j = 0; j < is->items.cnt; j++) {
1367 int itm = is->items.syms[j];
1368 int p = item_prod(itm);
1369 int bp = item_index(itm);
1370 struct production *pr = g->productions[p];
1371 unsigned short la = 0;
1374 if (bp == pr->body_size)
1376 if (pr->body[bp] != sym)
1379 la = is->items.data[j];
1380 pos = symset_find(&newitemset, pr->head->num);
1382 symset_add(&newitemset, item_num(p, bp+1), la);
1383 else if (type >= LALR) {
1384 // Need to merge la set.
1385 int la2 = newitemset.data[pos];
1387 struct symset ss = set_find(g, la2);
1388 struct symset LA = INIT_SYMSET;
1389 symset_union(&LA, &ss);
1390 ss = set_find(g, la);
1391 if (symset_union(&LA, &ss))
1392 newitemset.data[pos] = save_set(g, LA);
1398 state = add_itemset(g, newitemset, type);
1399 if (symset_find(&is->go_to, done.syms[i]) < 0)
1400 symset_add(&is->go_to, done.syms[i], state);
1403 All that is left is to create the initial itemset from production zero, and
1404 with `TK_eof` as the LA set.
1407 static void build_itemsets(struct grammar *g, enum grammar_type type)
1409 struct symset first = INIT_SYMSET;
1412 unsigned short la = 0;
1414 // LA set just has eof
1415 struct symset eof = INIT_SYMSET;
1416 symset_add(&eof, TK_eof, 0);
1417 la = save_set(g, eof);
1418 first = INIT_DATASET;
1420 // production 0, offset 0 (with no data)
1421 symset_add(&first, item_num(0, 0), la);
1422 add_itemset(g, first, type);
1423 for (again = 0, is = g->items;
1425 is = is->next ?: again ? (again = 0, g->items) : NULL) {
1427 struct symset done = INIT_SYMSET;
1438 ### Completing the analysis.
1440 The exact process of analysis depends on which level was requested. For
1441 `LR0` and `LR05` we don't need first and follow sets at all. For `LALR` and
1442 `LR1` we need first, but not follow. For `SLR` we need both.
1444 We don't build the "action" tables that you might expect as the parser
1445 doesn't use them. They will be calculated to some extent if needed for
1448 Once we have built everything we allocate arrays for the two lists:
1449 symbols and itemsets. This allows more efficient access during reporting.
1450 The symbols are grouped as terminals and non-terminals and we record the
1451 changeover point in `first_nonterm`.
1453 ###### grammar fields
1454 struct symbol **symtab;
1455 struct itemset **statetab;
1458 ###### grammar_analyse
1460 static void grammar_analyse(struct grammar *g, enum grammar_type type)
1464 int snum = TK_reserved;
1465 for (s = g->syms; s; s = s->next)
1466 if (s->num < 0 && s->type == Terminal) {
1470 g->first_nonterm = snum;
1471 for (s = g->syms; s; s = s->next)
1477 g->symtab = calloc(g->num_syms, sizeof(g->symtab[0]));
1478 for (s = g->syms; s; s = s->next)
1479 g->symtab[s->num] = s;
1490 build_itemsets(g, type);
1492 g->statetab = calloc(g->states, sizeof(g->statetab[0]));
1493 for (is = g->items; is ; is = is->next)
1494 g->statetab[is->state] = is;
1497 ## Reporting on the Grammar
1499 The purpose of the report is to give the grammar developer insight into
1500 how the grammar parser will work. It is basically a structured dump of
1501 all the tables that have been generated, plus a description of any conflicts.
1503 ###### grammar_report
1504 static int grammar_report(struct grammar *g, enum grammar_type type)
1510 return report_conflicts(g, type);
1513 Firstly we have the complete list of symbols, together with the
1514 "FIRST" set if that was generated. We add a mark to each symbol to
1515 show if it can end in a newline (`>`), if it is considered to be
1516 "line-like" (`<`), or if it is nullable (`.`).
1520 static void report_symbols(struct grammar *g)
1524 printf("SYMBOLS + FIRST:\n");
1526 printf("SYMBOLS:\n");
1528 for (n = 0; n < g->num_syms; n++) {
1529 struct symbol *s = g->symtab[n];
1533 printf(" %c%c%c%3d%c: ",
1534 s->nullable ? '.':' ',
1535 s->can_eol ? '>':' ',
1536 s->line_like ? '<':' ',
1537 s->num, symtypes[s->type]);
1540 printf(" (%d%s)", s->precedence,
1541 assoc_names[s->assoc]);
1543 if (g->first && s->type == Nonterminal) {
1546 for (j = 0; j < g->first[n].cnt; j++) {
1549 prtxt(g->symtab[g->first[n].syms[j]]->name);
1556 Then we have the follow sets if they were computed.
1558 static void report_follow(struct grammar *g)
1561 printf("FOLLOW:\n");
1562 for (n = 0; n < g->num_syms; n++)
1563 if (g->follow[n].cnt) {
1567 prtxt(g->symtab[n]->name);
1568 for (j = 0; j < g->follow[n].cnt; j++) {
1571 prtxt(g->symtab[g->follow[n].syms[j]]->name);
1577 And finally the item sets. These include the GO TO tables and, for
1578 LALR and LR1, the LA set for each item. Lots of stuff, so we break
1579 it up a bit. First the items, with production number and associativity.
1581 static void report_item(struct grammar *g, int itm)
1583 int p = item_prod(itm);
1584 int dot = item_index(itm);
1585 struct production *pr = g->productions[p];
1589 prtxt(pr->head->name);
1591 for (i = 0; i < pr->body_size; i++) {
1592 printf(" %s", (dot == i ? ". ": ""));
1593 prtxt(pr->body[i]->name);
1595 if (dot == pr->body_size)
1599 printf(" (%d%s)", pr->precedence,
1600 assoc_names[pr->assoc]);
1604 The LA sets which are (possibly) reported with each item:
1606 static void report_la(struct grammar *g, int lanum)
1608 struct symset la = set_find(g, lanum);
1612 printf(" LOOK AHEAD(%d)", lanum);
1613 for (i = 0; i < la.cnt; i++) {
1616 prtxt(g->symtab[la.syms[i]]->name);
1621 Then the go to sets:
1624 static void report_goto(struct grammar *g, struct symset gt)
1629 for (i = 0; i < gt.cnt; i++) {
1631 prtxt(g->symtab[gt.syms[i]]->name);
1632 printf(" -> %d\n", gt.data[i]);
1636 Now we can report all the item sets complete with items, LA sets, and GO TO.
1638 static void report_itemsets(struct grammar *g)
1641 printf("ITEM SETS(%d)\n", g->states);
1642 for (s = 0; s < g->states; s++) {
1644 struct itemset *is = g->statetab[s];
1645 printf(" Itemset %d:%s min prefix=%d\n",
1646 s, is->starts_line?" (startsline)":"", is->min_prefix);
1647 for (j = 0; j < is->items.cnt; j++) {
1648 report_item(g, is->items.syms[j]);
1649 if (is->items.data != NO_DATA)
1650 report_la(g, is->items.data[j]);
1652 report_goto(g, is->go_to);
1656 ### Reporting conflicts
1658 Conflict detection varies a lot among different analysis levels. However
1659 LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
1660 LR1 are also similar as they have FOLLOW or LA sets.
1664 ## conflict functions
1666 static int report_conflicts(struct grammar *g, enum grammar_type type)
1669 printf("Conflicts:\n");
1671 cnt = conflicts_lr0(g, type);
1673 cnt = conflicts_slr(g, type);
1675 printf(" - no conflicts\n");
1679 LR0 conflicts are any state which has both a reducible item and
1680 a shiftable item, or two reducible items.
1682 LR05 conflicts only occur if two possible reductions exist,
1683 as shifts always over-ride reductions.
1685 ###### conflict functions
1686 static int conflicts_lr0(struct grammar *g, enum grammar_type type)
1690 for (i = 0; i < g->states; i++) {
1691 struct itemset *is = g->statetab[i];
1692 int last_reduce = -1;
1693 int prev_reduce = -1;
1694 int last_shift = -1;
1698 for (j = 0; j < is->items.cnt; j++) {
1699 int itm = is->items.syms[j];
1700 int p = item_prod(itm);
1701 int bp = item_index(itm);
1702 struct production *pr = g->productions[p];
1704 if (bp == pr->body_size) {
1705 prev_reduce = last_reduce;
1709 if (pr->body[bp]->type == Terminal)
1712 if (type == LR0 && last_reduce >= 0 && last_shift >= 0) {
1713 printf(" State %d has both SHIFT and REDUCE:\n", i);
1714 report_item(g, is->items.syms[last_shift]);
1715 report_item(g, is->items.syms[last_reduce]);
1718 if (prev_reduce >= 0) {
1719 printf(" State %d has 2 (or more) reducible items\n", i);
1720 report_item(g, is->items.syms[prev_reduce]);
1721 report_item(g, is->items.syms[last_reduce]);
1728 SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
1729 look ahead, or if a symbol in a look-ahead can be shifted. They differ only
1730 in the source of the look ahead set.
1732 We build two datasets to reflect the "action" table: one which maps
1733 terminals to items where that terminal could be shifted and another
1734 which maps terminals to items that could be reduced when the terminal
1735 is in look-ahead. We report when we get conflicts between the two.
1737 static int conflicts_slr(struct grammar *g, enum grammar_type type)
1742 for (i = 0; i < g->states; i++) {
1743 struct itemset *is = g->statetab[i];
1744 struct symset shifts = INIT_DATASET;
1745 struct symset reduce = INIT_DATASET;
1749 /* First collect the shifts */
1750 for (j = 0; j < is->items.cnt; j++) {
1751 unsigned short itm = is->items.syms[j];
1752 int p = item_prod(itm);
1753 int bp = item_index(itm);
1754 struct production *pr = g->productions[p];
1756 if (bp < pr->body_size &&
1757 pr->body[bp]->type == Terminal) {
1759 int sym = pr->body[bp]->num;
1760 if (symset_find(&shifts, sym) < 0)
1761 symset_add(&shifts, sym, itm);
1764 /* Now look for reduction and conflicts */
1765 for (j = 0; j < is->items.cnt; j++) {
1766 unsigned short itm = is->items.syms[j];
1767 int p = item_prod(itm);
1768 int bp = item_index(itm);
1769 struct production *pr = g->productions[p];
1771 if (bp < pr->body_size)
1776 la = g->follow[pr->head->num];
1778 la = set_find(g, is->items.data[j]);
1780 for (k = 0; k < la.cnt; k++) {
1781 int pos = symset_find(&shifts, la.syms[k]);
1783 printf(" State %d has SHIFT/REDUCE conflict on ", i);
1784 prtxt(g->symtab[la.syms[k]]->name);
1786 report_item(g, shifts.data[pos]);
1787 report_item(g, itm);
1790 pos = symset_find(&reduce, la.syms[k]);
1792 symset_add(&reduce, la.syms[k], itm);
1795 printf(" State %d has REDUCE/REDUCE conflict on ", i);
1796 prtxt(g->symtab[la.syms[k]]->name);
1798 report_item(g, itm);
1799 report_item(g, reduce.data[pos]);
1803 symset_free(shifts);
1804 symset_free(reduce);
1810 ## Generating the parser
1812 The exported part of the parser is the `parse_XX` function, where the name
1813 `XX` is based on the name of the parser files.
1815 This takes a `code_node`, a partially initialized `token_config`, and an
1816 optional `FILE` to send tracing to. The `token_config` gets the list of
1817 known words added and then is used with the `code_node` to initialize the
1820 `parse_XX` then calls the library function `parser_run` to actually complete
1821 the parse. This needs the `states` table and function to call the various
1822 pieces of code provided in the grammar file, so they are generated first.
1824 ###### parser_generate
1826 static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
1832 gen_reduce(f, g, file);
1835 fprintf(f, "#line 0 \"gen_parser\"\n");
1836 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace)\n",
1839 fprintf(f, "\tstruct token_state *tokens;\n");
1840 fprintf(f, "\tconfig->words_marks = known;\n");
1841 fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
1842 fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
1843 fprintf(f, "\ttokens = token_open(code, config);\n");
1844 fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
1845 fprintf(f, "\ttoken_close(tokens);\n");
1846 fprintf(f, "\treturn rv;\n");
1847 fprintf(f, "}\n\n");
1850 ### Known words table
1852 The known words table is simply an array of terminal symbols.
1853 The table of nonterminals used for tracing is a similar array.
1857 static void gen_known(FILE *f, struct grammar *g)
1860 fprintf(f, "#line 0 \"gen_known\"\n");
1861 fprintf(f, "static const char *known[] = {\n");
1862 for (i = TK_reserved;
1863 i < g->num_syms && g->symtab[i]->type == Terminal;
1865 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1866 g->symtab[i]->name.txt);
1867 fprintf(f, "};\n\n");
1870 static void gen_non_term(FILE *f, struct grammar *g)
1873 fprintf(f, "#line 0 \"gen_non_term\"\n");
1874 fprintf(f, "static const char *non_term[] = {\n");
1875 for (i = TK_reserved;
1878 if (g->symtab[i]->type == Nonterminal)
1879 fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
1880 g->symtab[i]->name.txt);
1881 fprintf(f, "};\n\n");
1884 ### States and the goto tables.
1886 For each state we record the goto table, the reducible production if
1887 there is one, or a symbol to shift for error recovery.
1888 Some of the details of the reducible production are stored in the
1889 `do_reduce` function to come later. Here we store the production number,
1890 the body size (useful for stack management) and the resulting symbol (useful
1891 for knowing how to free data later).
1893 The go to table is stored in a simple array of `sym` and corresponding
1896 ###### exported types
1904 const struct lookup * go_to;
1916 static void gen_goto(FILE *f, struct grammar *g)
1919 fprintf(f, "#line 0 \"gen_goto\"\n");
1920 for (i = 0; i < g->states; i++) {
1922 fprintf(f, "static const struct lookup goto_%d[] = {\n",
1924 struct symset gt = g->statetab[i]->go_to;
1925 for (j = 0; j < gt.cnt; j++)
1926 fprintf(f, "\t{ %d, %d },\n",
1927 gt.syms[j], gt.data[j]);
1934 static void gen_states(FILE *f, struct grammar *g)
1937 fprintf(f, "#line 0 \"gen_states\"\n");
1938 fprintf(f, "static const struct state states[] = {\n");
1939 for (i = 0; i < g->states; i++) {
1940 struct itemset *is = g->statetab[i];
1941 int j, prod = -1, prod_len;
1943 int shift_len = 0, shift_remain = 0;
1944 for (j = 0; j < is->items.cnt; j++) {
1945 int itm = is->items.syms[j];
1946 int p = item_prod(itm);
1947 int bp = item_index(itm);
1948 struct production *pr = g->productions[p];
1950 if (bp < pr->body_size) {
1951 if (shift_sym < 0 ||
1952 (shift_len == bp && shift_remain > pr->body_size - bp)) {
1953 shift_sym = pr->body[bp]->num;
1955 shift_remain = pr->body_size - bp;
1959 /* This is what we reduce */
1960 if (prod < 0 || prod_len < pr->body_size) {
1962 prod_len = pr->body_size;
1967 fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d, %d },\n",
1968 i, is->go_to.cnt, i, prod,
1969 g->productions[prod]->body_size,
1970 g->productions[prod]->head->num,
1971 is->starts_line, is->min_prefix);
1973 fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d, %d },\n",
1974 i, is->go_to.cnt, i, shift_sym,
1975 is->starts_line, is->min_prefix);
1977 fprintf(f, "};\n\n");
1980 ### The `do_reduce` function and the code
1982 When the parser engine decides to reduce a production, it calls `do_reduce`.
1983 This has two functions.
1985 Firstly, if a non-NULL `trace` file is passed, it prints out details of the
1986 production being reduced. Secondly it runs the code that was included with
1987 the production if any.
1989 This code needs to be able to store data somewhere. Rather than requiring
1990 `do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
1991 `do_reduce` return the size to be saved.
1993 In order for the code to access "global" context, we pass in the
1994 "config" pointer that was passed to parser function. If the `struct
1995 token_config` is embedded in some larger structure, the reducing code
1996 can access the larger structure using pointer manipulation.
1998 The code fragment requires translation when written out. Any `$N` needs to
1999 be converted to a reference either to that buffer (if `$0`) or to the
2000 structure returned by a previous reduction. These pointers need to be cast
2001 to the appropriate type for each access. All this is handled in
2004 `gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
2005 This applies only to symbols with references (or pointers), not those with structures.
2006 The `<` implies that the reference is being moved out, so the object will not be
2007 automatically freed. This is equivalent to assigning `NULL` to the pointer.
2011 static void gen_code(struct production *p, FILE *f, struct grammar *g)
2014 char *used = calloc(1, p->body_size);
2017 fprintf(f, "\t\t\t");
2018 for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
2032 if (*c < '0' || *c > '9') {
2039 while (c[1] >= '0' && c[1] <= '9') {
2041 n = n * 10 + *c - '0';
2044 fprintf(f, "(*(struct %.*s*%s)ret)",
2045 p->head->struct_name.len,
2046 p->head->struct_name.txt,
2047 p->head->isref ? "*":"");
2048 else if (n > p->body_size)
2049 fprintf(f, "$%d", n);
2050 else if (p->body[n-1]->type == Terminal)
2051 fprintf(f, "(*(struct token *)body[%d])",
2053 else if (p->body[n-1]->struct_name.txt == NULL)
2054 fprintf(f, "$%d", n);
2056 fprintf(f, "(*(struct %.*s*%s)body[%d])",
2057 p->body[n-1]->struct_name.len,
2058 p->body[n-1]->struct_name.txt,
2059 p->body[n-1]->isref ? "*":"", n-1);
2064 for (i = 0; i < p->body_size; i++) {
2065 if (p->body[i]->struct_name.txt &&
2066 p->body[i]->isref &&
2068 // assume this has been copied out
2069 fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
2076 static void gen_reduce(FILE *f, struct grammar *g, char *file)
2079 fprintf(f, "#line 0 \"gen_reduce\"\n");
2080 fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
2082 fprintf(f, "\tint ret_size = 0;\n");
2084 fprintf(f, "\tswitch(prod) {\n");
2085 for (i = 0; i < g->production_count; i++) {
2086 struct production *p = g->productions[i];
2087 fprintf(f, "\tcase %d:\n", i);
2090 fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
2094 if (p->head->struct_name.txt)
2095 fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
2096 p->head->struct_name.len,
2097 p->head->struct_name.txt,
2098 p->head->isref ? "*":"");
2100 fprintf(f, "\t\tbreak;\n");
2102 fprintf(f, "\t}\n\treturn ret_size;\n}\n\n");
2107 As each non-terminal can potentially cause a different type of data
2108 structure to be allocated and filled in, we need to be able to free it when
2111 It is particularly important to have fine control over freeing during error
2112 recovery where individual stack frames might need to be freed.
2114 For this, the grammar author is required to define a `free_XX` function for
2115 each structure that is used by a non-terminal. `do_free` will call whichever
2116 is appropriate given a symbol number, and will call `free` (as is
2117 appropriate for tokens) on any terminal symbol.
2121 static void gen_free(FILE *f, struct grammar *g)
2125 fprintf(f, "#line 0 \"gen_free\"\n");
2126 fprintf(f, "static void do_free(short sym, void *asn)\n");
2128 fprintf(f, "\tif (!asn) return;\n");
2129 fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
2130 fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
2131 fprintf(f, "\tswitch(sym) {\n");
2133 for (i = 0; i < g->num_syms; i++) {
2134 struct symbol *s = g->symtab[i];
2136 s->type != Nonterminal ||
2137 s->struct_name.txt == NULL)
2140 fprintf(f, "\tcase %d:\n", s->num);
2142 fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
2144 s->struct_name.txt);
2145 fprintf(f, "\t\tfree(asn);\n");
2147 fprintf(f, "\t\tfree_%.*s(asn);\n",
2149 s->struct_name.txt);
2150 fprintf(f, "\t\tbreak;\n");
2152 fprintf(f, "\t}\n}\n\n");
2155 ## The main routine.
2157 There are three key parts to the "main" function of parsergen: processing
2158 the arguments, loading the grammar file, and dealing with the grammar.
2160 ### Argument processing.
2162 Command line options allow the selection of analysis mode, name of output
2163 file, and whether or not a report should be generated. By default we create
2164 a report only if no code output was requested.
2166 The `parse_XX` function name uses the basename of the output file which
2167 should not have a suffix (`.c`). `.c` is added to the given name for the
2168 code, and `.h` is added for the header (if header text is specified in the
2175 static const struct option long_options[] = {
2176 { "LR0", 0, NULL, '0' },
2177 { "LR05", 0, NULL, '5' },
2178 { "SLR", 0, NULL, 'S' },
2179 { "LALR", 0, NULL, 'L' },
2180 { "LR1", 0, NULL, '1' },
2181 { "tag", 1, NULL, 't' },
2182 { "report", 0, NULL, 'R' },
2183 { "output", 1, NULL, 'o' },
2184 { NULL, 0, NULL, 0 }
2186 const char *options = "05SL1t:Ro:";
2188 ###### process arguments
2190 char *outfile = NULL;
2195 enum grammar_type type = LR05;
2196 while ((opt = getopt_long(argc, argv, options,
2197 long_options, NULL)) != -1) {
2212 outfile = optarg; break;
2214 tag = optarg; break;
2216 fprintf(stderr, "Usage: parsergen ...\n");
2221 infile = argv[optind++];
2223 fprintf(stderr, "No input file given\n");
2226 if (outfile && report == 1)
2229 if (name && strchr(name, '/'))
2230 name = strrchr(name, '/')+1;
2232 if (optind < argc) {
2233 fprintf(stderr, "Excess command line arguments\n");
2237 ### Loading the grammar file
2239 To be able to run `mdcode` and `scanner` on the grammar we need to memory
2242 Once we have extracted the code (with `mdcode`) we expect to find three
2243 sections: header, code, and grammar. Anything else that is not
2244 excluded by the `--tag` option is an error.
2246 "header" and "code" are optional, though it is hard to build a working
2247 parser with neither. "grammar" must be provided.
2251 #include <sys/mman.h>
2256 static void pr_err(char *msg)
2259 fprintf(stderr, "%s\n", msg);
2263 struct section *table;
2267 fd = open(infile, O_RDONLY);
2269 fprintf(stderr, "parsergen: cannot open %s: %s\n",
2270 infile, strerror(errno));
2273 len = lseek(fd, 0, 2);
2274 file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2275 table = code_extract(file, file+len, pr_err);
2277 struct code_node *hdr = NULL;
2278 struct code_node *code = NULL;
2279 struct code_node *gram = NULL;
2280 for (s = table; s; s = s->next) {
2281 struct text sec = s->section;
2282 if (tag && !strip_tag(&sec, tag))
2284 if (text_is(sec, "header"))
2286 else if (text_is(sec, "code"))
2288 else if (text_is(sec, "grammar"))
2291 fprintf(stderr, "Unknown content section: %.*s\n",
2292 s->section.len, s->section.txt);
2297 ### Processing the input
2299 As we need to append an extension to a filename and then open it for
2300 writing, and we need to do this twice, it helps to have a separate function.
2304 static FILE *open_ext(char *base, char *ext)
2306 char *fn = malloc(strlen(base) + strlen(ext) + 1);
2308 strcat(strcpy(fn, base), ext);
2314 If we can read the grammar, then we analyse and optionally report on it. If
2315 the report finds conflicts we will exit with an error status.
2317 ###### process input
2318 struct grammar *g = NULL;
2320 fprintf(stderr, "No grammar section provided\n");
2323 g = grammar_read(gram);
2325 fprintf(stderr, "Failure to parse grammar\n");
2330 grammar_analyse(g, type);
2332 if (grammar_report(g, type))
2336 If a "headers" section is defined, we write it out and include a declaration
2337 for the `parse_XX` function so it can be used from separate code.
2339 if (rv == 0 && hdr && outfile) {
2340 FILE *f = open_ext(outfile, ".h");
2342 code_node_print(f, hdr, infile);
2343 fprintf(f, "void *parse_%s(struct code_node *code, struct token_config *config, FILE *trace);\n",
2347 fprintf(stderr, "Cannot create %s.h\n",
2353 And if all goes well and an output file was provided, we create the `.c`
2354 file with the code section (if any) and the parser tables and function.
2356 if (rv == 0 && outfile) {
2357 FILE *f = open_ext(outfile, ".c");
2360 code_node_print(f, code, infile);
2361 gen_parser(f, g, infile, name);
2364 fprintf(stderr, "Cannot create %s.c\n",
2370 And that about wraps it up. We need to set the locale so that UTF-8 is
2371 recognised properly, and link with `libicuuc` as `libmdcode` requires that.
2373 ###### File: parsergen.mk
2374 parsergen : parsergen.o libscanner.o libmdcode.o
2375 $(CC) $(CFLAGS) -o parsergen parsergen.o libscanner.o libmdcode.o -licuuc
2382 int main(int argc, char *argv[])
2387 setlocale(LC_ALL,"");
2389 ## process arguments
2396 ## The SHIFT/REDUCE parser
2398 Having analysed the grammar and generated all the tables, we only need the
2399 shift/reduce engine to bring it all together.
2401 ### Goto table lookup
2403 The parser generator has nicely provided us with goto tables sorted by
2404 symbol number. We need a binary search function to find a symbol in the
2407 ###### parser functions
2409 static int search(const struct state *l, int sym)
2412 int hi = l->go_to_cnt;
2416 while (lo + 1 < hi) {
2417 int mid = (lo + hi) / 2;
2418 if (l->go_to[mid].sym <= sym)
2423 if (l->go_to[lo].sym == sym)
2424 return l->go_to[lo].state;
2429 ### The state stack.
2431 The core data structure for the parser is the stack. This tracks all the
2432 symbols that have been recognised or partially recognised.
2434 The stack usually won't grow very large - maybe a few tens of entries. So
2435 we dynamically resize an array as required but never bother to shrink it
2438 We keep the stack as two separate allocations. One, `asn_stack` stores the
2439 "abstract syntax nodes" which are created by each reduction. When we call
2440 `do_reduce` we need to pass an array of the `asn`s of the body of the
2441 production, and by keeping a separate `asn` stack, we can just pass a
2442 pointer into this stack.
2444 The other allocation stores all other stack fields of which there are six.
2445 The `state` is the most important one and guides the parsing process. The
2446 `sym` is nearly unnecessary. However when we want to free entries from the
2447 `asn_stack`, it helps to know what type they are so we can call the right
2448 freeing function. The symbol leads us to the right free function through
2451 The `indents` count tracks the line indents with-in the symbol or
2452 immediately follow it. These are used to allow indent information to
2453 guide parsing and error recovery.
2455 `since_newline` tracks how many stack frames since the last
2456 start-of-line (whether indented or not). So if `since_newline` is
2457 zero, then this symbol is at the start of a line. Similarly
2458 `since_indent` counts the number of states since an indent, it is zero
2459 precisely when `indents` is not zero.
2461 `newline_permitted` keeps track of whether newlines should be ignored
2464 The stack is most properly seen as alternating states and symbols -
2465 states, like the 'DOT' in items, are between symbols. Each frame in
2466 our stack holds a state and the symbol that was before it. The
2467 bottom of stack holds the start state but no symbol, as nothing came
2468 before the beginning.
2470 ###### parser functions
2475 short newline_permitted;
2479 short since_newline;
2489 Two operations are needed on the stack - shift (which is like push) and pop.
2491 Shift applies not only to terminals but also to non-terminals. When
2492 we reduce a production we will pop off entries corresponding to the
2493 body symbols, then push on an item for the head of the production.
2494 This last is exactly the same process as shifting in a terminal so we
2495 use the same function for both. In both cases we provide the symbol,
2496 the number of indents the symbol contains (which will be zero for a
2497 terminal symbol) and a flag indicating that the symbol was at (or was
2498 reduced from a symbol which was at) the start of a line. The state is
2499 deduced from the current top-of-stack state and the new symbol.
2501 To simplify other code we arrange for `shift` to fail if there is no `goto`
2502 state for the symbol. This is useful in basic parsing due to our design
2503 that we shift when we can, and reduce when we cannot. So the `shift`
2504 function reports if it could.
2506 `shift` is also used to push state zero onto the stack, so if the
2507 stack is empty, it always chooses zero as the next state.
2509 So `shift` finds the next state. If that succeeds it extends the
2510 allocations if needed and pushes all the information onto the stacks.
2512 Newlines are permitted after a `starts_line` state until an internal
2513 indent. If the new frame has neither a `starts_line` state nor an
2514 indent, newlines are permitted if the previous stack frame permitted
2517 ###### parser functions
2519 static int shift(struct parser *p,
2520 short sym, short indents, short start_of_line,
2522 const struct state states[])
2524 // Push an entry onto the stack
2525 struct frame next = {0};
2526 int newstate = p->tos
2527 ? search(&states[p->stack[p->tos-1].state],
2532 if (p->tos >= p->stack_size) {
2533 p->stack_size += 10;
2534 p->stack = realloc(p->stack, p->stack_size
2535 * sizeof(p->stack[0]));
2536 p->asn_stack = realloc(p->asn_stack, p->stack_size
2537 * sizeof(p->asn_stack[0]));
2540 next.indents = indents;
2541 next.state = newstate;
2542 if (states[newstate].starts_line)
2543 next.newline_permitted = 1;
2545 next.newline_permitted = 0;
2547 next.newline_permitted =
2548 p->stack[p->tos-1].newline_permitted;
2550 next.newline_permitted = 0;
2552 if (!start_of_line) {
2554 next.since_newline = p->stack[p->tos-1].since_newline + 1;
2556 next.since_newline = 1;
2559 next.since_indent = 0;
2561 next.since_indent = p->stack[p->tos-1].since_indent + 1;
2563 next.since_indent = 1;
2565 p->stack[p->tos] = next;
2566 p->asn_stack[p->tos] = asn;
2571 `pop` primarily moves the top of stack (`tos`) back down the required
2572 amount and frees any `asn` entries that need to be freed. It also
2573 collects a summary of the indents and line starts in the symbols that
2574 are being removed. It is called _after_ we reduce a production, just
2575 before we `shift` the nonterminal in.
2577 ###### parser functions
2579 static int pop(struct parser *p, int num,
2580 short *start_of_line,
2581 void(*do_free)(short sym, void *asn))
2588 for (i = 0; i < num; i++) {
2589 sol |= !p->stack[p->tos+i].since_newline;
2590 indents += p->stack[p->tos+i].indents;
2591 do_free(p->stack[p->tos+i].sym,
2592 p->asn_stack[p->tos+i]);
2595 *start_of_line = sol;
2599 ### Memory allocation
2601 The `scanner` returns tokens in a local variable - we want them in allocated
2602 memory so they can live in the `asn_stack`. Similarly the `asn` produced by
2603 a reduce is in a large buffer. Both of these require some allocation and
2604 copying, hence `memdup` and `tokcopy`.
2606 ###### parser includes
2609 ###### parser functions
2611 void *memdup(void *m, int len)
2617 memcpy(ret, m, len);
2621 static struct token *tok_copy(struct token tk)
2623 struct token *new = malloc(sizeof(*new));
2628 ### The heart of the parser.
2630 Now we have the parser. If we can shift we do, though newlines and
2631 reducing indenting may block that. If not and we can reduce we do
2632 that. If the production we reduced was production zero, then we have
2633 accepted the input and can finish.
2635 We return whatever `asn` was returned by reducing production zero.
2637 If we can neither shift nor reduce we have an error to handle. We pop
2638 single entries off the stack until we can shift the `TK_error` symbol, then
2639 drop input tokens until we find one we can shift into the new error state.
2641 When we find `TK_in` and `TK_out` tokens which report indents we need
2642 to handle them directly as the grammar cannot express what we want to
2645 `TK_in` tokens are easy: we simply update the indent count in the top stack frame to
2646 record how many indents there are following the previous token.
2648 `TK_out` tokens must be canceled against an indent count
2649 within the stack. If we can reduce some symbols that are all since
2650 the most recent indent, then we do that first. If the minimum prefix
2651 of the current state then extends back before the most recent indent,
2652 that indent can be cancelled. If the minimum prefix is shorter then
2653 the indent is premature and we must start error handling, which
2654 currently doesn't work at all.
2656 `TK_newline` tokens are ignored unless the top stack frame records
2657 that they are permitted. In that case they will not be considered for
2658 shifting if it is possible to reduce some symbols that are all since
2659 the most recent start of line. This is how a newline forcibly
2660 terminates any line-like structure - we try to reduce down to at most
2661 one symbol for each line where newlines are allowed.
2663 ###### parser includes
/* The heart of the parser (see the prose above): repeatedly shift
 * the next token when possible, otherwise reduce; reducing
 * production zero means the input has been accepted and its asn is
 * returned.  TK_in, TK_out and TK_newline tokens get special
 * handling before the normal shift/reduce, and a final error path
 * shifts TK_error then discards input until a shiftable token is
 * found.
 */
2666 void *parser_run(struct token_state *tokens,
2667 const struct state states[],
2668 int (*do_reduce)(int, void**, struct token_config*, void*),
2669 void (*do_free)(short, void*),
2670 FILE *trace, const char *non_term[],
2671 struct token_config *config)
2673 struct parser p = { 0 };
2674 struct token *tk = NULL;
/* Prime the stack by shifting an anchoring TK_eof with no asn */
2678 shift(&p, TK_eof, 0, 1, NULL, states);
2680 struct token *err_tk;
2681 struct frame *tos = &p.stack[p.tos-1];
2683 tk = tok_copy(token_next(tokens));
2684 parser_trace(trace, &p,
2685 tk, states, non_term, config->known_count);
/* TK_in: record the indent in the top stack frame ("Record") */
2687 if (tk->num == TK_in) {
2689 tos->since_newline = 0;
2690 tos->since_indent = 0;
2691 if (!states[tos->state].starts_line)
2692 tos->newline_permitted = 0;
2695 parser_trace_action(trace, "Record");
/* TK_out: try to cancel against an indent recorded in the stack */
2698 if (tk->num == TK_out) {
2699 if (states[tos->state].reduce_size >= 0 &&
2700 states[tos->state].reduce_size <= tos->since_indent)
2702 if (states[tos->state].min_prefix >= tos->since_indent) {
/* 'in' is the frame that recorded the indent being cancelled */
2704 struct frame *in = tos - tos->since_indent;
2706 if (in->indents == 0) {
2707 /* Reassess since_indent and newline_permitted */
2709 in->since_indent = in[-1].since_indent + 1;
2710 in->newline_permitted = in[-1].newline_permitted;
2712 in->since_indent = 0;
2713 in->newline_permitted = 0;
2715 if (states[in->state].starts_line)
2716 in->newline_permitted = 1;
2719 in->since_indent = in[-1].since_indent + 1;
2720 if (states[in->state].starts_line)
2721 in->newline_permitted = 1;
2723 in->newline_permitted = in[-1].newline_permitted;
2728 parser_trace_action(trace, "Cancel");
2731 // fall through and force a REDUCE (as 'shift'
/* TK_newline: discard it unless permitted here; and reduce first if
 * a whole production lies since the start of the line */
2734 if (tk->num == TK_newline) {
2735 if (!tos->newline_permitted) {
2738 parser_trace_action(trace, "Discard");
2741 if (tos->since_newline > 1 &&
2742 states[tos->state].reduce_size >= 0 &&
2743 states[tos->state].reduce_size <= tos->since_newline)
/* Normal case: shift the token if the state tables allow it */
2746 if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
2748 parser_trace_action(trace, "Shift");
/* Cannot shift: reduce if this state calls for a reduction */
2752 if (states[tos->state].reduce_prod >= 0) {
2755 const struct state *nextstate = &states[tos->state];
2756 int prod = nextstate->reduce_prod;
2757 int size = nextstate->reduce_size;
2759 static char buf[16*1024];
2760 short indents, start_of_line;
/* 'body' addresses the asns for the symbols being reduced */
2762 body = p.asn_stack + (p.tos - size);
2764 bufsize = do_reduce(prod, body, config, buf);
2766 indents = pop(&p, size, &start_of_line,
2768 res = memdup(buf, bufsize);
2769 memset(buf, 0, bufsize);
2770 if (!shift(&p, nextstate->reduce_sym,
2771 indents, start_of_line,
/* Only production zero (acceptance) may legitimately fail to shift */
2773 if (prod != 0) abort();
2777 parser_trace_action(trace, "Reduce");
2780 if (tk->num == TK_out) {
2781 // Indent problem - synthesise tokens to get us
2783 fprintf(stderr, "Synthesize %d to handle indent problem\n", states[tos->state].shift_sym);
2784 shift(&p, states[tos->state].shift_sym,
2785 0, 1, tok_copy(*tk), states);
2786 // FIXME need to report this error somehow
2787 parser_trace_action(trace, "Synthesize");
2790 /* Error. We walk up the stack until we
2791 * find a state which will accept TK_error.
2792 * We then shift in TK_error and see what state
2793 * that takes us to.
2794 * Then we discard input tokens until
2795 * we find one that is acceptable.
2797 parser_trace_action(trace, "ERROR");
2798 short indents = 0, start_of_line;
2800 err_tk = tok_copy(*tk);
2801 while (shift(&p, TK_error, 0, 0,
2802 err_tk, states) == 0
2804 // discard this state
2805 indents += pop(&p, 1, &start_of_line, do_free);
2808 // no state accepted TK_error
2811 tos = &p.stack[p.tos-1];
/* Discard input until we find a token this state can shift */
2812 while (search(&states[tos->state], tk->num) < 0 &&
2813 tk->num != TK_eof) {
2815 tk = tok_copy(token_next(tokens));
2816 if (tk->num == TK_in)
2818 if (tk->num == TK_out) {
2822 // FIXME update since_indent here
2825 if (p.tos == 0 && tk->num == TK_eof)
2827 tos = &p.stack[p.tos-1];
/* Carry indents discarded during recovery into the surviving frame */
2828 tos->indents += indents;
/* Clean up: free anything still left on the stack */
2832 pop(&p, p.tos, NULL, do_free);
2838 ###### exported functions
/* Run the parser over 'tokens' using the generated 'states' table,
 * returning the asn produced by reducing production zero.
 * 'do_reduce' and 'do_free' come from the generated parser code;
 * 'trace' is a stream for debugging output (see parser_trace). */
2839 void *parser_run(struct token_state *tokens,
2840 const struct state states[],
2841 int (*do_reduce)(int, void**, struct token_config*, void*),
2842 void (*do_free)(short, void*),
2843 FILE *trace, const char *non_term[],
2844 struct token_config *config);
2848 Being able to visualize the parser in action can be invaluable when
2849 debugging the parser code, or trying to understand how the parse of a
2850 particular grammar progresses. The stack contains all the important
2851 state, so just printing out the stack every time around the parse loop
2852 can make it possible to see exactly what is happening.
2854 This doesn't explicitly show each SHIFT and REDUCE action. However they
2855 are easily deduced from the change between consecutive lines, and the
2856 details of each state can be found by cross referencing the states list
2857 in the stack with the "report" that parsergen can generate.
2859 For terminal symbols, we just dump the token. For non-terminals we
2860 print the name of the symbol. The look ahead token is reported at the
2861 end inside square brackets.
2863 ###### parser functions
/* Printable names for reserved token numbers, used by the tracing
 * code below. */
2865 static char *reserved_words[] = {
2866 [TK_error] = "ERROR",
2869 [TK_newline] = "NEWLINE",
/* Print one stack frame's state for tracing: the state number, an
 * 's' if the state starts a line, and 'n<count>' (symbols since the
 * last newline) if newlines are permitted in this frame. */
2872 static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
2874 fprintf(trace, "(%d", f->state);
2875 if (states[f->state].starts_line)
2876 fprintf(trace, "s");
2877 if (f->newline_permitted)
2878 fprintf(trace, "n%d", f->since_newline);
2879 fprintf(trace, ") ");
/* Dump the whole parse stack, then the look-ahead token inside
 * square brackets.  Reserved symbols print by name, known terminals
 * print (up to 20 chars of) their token text, and non-terminals
 * print their symbol name. */
2882 void parser_trace(FILE *trace, struct parser *p,
2883 struct token *tk, const struct state states[],
2884 const char *non_term[], int knowns)
2889 for (i = 0; i < p->tos; i++) {
2890 struct frame *f = &p->stack[i];
2893 if (sym < TK_reserved &&
2894 reserved_words[sym] != NULL)
2895 fputs(reserved_words[sym], trace);
2896 else if (sym < TK_reserved + knowns) {
/* A known terminal: the asn on the stack is the token itself */
2897 struct token *t = p->asn_stack[i];
2898 text_dump(trace, t->txt, 20);
2900 fputs(non_term[sym - TK_reserved - knowns],
/* Each frame also shows its indent count and state details */
2903 fprintf(trace, ".%d", f->indents);
2904 if (f->since_newline == 0)
2908 parser_trace_state(trace, f, states);
2910 fprintf(trace, "[");
2911 if (tk->num < TK_reserved &&
2912 reserved_words[tk->num] != NULL)
2913 fputs(reserved_words[tk->num], trace);
2915 text_dump(trace, tk->txt, 20);
/* Report the parser action (Shift, Reduce, Discard, ...) that ended
 * this step, terminating the current trace line. */
2919 void parser_trace_action(FILE *trace, char *action)
2922 fprintf(trace, " - %s\n", action);
2927 The obvious example for a parser is a calculator.
2929 As `scanner` provides a number-parsing function using `libgmp`, it is
2930 not much work to perform arbitrary rational number calculations.
2932 This calculator takes one expression, or an equality test, per line. The
2933 results are printed and if any equality test fails, the program exits with
2936 ###### File: parsergen.mk
# Extract the "calc"-tagged grammar from this document with parsergen,
# then link the demo calculator against the support libraries.
2937 calc.c calc.h : parsergen parsergen.mdc
2938 ./parsergen --tag calc -o calc parsergen.mdc
2939 calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
2940 $(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
2945 // what do we use for a demo-grammar? A calculator of course.
2957 #include <sys/mman.h>
2962 #include "scanner.h"
2968 static void free_number(struct number *n)
/* Demo driver: map the file named in argv[1], extract its literate
 * code sections, and run the calculator parser over the first
 * section's code.  A second command-line argument turns on parser
 * tracing to stderr. */
2974 int main(int argc, char *argv[])
/* NOTE(review): open/lseek/mmap results are not checked here */
2976 int fd = open(argv[1], O_RDONLY);
/* lseek to the end (whence 2 == SEEK_END) to find the file length */
2977 int len = lseek(fd, 0, 2);
2978 char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
2979 struct section *s = code_extract(file, file+len, NULL);
2980 struct token_config config = {
/* comment tokens are ignored by the calc parser */
2981 .ignored = (1 << TK_line_comment)
2982 | (1 << TK_block_comment)
/* extra characters permitted inside numbers */
2985 .number_chars = ".,_+-",
2989 parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
2991 struct section *t = s->next;
3001 Session -> Session Line
3004 Line -> Expression NEWLINE ${ gmp_printf("Answer = %Qd\n", $1.val);
3005 { mpf_t fl; mpf_init2(fl, 20); mpf_set_q(fl, $1.val);
3006 gmp_printf(" or as a decimal: %Fg\n", fl);
3010 | Expression = Expression NEWLINE ${
3011 if (mpq_equal($1.val, $3.val))
3012 gmp_printf("Both equal %Qd\n", $1.val);
3014 gmp_printf("NOT EQUAL: %Qd\n != : %Qd\n",
3019 | NEWLINE ${ printf("Blank line\n"); }$
3020 | ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
3023 Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
3024 | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
3025 | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
3027 Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
3028 | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
3029 | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
3031 Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
3032 | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$