X-Git-Url: https://ocean-lang.org/code/?a=blobdiff_plain;f=csrc%2Fparsergen.mdc;h=02abed32520c18dcaa1d677b81529f25ec040e03;hb=36e687aa5a84b52dc4add6ae2dc5f7abe71e3e2d;hp=b78a00d3511e7825bcbe9ef8cbc7340b8ed66fae;hpb=e9af634f13a0957cd510052483b6f9e35a986596;p=ocean diff --git a/csrc/parsergen.mdc b/csrc/parsergen.mdc index b78a00d..02abed3 100644 --- a/csrc/parsergen.mdc +++ b/csrc/parsergen.mdc @@ -5,9 +5,9 @@ fragments, analyses it, and can report details about the analysis and write out C code files which can be compiled to make a parser. "2D support" means that indentation and line breaks can be significant. -Indent tokens (IN and OUT) and end-of-line (EOL) tokens can be used to -describe the grammar and the parser can selectively ignore these where -they aren't relevant. +Indent tokens (IN and OUT) and end-of-line tokens (EOL and NEWLINE) can +be used to describe the grammar and the parser can selectively ignore +these where they aren't relevant. There are several distinct sections. @@ -152,11 +152,11 @@ because that is what we need to detect tags. Productions are comprised primarily of symbols - terminal and non-terminal. We do not make a syntactic distinction between the two, -though non-terminals must be identifiers. Non-terminal symbols are -those which appear in the head of a production, terminal symbols are -those which don't. There are also "virtual" symbols used for precedence -marking discussed later, and sometimes we won't know what type a symbol -is yet. +though non-terminals must be identifiers while terminals can also be +marks. Non-terminal symbols are those which appear in the head of a +production, terminal symbols are those which don't. There are also +"virtual" symbols used for precedence marking discussed later, and +sometimes we won't know what type a symbol is yet. To help with code safety it is possible to declare the terminal symbols. If this is done, then any symbol used in a production that does not @@ -291,7 +291,7 @@ carry precedence information but are not included in the grammar. A production can declare that it inherits the precedence of a given virtual symbol. -This use for `$$` precludes it from being used as a symbol in the +This use of `$$` precludes it from being used as a symbol in the described language. Two other symbols: `${` and `}$` are also unavailable. @@ -393,9 +393,10 @@ here is told which was found in the `isref` argument. found += 1; t = token_next(ts); } - if (found == 0) + if (found == 0) { err = "No symbols given on precedence/TERM line"; goto abort; + } return NULL; abort: while (t.num != TK_newline && t.num != TK_eof) @@ -514,10 +515,6 @@ Now we have all the bits we need to parse a full production. tk = token_next(state); while (tk.num == TK_ident || tk.num == TK_mark) { struct symbol *bs = sym_find(g, tk.txt); - if (bs->type == Unknown) { - if (!g->terminals_declared) - bs->type = Terminal; - } if (bs->type == Virtual) { err = "Virtual symbol not permitted in production"; goto abort; @@ -691,20 +688,22 @@ used as a terminal anywhere that a terminal is expected. 
goto abort; } token_close(state); - if (g->terminals_declared) { - struct symbol *s; - int errs = 0; - for (s = g->syms; s; s = s->next) { - if (s->type != Unknown) - continue; - errs += 1; - fprintf(stderr, "Token %.*s not declared\n", - s->name.len, s->name.txt); - } - if (errs) { - free(g); // FIXME free content - g = NULL; + + struct symbol *s; + for (s = g->syms; s; s = s->next) { + if (s->type != Unknown) + continue; + if (!g->terminals_declared) { + s->type = Terminal; + continue; } + err = "not declared"; + fprintf(stderr, "Token %.*s not declared\n", + s->name.len, s->name.txt); + } + if (err) { + free(g); // FIXME free content + g = NULL; } return g; abort: @@ -728,8 +727,8 @@ and to simplify some comparisons of sets, these sets will be stored in a list of unique sets, each assigned a number. Once we have the data structures in hand to manage these sets and lists, -we can start setting the 'nullable' flag, build the 'FIRST' sets, and -then create the item sets which define the various states. +we can start setting the 'nullable' flag, build the 'FIRST' and 'FOLLOW' +sets, and then create the item sets which define the various states. ### Symbol sets. @@ -1192,10 +1191,27 @@ And now we can build the list of itemsets. The lookup routine returns both a success flag and a pointer to where in the list an insert should happen, so we don't need to search a second time. +Each state will often have at most one reducible production, but +occasionally will have more. If there are more we will need a full +action table (merged in the go to table). If there is one, we record it +in the state. If none, we record that too. These special values will +be needed both in the generator and in the parser, so they need to be +declared twice. + +###### exported types + enum { + NONE_REDUCIBLE = -1, + MANY_REDUCIBLE = -2, + }; ###### declarations + enum { + NONE_REDUCIBLE = -1, + MANY_REDUCIBLE = -2, + }; struct itemset { struct itemset *next; short state; + short reducible_prod; /* or NONE_REDUCIBLE or MANY_REDUCIBLE */ struct symset items; struct symset go_to; enum assoc assoc; @@ -1319,8 +1335,15 @@ so the item is ineffective. struct symset LA = INIT_SYMSET; unsigned short sn = 0; - if (bs == pr->body_size) + if (i == 0) + is->reducible_prod = NONE_REDUCIBLE; + if (bs == pr->body_size) { + if (is->reducible_prod == NONE_REDUCIBLE) + is->reducible_prod = p; + else + is->reducible_prod = MANY_REDUCIBLE; continue; + } s = pr->body[bs]; if (s->precedence && is->precedence && is->precedence > s->precedence) @@ -1877,9 +1900,10 @@ optional `FILE` to send tracing to. The `token_config` gets the list of known words added and then is used with the `code_node` to initialize the scanner. -`parse_XX` then calls the library function `parser_run` to actually complete -the parse. This needs the `states` table and functions to call the various -pieces of code provided in the grammar file, so they are generated first. +`parse_XX` then calls the library function `parser_run` to actually +complete the parse. This needs the `states` table, the `reductions` +table and functions to call the various pieces of code provided in the +grammar file, so they are generated first. ###### parser_generate @@ -1888,8 +1912,10 @@ pieces of code provided in the grammar file, so they are generated first. 
{ gen_known(f, g); gen_non_term(f, g); + gen_action(f, g); gen_goto(f, g); gen_states(f, g); + gen_reductions(f, g); gen_reduce(f, g, file, pre_reduce); gen_free(f, g); @@ -1901,7 +1927,7 @@ pieces of code provided in the grammar file, so they are generated first. fprintf(f, "\tconfig->words_marks = known;\n"); fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n"); fprintf(f, "\ttokens = token_open(code, config);\n"); - fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n"); + fprintf(f, "\tvoid *rv = parser_run(tokens, states, reductions, do_reduce, do_free, trace, non_term, config);\n"); fprintf(f, "\ttoken_close(tokens);\n"); fprintf(f, "\treturn rv;\n"); fprintf(f, "}\n\n"); @@ -1942,14 +1968,63 @@ The table of nonterminals used for tracing is a similar array. fprintf(f, "};\n\n"); } -### States and the goto tables. +### The Action table + +For this parser we do not have a separate action table. The action +table normally maps terminals to one of the actions SHIFT, REDUCE(p), or +ACCEPT. We can find which terminals to SHIFT via a lookup in the go to +table, and most states have at most one reducible production and in that +case to effect that reduction for any non-shiftable terminal in the +look-ahead. + +However, with SLR, LALR, and canonical-LR grammars, there may +occasionally be multiple possible reductions which we choose between +based on the look-ahead. To handle this possibility we allow reductions +to be stored in the go to table. An extra flag indicates if an entry is +a state to go to, or a production to reduce. We add the entries with +that flag here, if necessary. + +###### functions + #define IS_REDUCTION (0x8000) + + static void gen_action(FILE *f, struct grammar *g) + { + int i, j, k; + for (i = 0; i < g->states; i++) { + struct itemset *is = g->statetab[i]; + struct symset *gt = &is->go_to; + + if (is->reducible_prod != MANY_REDUCIBLE) + continue; + for (j = 0; j < is->items.cnt; j++) { + int itm = is->items.syms[j]; + int p = item_prod(itm); + int bp = item_dot(itm); + struct production *pr = g->productions[p]; + struct symset la; + if (bp != pr->body_size) + continue; + /* This is a reducible item */ + if (g->follow) + la = g->follow[pr->head->num]; + else + la = set_find(g, is->items.data[j]); + for (k = 0 ; k < la.cnt; k++) + symset_add(gt, la.syms[k], p | IS_REDUCTION); + } + } + } + +### States, reductions, and the go to tables. -For each state we record the goto table and details of the reducible -production if there is one. -Some of the details of the reducible production are stored in the -`do_reduce` function to come later. Here we store the production -number, the body size (useful for stack management), and the resulting -symbol (useful for knowing how to free data later). +For each state we record the go to table and the reducible production if +there is one, the details of which are in a separate table of +reductions. Some of the details of the reducible production are stored +in the `do_reduce` function to come later. In the go to table we store +the production number and in the reductions table: the body size (useful +for stack management), the resulting symbol (useful for knowing how to +free data later), and the size of the resulting asn object (useful for +preallocation space. The go to table is stored in a simple array of `sym` and corresponding `state`. 
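As a concrete (and purely invented) illustration of this encoding, the
generator functions defined below might emit something like the
following for a state with two reducible productions; every state,
symbol, and production number here is made up for the sake of the
example and the block is not part of the generated parser.

	static const struct lookup goto_17[] = {
		{ 4, 23, 0 },	/* symbol 4 can be shifted, taking us to state 23 */
		{ 9, 5, 1 },	/* look-ahead symbol 9 selects a reduce of production 5 */
		{ 11, 8, 1 },	/* look-ahead symbol 11 selects a reduce of production 8 */
	};

The matching entry in the `states` array records `MANY_REDUCIBLE` (-2)
so the engine knows it must consult `goto_17` to choose the reduction:

	[17] = { -2, 3, goto_17 },

A state with exactly one reducible production instead stores that
production number directly in `reduce_prod`, and its go to table then
holds only genuine state transitions.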
@@ -1958,15 +2033,18 @@ The go to table is stored in a simple array of `sym` and corresponding struct lookup { short sym; - short state; + short state_or_reduction:15; + unsigned short is_reduction:1; + }; + struct reduction { + short size; + short sym; + short result_size; }; struct state { + short reduce_prod; // or NONE_REDUCIBLE or MANY_REDUCIBLE short go_to_cnt; - const struct lookup * go_to; - short reduce_prod; - short reduce_size; - short reduce_sym; - short result_size; + const struct lookup * go_to; // includes reductions }; ###### functions @@ -1983,13 +2061,38 @@ The go to table is stored in a simple array of `sym` and corresponding continue; fprintf(f, "static const struct lookup goto_%d[] = {\n", i); - for (j = 0; j < gt.cnt; j++) - fprintf(f, "\t{ %d, %d },\n", - gt.syms[j], gt.data[j]); + for (j = 0; j < gt.cnt; j++) { + if (gt.data[j] & IS_REDUCTION) + fprintf(f, "\t{ %d, %d, 1 },\n", + gt.syms[j], gt.data[j]&~IS_REDUCTION); + else + fprintf(f, "\t{ %d, %d, 0 },\n", + gt.syms[j], gt.data[j]); + } fprintf(f, "};\n"); } } + static void gen_reductions(FILE *f, struct grammar *g) + { + int i; + fprintf(f, "#line 0 \"gen_reductions\"\n"); + fprintf(f, "static const struct reduction reductions[] = {\n"); + for (i = 0; i < g->production_count; i++) { + struct production *pr = g->productions[i]; + struct symbol *hd = pr->head; + fprintf(f, "\t{%d, %d, ", pr->body_size, hd->num); + if (hd->struct_name.txt == NULL) + fprintf(f, "0 },\n"); + else + fprintf(f, "sizeof(struct %.*s%s) },\n", + hd->struct_name.len, + hd->struct_name.txt, + hd->isref ? "*" : ""); + } + fprintf(f, "};\n\n"); + } + static void gen_states(FILE *f, struct grammar *g) { int i; @@ -1997,41 +2100,13 @@ The go to table is stored in a simple array of `sym` and corresponding fprintf(f, "static const struct state states[] = {\n"); for (i = 0; i < g->states; i++) { struct itemset *is = g->statetab[i]; - int j, prod = -1, prod_len; - - for (j = 0; j < is->items.cnt; j++) { - int itm = is->items.syms[j]; - int p = item_prod(itm); - int bp = item_dot(itm); - struct production *pr = g->productions[p]; + int prod = is->reducible_prod; - if (bp < pr->body_size) - continue; - /* This is what we reduce - choose longest */ - if (prod < 0 || prod_len < pr->body_size) { - prod = p; - prod_len = pr->body_size; - } - } if (is->go_to.cnt) - fprintf(f, "\t[%d] = { %d, goto_%d, ", - i, is->go_to.cnt, i); + fprintf(f, "\t[%d] = { %d, %d, goto_%d },\n", + i, prod, is->go_to.cnt, i); else - fprintf(f, "\t[%d] = { 0, NULL, ", i); - if (prod >= 0) { - struct production *pr = g->productions[prod]; - struct symbol *hd = pr->head; - fprintf(f, "%d, %d, %d, ", - prod, pr->body_size, pr->head->num); - if (hd->struct_name.txt == NULL) - fprintf(f, "0 },\n"); - else - fprintf(f, "sizeof(struct %.*s%s) },\n", - hd->struct_name.len, - hd->struct_name.txt, - hd->isref ? "*" : ""); - } else - fprintf(f, "-1, -1, -1, -1 },\n"); + fprintf(f, "\t[%d] = { %d, 0, NULL },\n", i, prod); } fprintf(f, "};\n\n"); } @@ -2042,9 +2117,9 @@ When the parser engine decides to reduce a production, it calls `do_reduce` which runs the code that was included with the production, if any. -This code needs to be able to store data somewhere. Rather than -requiring `do_reduce` to `malloc` that "somewhere", we pass in a large -buffer and have `do_reduce` return the size to be saved. +This code needs to be able to store data somewhere so we record the size +of the data expected with each state so it can be allocated before +`do_reduce` is called. 
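In outline the engine looks up that recorded size, allocates and zeroes
the result buffer, and only then runs the production's code.  The real
code appears in the parser engine later in this file; a minimal sketch
of the shape, using the `reductions` table generated above (`prod` and
`body` stand for the chosen production and its slice of the `asn`
stack), is:

	int res_size = reductions[prod].result_size;
	void *res = res_size ? calloc(1, res_size) : NULL;
	res_size = do_reduce(prod, body, config, res);	/* runs the production's code */
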
In order for the code to access "global" context, we pass in the "config" pointer that was passed to the parser function. If the `struct @@ -2209,17 +2284,17 @@ transformed, and will cause an error when the code is compiled. } c -= 1; } - fputs("\n", f); for (i = 0; i < p->body_size; i++) { if (p->body[i]->struct_name.txt && used[i]) { // assume this has been copied out if (p->body[i]->isref) - fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i); + fprintf(f, "\t\t*(void**)body[%d] = NULL;", i); else - fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt); + fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt); } } + fputs("\n", f); free(used); } @@ -2245,6 +2320,7 @@ transformed, and will cause an error when the code is compiled. if (p->code.txt) { fprintf(f, "#line %d \"%s\"\n", p->code_line, file); gen_code(p, f, g); + fprintf(f, "#line 7 \"gen_reduce\"\n"); } if (p->head->struct_name.txt) @@ -2565,15 +2641,15 @@ recognised properly, and link with `libicuuc` as `libmdcode` requires that. Having analysed the grammar and generated all the tables, we only need the shift/reduce engine to bring it all together. -### Goto table lookup +### Go to table lookup -The parser generator has nicely provided us with goto tables sorted by +The parser generator has nicely provided us with go to tables sorted by symbol number. We need a binary search function to find a symbol in the table. ###### parser functions - static int search(const struct state *l, int sym) + static int search(const struct state *l, int sym, int reduction) { int lo = 0; int hi = l->go_to_cnt; @@ -2587,10 +2663,11 @@ table. else hi = mid; } - if (l->go_to[lo].sym == sym) - return l->go_to[lo].state; - else + if (l->go_to[lo].sym != sym) return -1; + if (!reduction == !l->go_to[lo].is_reduction) + return l->go_to[lo].state_or_reduction; + return -1; } ### Memory allocation @@ -2620,14 +2697,14 @@ The stack usually won't grow very large - maybe a few tens of entries. So we dynamically grow an array as required but never bother to shrink it down again. -We keep the stack as two separate allocations. One, `asn_stack` stores +We keep the stack as two separate allocations. One, `asn_stack`, stores the "abstract syntax nodes" which are created by each reduction. When we call `do_reduce` we need to pass an array of the `asn`s of the body of the production, and by keeping a separate `asn` stack, we can just pass a pointer into this stack. The other allocation stores all other stack fields of which there are -several. The `state` is the most important one and guides the parsing +two. The `state` is the most important one and guides the parsing process. The `sym` is nearly unnecessary as it is implicit in the state. However when we want to free entries from the `asn_stack`, it helps to know what type they are so we can call the right freeing @@ -2641,13 +2718,6 @@ bottom of stack holds the start state but no symbol, as nothing came before the beginning. As we need to store some value, `TK_eof` is used to mark the beginning of the file as well as the end. -Indents (IN) are sometimes shifted and sometimes only accounted. -Whatever decision is made must apply equally to the matching OUT. To -ensure this we keep a stack of bits in `ignored_indents` and -`indent_depth`. When we process an IN, we record if it was ignored. -When we see an out, we pop of the relavant bit and use it to decide how -to handle the OUT. 
- ###### parser functions struct parser { @@ -2673,7 +2743,7 @@ is exactly the same process as shifting in a terminal so we use the same function for both. In both cases we provide the symbol. The state is deduced from the current top-of-stack state and the new symbol. -To simplify other code we arrange for `shift` to fail if there is no `goto` +To simplify other code we arrange for `shift` to fail if there is no `go to` state for the symbol. This is useful in basic parsing due to our design that we shift when we can, and reduce when we cannot. So the `shift` function reports if it could. @@ -2693,10 +2763,11 @@ allocations if needed and pushes all the information onto the stacks. struct frame next = {0}; int newstate = p->tos ? search(&states[p->stack[p->tos-1].state], - sym) + sym, 0) : 0; if (newstate < 0) return 0; + if (p->tos >= p->stack_size) { p->stack_size += 10; p->stack = realloc(p->stack, p->stack_size @@ -2733,8 +2804,8 @@ in. ### The heart of the parser. Now we have the parser. For each token we might shift it, trigger a -reduction, or start error handling. 2D tokens (IN, OUT, NEWLINE) might -also be ignored. Ignoring tokens is combined with shifting. +reduction, or start error handling. 2D tokens (IN, OUT, NEWLINE, EOL) +might also be ignored. Ignoring tokens is combined with shifting. ###### parser vars @@ -2758,19 +2829,37 @@ also be ignored. Ignoring tokens is combined with shifting. } Indents are ignored unless they can be shifted onto the stack -immediately. The end of an indented section - the OUT token - is -ignored precisely when the indent was ignored. To keep track of this we -need a small stack of flags, which is easily stored as bits in an -`unsigned long`. This will never overflow and the scanner only allows -20 levels of indentation. +immediately or nothing can be shifted (in which case we reduce, and try +again). The end of an indented section - the OUT token - is ignored +precisely when the indent was ignored. To keep track of this we need a +small stack of flags, which is easily stored as bits in an `unsigned +long`. This will never overflow and the scanner only allows 20 levels +of indentation. ###### parser state unsigned long ignored_indents; - int indent_depth; NEWLINE is ignored when in an indented section of text which was not explicitly expected by the grammar. So if the most recent indent is -ignored, so is any EOL token. +ignored, so is any NEWLINE token. + +If a NEWLINE is seen but it cannot be shifted, we try to shift an EOL +token instead. If that succeeds, we make a new copy of the NEWLINE +token and continue. This allows a NEWLINE to appear to be preceded by +an indefinite number of EOL tokens. + +The token number for `EOL` cannot be statically declared, so when the +parser starts we need to look through the array of non-terminals to find +the EOL. + +###### parser state + int tk_eol; + +###### find eol + p.tk_eol = 0; + while (strcmp(non_term[p.tk_eol], "EOL") != 0) + p.tk_eol += 1; + p.tk_eol += TK_reserved + config->known_count; For other tokens, we shift the next token if that is possible, otherwise we try to reduce a production. @@ -2778,45 +2867,52 @@ we try to reduce a production. 
###### try shift or ignore if ((tk->num == TK_newline || tk->num == TK_out) && - (p.ignored_indents & (1 << p.indent_depth))) { + (p.ignored_indents & 1)) { /* indented, so ignore OUT and NEWLINE */ if (tk->num == TK_out) - p.indent_depth -= 1; + p.ignored_indents >>= 1; free(tk); tk = NULL; parser_trace_action(trace, "Ignore"); continue; } - if (tk->num == TK_newline) { - if (1) { - free(tk); - tk = NULL; - parser_trace_action(trace, "Discard"); - continue; - } - } if (shift(&p, tk->num, tk, states)) { if (tk->num == TK_out) - p.indent_depth -= 1; - if (tk->num == TK_in) { - p.indent_depth += 1; - p.ignored_indents &= ~(1 << p.indent_depth); - } - tk = NULL; + p.ignored_indents >>= 1; + if (tk->num == TK_in) + p.ignored_indents <<= 1; + parser_trace_action(trace, "Shift"); + tk = NULL; ## did shift continue; + } else if (tk->num == TK_newline && + shift(&p, p.tk_eol, tk, states)) { + tk = tok_copy(*tk); + parser_trace_action(trace, "ShiftEOL"); + continue; } if (tk->num == TK_in) { - /* No indent expected here, so ignore IN */ - free(tk); - tk = NULL; - p.indent_depth += 1; - p.ignored_indents |= (1 << p.indent_depth); - parser_trace_action(trace, "Ignore"); - continue; + int should_reduce; + if (states[tos->state].reduce_prod == MANY_REDUCIBLE) + /* Only reduce if TK_in in lookahead */ + should_reduce = (search(&states[tos->state], TK_in, 1) >= 0); + else + /* Only reduce if we cannot shift anything */ + should_reduce = (states[tos->state].go_to_cnt == 0); + if (!should_reduce) { + /* No indent expected here and reduce is not indicated, + * so ignore IN + */ + free(tk); + tk = NULL; + p.ignored_indents <<= 1; + p.ignored_indents |= 1; + parser_trace_action(trace, "Ignore"); + continue; + } } We have already discussed the bulk of the handling of a "reduce" action, @@ -2832,24 +2928,26 @@ report that that input has been accepted. ###### parser vars void *ret = NULL; + int reduction; ###### try reduce - if (states[tos->state].reduce_prod >= 0) { + reduction = states[tos->state].reduce_prod; + if (reduction == MANY_REDUCIBLE) + reduction = search(&states[tos->state], tk->num, 1); + if (reduction >= 0) { void **body; void *res; - const struct state *nextstate = &states[tos->state]; - int prod = nextstate->reduce_prod; - int size = nextstate->reduce_size; - int res_size = nextstate->result_size; + int size = reductions[reduction].size; + int res_size = reductions[reduction].result_size; body = p.asn_stack + (p.tos - size); res = res_size ? calloc(1, res_size) : NULL; - res_size = do_reduce(prod, body, config, res); - if (res_size != nextstate->result_size) + res_size = do_reduce(reduction, body, config, res); + if (res_size != reductions[reduction].result_size) abort(); pop(&p, size, do_free); - if (!shift(&p, nextstate->reduce_sym, res, states)) { + if (!shift(&p, reductions[reduction].sym, res, states)) { accepted = 1; ret = res; parser_trace_action(trace, "Accept"); @@ -2908,6 +3006,7 @@ dropping tokens until either we manage to shift one, or reach end-of-file. void *parser_run(struct token_state *tokens, const struct state states[], + const struct reduction reductions[], int (*do_reduce)(int, void**, struct token_config*, void*), void (*do_free)(short, void*), FILE *trace, const char *non_term[], @@ -2915,6 +3014,8 @@ dropping tokens until either we manage to shift one, or reach end-of-file. { ## parser vars + ## find eol + ## heart of parser free(tk); @@ -2927,6 +3028,7 @@ dropping tokens until either we manage to shift one, or reach end-of-file. 
###### exported functions void *parser_run(struct token_state *tokens, const struct state states[], + const struct reduction reductions[], int (*do_reduce)(int, void**, struct token_config*, void*), void (*do_free)(short, void*), FILE *trace, const char *non_term[], @@ -3139,3 +3241,73 @@ exits with an error. 355//113 error + +## A second example + +I had originally thought that the LR05 style grammar would suit me there +should never be a sequence of token that means either of two different +things depending on what comes follows it. Without that ambiguity, the +extra strength of LALR, or even SLR, would not be needed. However I +discovered this was too simplistic. There are times when it is quite +reasonable for the empty string, or maybe a NEWLINE, to have different +meanings depending on the next token. For this, LR05 is not sufficient, +so I added full action-table support. + +This example tests the selection of REDUCE actions based on the +look-ahead symbol, using a trivial grammar where the empty string can +reasonably mean different things depending on what it precedes. + + +####### File: parsergen.mk + demos :: do_lalr_demo + lalr_demo.c lalr_demo.h : parsergen parsergen.mdc + ./parsergen --LALR --tag demo -o lalr_demo parsergen.mdc + lalr_demo: lalr_demo.o libparser.o libscanner.o libmdcode.o + $(CC) -o lalr_demo lalr_demo.c libparser.o libscanner.o libmdcode.o -licuuc + do_lalr_demo: lalr_demo + NOTRACE=1 ./lalr_demo + @echo 'Should produce "start of line, empty sign, empty sigl"' + +####### demo: grammar + + $TERM $ @ + - + line -> ${ printf("start of line"); }$ + | line term + term -> sigl IDENTIFIER + | sign NUMBER + sigl -> $ + | @ + | ${ printf(", empty sigl"); }$ + sign -> + + | - + | ${ printf(", empty sign"); }$ + +####### demo: header + + // + +####### demo: code + + #include + #include + #include "mdcode.h" + #include "scanner.h" + #include "parser.h" + #include "lalr_demo.h" + int main(int argc, char *argv[]) + { + char test[] = "````\n$a -1 5 b\n"; + struct section *table = code_extract(test, test+sizeof(test)-1, NULL); + struct token_config config = { + .ignored = (1 << TK_line_comment) + | (1 << TK_newline) + | (1 << TK_in) + | (1 << TK_out), + .number_chars = "", + .word_start = "", + .word_cont = "", + }; + parse_lalr_demo(table->code, &config, getenv("NOTRACE") ? NULL : stderr); + printf("\n"); + exit(0); + }
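
To make the role of the look-ahead concrete, here is an informal sketch
of how the interesting part of the test input `$a -1 5 b` is handled.
State and production numbers are omitted because they depend on the
generated tables, so this is a description of the decisions rather than
a real trace.

	$ a	shift "$", reduce it to sigl, shift "a", reduce the term
	- 1	shift "-", reduce it to sign, shift "1", reduce the term
	5	NUMBER in the look-ahead: reduce the empty sign
		(", empty sign" is printed), then shift "5"
	b	IDENTIFIER in the look-ahead: reduce the empty sigl
		(", empty sigl" is printed), then shift "b"

Both empty productions are reducible in the state reached after a
complete `line`, so a single per-state reduction in the LR05 style could
not make the choice at "5" or "b"; the look-ahead entries in the go to
table are what allow the expected output
"start of line, empty sign, empty sigl" to be produced.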