X-Git-Url: https://ocean-lang.org/code/?p=ocean;a=blobdiff_plain;f=csrc%2Fparsergen.mdc;h=d9467442eeab1dfec4720f7a2b4f5c6666d43867;hp=0986fc28c2b2d0d411eac8ed71d822fb0e323c01;hb=10db06aed6af588a0ccd05e80a0f50286949d56c;hpb=f41750b4888738b8123551983d1575a4774d1f1f diff --git a/csrc/parsergen.mdc b/csrc/parsergen.mdc index 0986fc2..d946744 100644 --- a/csrc/parsergen.mdc +++ b/csrc/parsergen.mdc @@ -5,6 +5,9 @@ fragments, analyses it, and can report details about the analysis and write out C code files which can be compiled to make a parser. "2D support" means that indentation and line breaks can be significant. +Indent tokens (IN and OUT) and end-of-line tokens (EOL and NEWLINE) can +be used to describe the grammar and the parser can selectively ignore +these where they aren't relevant. There are several distinct sections. @@ -149,11 +152,11 @@ because that is what we need to detect tags. Productions are comprised primarily of symbols - terminal and non-terminal. We do not make a syntactic distinction between the two, -though non-terminals must be identifiers. Non-terminal symbols are -those which appear in the head of a production, terminal symbols are -those which don't. There are also "virtual" symbols used for precedence -marking discussed later, and sometimes we won't know what type a symbol -is yet. +though non-terminals must be identifiers while terminals can also be +marks. Non-terminal symbols are those which appear in the head of a +production, terminal symbols are those which don't. There are also +"virtual" symbols used for precedence marking discussed later, and +sometimes we won't know what type a symbol is yet. To help with code safety it is possible to declare the terminal symbols. If this is done, then any symbol used in a production that does not @@ -288,7 +291,7 @@ carry precedence information but are not included in the grammar. A production can declare that it inherits the precedence of a given virtual symbol. -This use for `$$` precludes it from being used as a symbol in the +This use of `$$` precludes it from being used as a symbol in the described language. Two other symbols: `${` and `}$` are also unavailable. @@ -390,9 +393,10 @@ here is told which was found in the `isref` argument. found += 1; t = token_next(ts); } - if (found == 0) + if (found == 0) { err = "No symbols given on precedence/TERM line"; goto abort; + } return NULL; abort: while (t.num != TK_newline && t.num != TK_eof) @@ -511,10 +515,6 @@ Now we have all the bits we need to parse a full production. tk = token_next(state); while (tk.num == TK_ident || tk.num == TK_mark) { struct symbol *bs = sym_find(g, tk.txt); - if (bs->type == Unknown) { - if (!g->terminals_declared) - bs->type = Terminal; - } if (bs->type == Virtual) { err = "Virtual symbol not permitted in production"; goto abort; @@ -688,20 +688,22 @@ used as a terminal anywhere that a terminal is expected. 
goto abort; } token_close(state); - if (g->terminals_declared) { - struct symbol *s; - int errs = 0; - for (s = g->syms; s; s = s->next) { - if (s->type != Unknown) - continue; - errs += 1; - fprintf(stderr, "Token %.*s not declared\n", - s->name.len, s->name.txt); - } - if (errs) { - free(g); // FIXME free content - g = NULL; + + struct symbol *s; + for (s = g->syms; s; s = s->next) { + if (s->type != Unknown) + continue; + if (!g->terminals_declared) { + s->type = Terminal; + continue; } + err = "not declared"; + fprintf(stderr, "Token %.*s not declared\n", + s->name.len, s->name.txt); + } + if (err) { + free(g); // FIXME free content + g = NULL; } return g; abort: @@ -725,8 +727,8 @@ and to simplify some comparisons of sets, these sets will be stored in a list of unique sets, each assigned a number. Once we have the data structures in hand to manage these sets and lists, -we can start setting the 'nullable' flag, build the 'FIRST' sets, and -then create the item sets which define the various states. +we can start setting the 'nullable' flag, build the 'FIRST' and 'FOLLOW' +sets, and then create the item sets which define the various states. ### Symbol sets. @@ -1874,9 +1876,10 @@ optional `FILE` to send tracing to. The `token_config` gets the list of known words added and then is used with the `code_node` to initialize the scanner. -`parse_XX` then calls the library function `parser_run` to actually complete -the parse. This needs the `states` table and functions to call the various -pieces of code provided in the grammar file, so they are generated first. +`parse_XX` then calls the library function `parser_run` to actually +complete the parse. This needs the `states` table, the `reductions` +table and functions to call the various pieces of code provided in the +grammar file, so they are generated first. ###### parser_generate @@ -1887,6 +1890,7 @@ pieces of code provided in the grammar file, so they are generated first. gen_non_term(f, g); gen_goto(f, g); gen_states(f, g); + gen_reductions(f, g); gen_reduce(f, g, file, pre_reduce); gen_free(f, g); @@ -1898,7 +1902,7 @@ pieces of code provided in the grammar file, so they are generated first. fprintf(f, "\tconfig->words_marks = known;\n"); fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n"); fprintf(f, "\ttokens = token_open(code, config);\n"); - fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n"); + fprintf(f, "\tvoid *rv = parser_run(tokens, states, reductions, do_reduce, do_free, trace, non_term, config);\n"); fprintf(f, "\ttoken_close(tokens);\n"); fprintf(f, "\treturn rv;\n"); fprintf(f, "}\n\n"); @@ -1939,14 +1943,16 @@ The table of nonterminals used for tracing is a similar array. fprintf(f, "};\n\n"); } -### States and the goto tables. +### States, reductions, and the go to tables. -For each state we record the goto table and details of the reducible -production if there is one. -Some of the details of the reducible production are stored in the -`do_reduce` function to come later. Here we store the production -number, the body size (useful for stack management), and the resulting -symbol (useful for knowing how to free data later). +For each state we record the go to table and the reducible production if +there is one, the details of which are in a separate table of +reductions. Some of the details of the reducible production are stored +in the `do_reduce` function to come later. 
In the go to table we store
+the production number and in the reductions table: the body size (useful
+for stack management), the resulting symbol (useful for knowing how to
+free data later), and the size of the resulting asn object (useful for
+preallocating space).
 
 The go to table is stored in a simple array of `sym` and corresponding
 `state`.
@@ -1957,13 +1963,15 @@ The go to table is stored in a simple array of `sym` and corresponding
 		short sym;
 		short state;
 	};
+	struct reduction {
+		short size;
+		short sym;
+		short result_size;
+	};
 	struct state {
+		short reduce_prod;
 		short go_to_cnt;
 		const struct lookup * go_to;
-		short reduce_prod;
-		short reduce_size;
-		short reduce_sym;
-		short result_size;
 	};
 
 ###### functions
@@ -1987,6 +1995,26 @@ The go to table is stored in a simple array of `sym` and corresponding
 		}
 	}
 
+	static void gen_reductions(FILE *f, struct grammar *g)
+	{
+		int i;
+		fprintf(f, "#line 0 \"gen_reductions\"\n");
+		fprintf(f, "static const struct reduction reductions[] = {\n");
+		for (i = 0; i < g->production_count; i++) {
+			struct production *pr = g->productions[i];
+			struct symbol *hd = pr->head;
+			fprintf(f, "\t{%d, %d, ", pr->body_size, hd->num);
+			if (hd->struct_name.txt == NULL)
+				fprintf(f, "0 },\n");
+			else
+				fprintf(f, "sizeof(struct %.*s%s) },\n",
+					hd->struct_name.len,
+					hd->struct_name.txt,
+					hd->isref ? "*" : "");
+		}
+		fprintf(f, "};\n\n");
+	}
+
 	static void gen_states(FILE *f, struct grammar *g)
 	{
 		int i;
@@ -2011,24 +2039,10 @@ The go to table is stored in a simple array of `sym` and corresponding
 			}
 		}
 		if (is->go_to.cnt)
-			fprintf(f, "\t[%d] = { %d, goto_%d, ",
-				i, is->go_to.cnt, i);
+			fprintf(f, "\t[%d] = { %d, %d, goto_%d },\n",
+				i, prod, is->go_to.cnt, i);
 		else
-			fprintf(f, "\t[%d] = { 0, NULL, ", i);
-		if (prod >= 0) {
-			struct production *pr = g->productions[prod];
-			struct symbol *hd = pr->head;
-			fprintf(f, "%d, %d, %d, ",
-				prod, pr->body_size, pr->head->num);
-			if (hd->struct_name.txt == NULL)
-				fprintf(f, "0 },\n");
-			else
-				fprintf(f, "sizeof(struct %.*s%s) },\n",
-					hd->struct_name.len,
-					hd->struct_name.txt,
-					hd->isref ? "*" : "");
-		} else
-			fprintf(f, "-1, -1, -1, -1 },\n");
+			fprintf(f, "\t[%d] = { %d, 0, NULL },\n", i, prod);
 		}
 		fprintf(f, "};\n\n");
 	}
 
@@ -2039,9 +2053,9 @@ When the parser engine decides to reduce a production, it calls
 `do_reduce` which runs the code that was included with the production,
 if any.
 
-This code needs to be able to store data somewhere.  Rather than
-requiring `do_reduce` to `malloc` that "somewhere", we pass in a large
-buffer and have `do_reduce` return the size to be saved.
+This code needs to be able to store data somewhere, so we record the size
+of the data expected with each state so it can be allocated before
+`do_reduce` is called.
 
 In order for the code to access "global" context, we pass in the
 "config" pointer that was passed to the parser function.  If the `struct
@@ -2562,9 +2576,9 @@ recognised properly, and link with `libicuuc` as `libmdcode` requires that.
 
 Having analysed the grammar and generated all the tables, we only need
 the shift/reduce engine to bring it all together.
 
-### Goto table lookup
+### Go to table lookup
 
-The parser generator has nicely provided us with goto tables sorted by
+The parser generator has nicely provided us with go to tables sorted by
 symbol number.  We need a binary search function to find a symbol in the
 table.
 
@@ -2617,14 +2631,14 @@ The stack usually won't grow very large - maybe a few tens of entries. 
So we dynamically grow an array as required but never bother to shrink it down again. -We keep the stack as two separate allocations. One, `asn_stack` stores +We keep the stack as two separate allocations. One, `asn_stack`, stores the "abstract syntax nodes" which are created by each reduction. When we call `do_reduce` we need to pass an array of the `asn`s of the body of the production, and by keeping a separate `asn` stack, we can just pass a pointer into this stack. The other allocation stores all other stack fields of which there are -several. The `state` is the most important one and guides the parsing +two. The `state` is the most important one and guides the parsing process. The `sym` is nearly unnecessary as it is implicit in the state. However when we want to free entries from the `asn_stack`, it helps to know what type they are so we can call the right freeing @@ -2648,6 +2662,8 @@ to mark the beginning of the file as well as the end. void **asn_stack; int stack_size; int tos; + + ## parser state }; #### Shift and pop @@ -2661,7 +2677,7 @@ is exactly the same process as shifting in a terminal so we use the same function for both. In both cases we provide the symbol. The state is deduced from the current top-of-stack state and the new symbol. -To simplify other code we arrange for `shift` to fail if there is no `goto` +To simplify other code we arrange for `shift` to fail if there is no `go to` state for the symbol. This is useful in basic parsing due to our design that we shift when we can, and reduce when we cannot. So the `shift` function reports if it could. @@ -2685,6 +2701,7 @@ allocations if needed and pushes all the information onto the stacks. : 0; if (newstate < 0) return 0; + if (p->tos >= p->stack_size) { p->stack_size += 10; p->stack = realloc(p->stack, p->stack_size @@ -2721,52 +2738,144 @@ in. ### The heart of the parser. Now we have the parser. For each token we might shift it, trigger a -reduction, or start error handling. 2D tokens (IN, OUT, EOL) also need -to be handled. +reduction, or start error handling. 2D tokens (IN, OUT, NEWLINE, EOL) +might also be ignored. Ignoring tokens is combined with shifting. + +###### parser vars + + struct parser p = { 0 }; + struct token *tk = NULL; + int accepted = 0; -We return whatever `asn` was returned by reducing production zero. +###### heart of parser -When we find `TK_in` and `TK_out` tokens which report indents we need -to handle them directly as the grammar cannot express what we want to -do with them. + shift(&p, TK_eof, NULL, states); + while (!accepted && p.tos > 0) { + struct frame *tos = &p.stack[p.tos-1]; + if (!tk) + tk = tok_copy(token_next(tokens)); + parser_trace(trace, &p, + tk, states, non_term, config->known_count); + + ## try shift or ignore + ## try reduce + ## handle error + } -`TK_in` tokens are easy: we simply update indent count in the top stack frame to -record how many indents there are following the previous token. +Indents are ignored unless they can be shifted onto the stack +immediately or nothing can be shifted (in which case we reduce, and try +again). The end of an indented section - the OUT token - is ignored +precisely when the indent was ignored. To keep track of this we need a +small stack of flags, which is easily stored as bits in an `unsigned +long`. This will never overflow and the scanner only allows 20 levels +of indentation. -`TK_out` tokens must be canceled against an indent count -within the stack. 
If we can reduce some symbols that are all since -the most recent indent, then we do that first. If the minimum prefix -of the current state then extends back before the most recent indent, -that indent can be cancelled. If the minimum prefix is shorter then -the indent had ended prematurely and we must start error handling, which -is still a work-in-progress. +###### parser state + unsigned long ignored_indents; -`TK_newline` tokens are ignored unless the top stack frame records -that they are permitted. In that case they will not be considered for -shifting if it is possible to reduce some symbols that are all since -the most recent start of line. This is how a newline forcibly -terminates any line-like structure - we try to reduce down to at most -one symbol for each line where newlines are allowed. -A consequence of this is that a rule like +NEWLINE is ignored when in an indented section of text which was not +explicitly expected by the grammar. So if the most recent indent is +ignored, so is any NEWLINE token. -###### Example: newlines - broken +If a NEWLINE is seen but it cannot be shifted, we try to shift an EOL +token instead. If that succeeds, we make a new copy of the NEWLINE +token and continue. This allows a NEWLINE to appear to be preceded by +an indefinite number of EOL tokens. - Newlines -> - | NEWLINE Newlines - IfStatement -> Newlines if .... +The token number for `EOL` cannot be statically declared, so when the +parser starts we need to look through the array of non-terminals to find +the EOL. -cannot work, as the NEWLINE will never be shifted as the empty string -will be reduced first. Optional sets of newlines need to be include -in the thing that preceed: +###### parser state + int tk_eol; -###### Example: newlines - works +###### find eol + p.tk_eol = 0; + while (strcmp(non_term[p.tk_eol], "EOL") != 0) + p.tk_eol += 1; + p.tk_eol += TK_reserved + config->known_count; - If -> if - | NEWLINE If - IfStatement -> If .... +For other tokens, we shift the next token if that is possible, otherwise +we try to reduce a production. -Here the NEWLINE will be shifted because nothing can be reduced until -the `if` is seen. +###### try shift or ignore + + if ((tk->num == TK_newline || tk->num == TK_out) && + (p.ignored_indents & 1)) { + /* indented, so ignore OUT and NEWLINE */ + if (tk->num == TK_out) + p.ignored_indents >>= 1; + free(tk); + tk = NULL; + parser_trace_action(trace, "Ignore"); + continue; + } + + if (shift(&p, tk->num, tk, states)) { + if (tk->num == TK_out) + p.ignored_indents >>= 1; + if (tk->num == TK_in) + p.ignored_indents <<= 1; + + parser_trace_action(trace, "Shift"); + tk = NULL; + ## did shift + continue; + } else if (tk->num == TK_newline && + shift(&p, p.tk_eol, tk, states)) { + tk = tok_copy(*tk); + parser_trace_action(trace, "ShiftEOL"); + continue; + } + + if (tk->num == TK_in && states[p.stack[p.tos-1].state].go_to_cnt > 0) { + /* No indent expected here and reduce is not mandatory, so ignore IN */ + free(tk); + tk = NULL; + p.ignored_indents <<= 1; + p.ignored_indents |= 1; + parser_trace_action(trace, "Ignore"); + continue; + } + +We have already discussed the bulk of the handling of a "reduce" action, +with the `pop()` and `shift()` functions doing much of the work. There +is a little more complexity needed to manage storage for the asn (Abstract +Syntax Node), and also a test of whether the reduction is permitted. + +When we try to shift the result of reducing production-zero, it will +fail because there is no next state. 
In this case the asn will not have
+been stored on the stack, so it gets stored in the `ret` variable, and we
+report that the input has been accepted.
+
+###### parser vars
+
+	void *ret = NULL;
+
+###### try reduce
+
+	if (states[tos->state].reduce_prod >= 0) {
+		void **body;
+		void *res;
+		const struct state *nextstate = &states[tos->state];
+		int prod = nextstate->reduce_prod;
+		int size = reductions[prod].size;
+		int res_size = reductions[prod].result_size;
+
+		body = p.asn_stack + (p.tos - size);
+		res = res_size ? calloc(1, res_size) : NULL;
+		res_size = do_reduce(prod, body, config, res);
+		if (res_size != reductions[prod].result_size)
+			abort();
+		pop(&p, size, do_free);
+		if (!shift(&p, reductions[prod].sym, res, states)) {
+			accepted = 1;
+			ret = res;
+			parser_trace_action(trace, "Accept");
+		} else
+			parser_trace_action(trace, "Reduce");
+		continue;
+	}
 
 If we can neither shift nor reduce we have an error to handle.  There
 are two possible responses to an error: we can pop single frames off the
@@ -2818,85 +2927,18 @@ dropping tokens until either we manage to shift one, or reach end-of-file.
 	void *parser_run(struct token_state *tokens,
 			const struct state states[],
+			const struct reduction reductions[],
 			int (*do_reduce)(int, void**, struct token_config*, void*),
 			void (*do_free)(short, void*),
 			FILE *trace, const char *non_term[],
 			struct token_config *config)
 	{
-		struct parser p = { 0 };
-		struct token *tk = NULL;
-		int accepted = 0;
-		void *ret = NULL;
 		## parser vars
 
-		shift(&p, TK_eof, NULL, states);
-		while (!accepted && p.tos > 0) {
-			struct frame *tos = &p.stack[p.tos-1];
-			if (!tk)
-				tk = tok_copy(token_next(tokens));
-			parser_trace(trace, &p,
-				     tk, states, non_term, config->known_count);
-
-			if (tk->num == TK_in) {
-				free(tk);
-				tk = NULL;
-				parser_trace_action(trace, "Record");
-				continue;
-			}
-			if (tk->num == TK_out) {
-				if (1) {
-					// OK to cancel
+		## find eol
 
-					free(tk);
-					tk = NULL;
-					parser_trace_action(trace, "Cancel");
-					continue;
-				}
-				// fall through to error handling as both SHIFT and REDUCE
-				// will fail.
-			}
-			if (tk->num == TK_newline) {
-				if (1) {
-					free(tk);
-					tk = NULL;
-					parser_trace_action(trace, "Discard");
-					continue;
-				}
-			}
-			if (shift(&p, tk->num, tk, states)) {
-				tk = NULL;
-				parser_trace_action(trace, "Shift");
-				## did shift
-				continue;
-			}
+		## heart of parser
 
-			if (states[tos->state].reduce_prod >= 0) {
-				void **body;
-				void *res;
-				const struct state *nextstate = &states[tos->state];
-				int prod = nextstate->reduce_prod;
-				int size = nextstate->reduce_size;
-				int res_size = nextstate->result_size;
-
-				body = p.asn_stack + (p.tos - size);
-				res = res_size ? calloc(1, res_size) : NULL;
-				res_size = do_reduce(prod, body, config, res);
-				if (res_size != nextstate->result_size)
-					abort();
-
-				pop(&p, size, do_free);
-
-				if (!shift(&p, nextstate->reduce_sym,
-					   res, states)) {
-					if (prod != 0) abort();
-					accepted = 1;
-					ret = res;
-				}
-				parser_trace_action(trace, "Reduce");
-				continue;
-			}
-			## handle error
-		}
 		free(tk);
 		pop(&p, p.tos, do_free);
 		free(p.asn_stack);
@@ -2907,6 +2949,7 @@ dropping tokens until either we manage to shift one, or reach end-of-file.
 ###### exported functions
 	void *parser_run(struct token_state *tokens,
 			const struct state states[],
+			const struct reduction reductions[],
 			int (*do_reduce)(int, void**, struct token_config*, void*),
 			void (*do_free)(short, void*),
 			FILE *trace, const char *non_term[],