}
}
+### Setting `can_eol`
+
+In order to be able to ignore newline tokens when not relevant, but
+still include them in the parse when needed, we will need to know
+which states can start a "line-like" section of code. We ignore
+newlines when there is an indent since the most recent start of a
+line-like section.
+
+To know what is line-like, we first need to know which symbols can end
+a line-like section, which is precisely those which can end with a
+newline token. These symbols don't necessarily always end with a
+newline, but they can. Hence they are not described as "lines" but
+only "line-like".
+
+Clearly the `TK_newline` token can end with a newline. Any symbol
+which is the head of a production that contains a line-ending symbol
+followed only by nullable symbols is also a line-ending symbol. We
+use a new field `can_eol` to record this attribute of symbols, and
+compute it in a repetitive manner similar to `set_nullable`.
+
+###### symbol fields
+ int can_eol;
+
+###### functions
+	/* Compute the `can_eol` attribute for every symbol by a fixpoint
+	 * iteration, in the same repetitive style as `set_nullable`.
+	 * Base case: the TK_newline terminal can end with a newline.
+	 * Inductive case: a production head can_eol if some body symbol
+	 * can_eol and every symbol after it (scanning from the end of the
+	 * body) is nullable.  We loop until a full pass adds nothing new.
+	 */
+	static void set_can_eol(struct grammar *g)
+	{
+		int check_again = 1;
+		g->symtab[TK_newline]->can_eol = 1;
+		while (check_again) {
+			int p;
+			check_again = 0;
+			for (p = 0; p < g->production_count; p++) {
+				struct production *pr = g->productions[p];
+				int s;
+
+				/* Already known to end with a newline - skip. */
+				if (pr->head->can_eol)
+					continue;
+
+				/* Walk the body backwards: a can_eol symbol
+				 * followed only by nullable symbols makes the
+				 * head can_eol; a non-nullable symbol blocks
+				 * anything earlier from being "last".
+				 */
+				for (s = pr->body_size - 1; s >= 0; s--) {
+					if (pr->body[s]->can_eol) {
+						pr->head->can_eol = 1;
+						check_again = 1;
+						break;
+					}
+					if (!pr->body[s]->nullable)
+						break;
+				}
+			}
+		}
+	}
+
+
### Building the `first` sets
When calculating what can follow a particular non-terminal, we will need to
-know what the "first" terminal in an subsequent non-terminal might be. So
+know what the "first" terminal in any subsequent non-terminal might be. So
we calculate the `first` set for every non-terminal and store them in an
array. We don't bother recording the "first" set for terminals as they are
trivial.
struct symset items;
struct symset go_to;
char completed;
+ char starts_line;
};
###### grammar fields
them to a data structure, of freeing them.
static int add_itemset(struct grammar *g, struct symset ss,
- enum grammar_type type)
+ enum grammar_type type, int starts_line)
{
struct itemset **where, *is;
int i;
is->items = ss;
is->next = *where;
is->go_to = INIT_DATASET;
+ is->starts_line = starts_line;
*where = is;
return is->state;
}
sn = save_set(g, LA);
LA = set_find(g, sn);
}
+
/* Add productions for this symbol */
for (p2 = s->first_production;
p2 < g->production_count &&
###### derive itemsets
// Now we have a completed itemset, so we need to
- // create all the 'next' itemset and create them
+ // compute all the 'next' itemsets and create them
// if they don't exist.
for (i = 0; i < done.cnt; i++) {
int j;
unsigned short state;
+ int starts_line = 0;
+ struct symbol *sym = g->symtab[done.syms[i]];
struct symset newitemset = INIT_SYMSET;
if (type >= LALR)
newitemset = INIT_DATASET;
+ if (sym->can_eol ||
+ (sym->nullable && is->starts_line))
+ starts_line = 1;
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
if (bp == pr->body_size)
continue;
- if (pr->body[bp]->num != done.syms[i])
+ if (pr->body[bp] != sym)
continue;
if (type >= LALR)
la = is->items.data[j];
}
}
}
- state = add_itemset(g, newitemset, type);
+ state = add_itemset(g, newitemset, type, starts_line);
if (symset_find(&is->go_to, done.syms[i]) < 0)
symset_add(&is->go_to, done.syms[i], state);
}
}
// production 0, offset 0 (with no data)
symset_add(&first, item_num(0, 0), la);
- add_itemset(g, first, type);
+ add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
for (again = 0, is = g->items;
is;
is = is->next ?: again ? (again = 0, g->items) : NULL) {
for (s = g->syms; s; s = s->next)
g->symtab[s->num] = s;
- if (type >= SLR) {
- set_nullable(g);
+ set_nullable(g);
+ set_can_eol(g);
+ if (type >= SLR)
build_first(g);
- }
+
if (type == SLR)
build_follow(g);
if (!s)
continue;
- printf(" %c%3d%c: ",
- s->nullable ? '*':' ',
+ printf(" %c%c%3d%c: ",
+ s->nullable ? '.':' ',
+ s->can_eol ? '>':' ',
s->num, symtypes[s->type]);
prtxt(s->name);
if (s->precedence)
for (s = 0; s < g->states; s++) {
int j;
struct itemset *is = g->statetab[s];
- printf(" Itemset %d:\n", s);
+ printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
for (j = 0; j < is->items.cnt; j++) {
report_item(g, is->items.syms[j]);
if (is->items.data != NO_DATA)
### States and the goto tables.
-For each state we record the goto table and the reducible production if
-there is one.
+For each state we record the goto table, the reducible production if
+there is one, or a symbol to shift for error recovery.
Some of the details of the reducible production are stored in the
`do_reduce` function to come later. Here we store the production number,
the body size (useful for stack management) and the resulting symbol (useful
short reduce_prod;
short reduce_size;
short reduce_sym;
+ short shift_sym;
+ short starts_line;
};
fprintf(f, "static const struct state states[] = {\n");
for (i = 0; i < g->states; i++) {
struct itemset *is = g->statetab[i];
- int j, prod = -1;
+ int j, prod = -1, prod_len;
+ int shift_sym = -1;
+ int shift_len = 0, shift_remain = 0;
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
int bp = item_index(itm);
struct production *pr = g->productions[p];
- if (bp < pr->body_size)
+ if (bp < pr->body_size) {
+ if (shift_sym < 0 ||
+ (shift_len == bp && shift_remain > pr->body_size - bp)) {
+ shift_sym = pr->body[bp]->num;
+ shift_len = bp;
+ shift_remain = pr->body_size - bp;
+ }
continue;
+ }
/* This is what we reduce */
- prod = p;
- break;
+ if (prod < 0 || prod_len < pr->body_size) {
+ prod = p;
+ prod_len = pr->body_size;
+ }
}
- fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d },\n",
- i, is->go_to.cnt, i, prod,
- prod < 0 ? -1 : g->productions[prod]->body_size,
- prod < 0 ? -1 : g->productions[prod]->head->num);
+ if (prod >= 0)
+ fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
+ i, is->go_to.cnt, i, prod,
+ g->productions[prod]->body_size,
+ g->productions[prod]->head->num,
+ is->starts_line);
+ else
+ fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
+ i, is->go_to.cnt, i, shift_sym,
+ is->starts_line);
}
fprintf(f, "};\n\n");
}
production, and by keeping a separate `asn` stack, we can just pass a
pointer into this stack.
-The other allocation stores all other stack fields of which there are two.
+The other allocation stores all other stack fields of which there are four.
The `state` is the most important one and guides the parsing process. The
`sym` is nearly unnecessary. However when we want to free entries from the
`asn_stack`, it helps to know what type they are so we can call the right
freeing function. The symbol leads us to the right free function through
`do_free`.
+The `indents` count and the `starts_indented` flag track the line
+indents in the symbol. These are used to allow indent information to
+guide parsing and error recovery.
+
As well as the stack of frames we have a `next` frame which is
assembled from the incoming token and other information prior to
pushing it onto the stack.
struct frame {
short state;
short sym;
+ short starts_indented;
+ short indents;
+ short newline_permitted;
} *stack, next;
void **asn_stack;
int stack_size;
function reports if it could.
So `shift` finds the next state. If that succeed it extends the allocations
-if needed and pushed all the information onto the stacks.
+if needed and pushes all the information onto the stacks.
###### parser functions
p->asn_stack[p->tos] = asn;
p->tos++;
p->next.state = newstate;
+ p->next.indents = 0;
+ p->next.starts_indented = 0;
+ // if new state doesn't start a line, we inherit newline_permitted status
+ if (states[newstate].starts_line)
+ p->next.newline_permitted = 1;
return 1;
}
{
int i;
p->tos -= num;
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
+ p->next.indents += p->stack[p->tos+i].indents;
do_free(p->stack[p->tos+i].sym,
p->asn_stack[p->tos+i]);
+ }
- if (num)
+ if (num) {
p->next.state = p->stack[p->tos].state;
+ p->next.starts_indented = p->stack[p->tos].starts_indented;
+ p->next.newline_permitted = p->stack[p->tos].newline_permitted;
+ if (p->next.indents > p->next.starts_indented)
+ p->next.newline_permitted = 0;
+ }
}
### Memory allocation
single entries off the stack until we can shift the `TK_error` symbol, then
drop input tokens until we find one we can shift into the new error state.
+When we find `TK_in` and `TK_out` tokens which report indents we need
+to handle them directly as the grammar cannot express what we want to
+do with them.
+
+`TK_in` tokens are easy: we simply update the `next` stack frame to
+record how many indents there are and that the next token started with
+an indent.
+
+`TK_out` tokens must either be counted off against any pending indent,
+or must force reductions until there is a pending indent which isn't
+at the start of a production.
###### parser includes
#include "parser.h"
FILE *trace, const char *non_term[], int knowns)
{
struct parser p = { 0 };
- struct token *tk;
+ struct token *tk = NULL;
int accepted = 0;
void *ret;
- tk = tok_copy(token_next(tokens));
while (!accepted) {
+ struct token *err_tk;
+ if (!tk)
+ tk = tok_copy(token_next(tokens));
p.next.sym = tk->num;
if (trace)
parser_trace(trace, &p, tk, states, non_term, knowns);
+ if (p.next.sym == TK_in) {
+ p.next.starts_indented = 1;
+ p.next.indents = 1;
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ if (p.next.sym == TK_out) {
+ if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
+ (p.stack[p.tos-1].indents == 1 &&
+ states[p.next.state].reduce_size > 1)) {
+ p.stack[p.tos-1].indents -= 1;
+ if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
+ // no internal indent any more, reassess 'newline_permitted'
+ if (states[p.stack[p.tos-1].state].starts_line)
+ p.stack[p.tos-1].newline_permitted = 1;
+ else if (p.tos > 1)
+ p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
+ }
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ // fall through and force a REDUCE (as 'shift'
+ // will fail).
+ }
+ if (p.next.sym == TK_newline) {
+ if (! p.stack[p.tos-1].newline_permitted) {
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ }
if (shift(&p, tk, states)) {
- tk = tok_copy(token_next(tokens));
+ tk = NULL;
continue;
}
if (states[p.next.state].reduce_prod >= 0) {
accepted = 1;
continue;
}
+ if (tk->num == TK_out) {
+ // Indent problem - synthesise tokens to get us
+ // out of here.
+ fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
+ p.next.sym = states[p.next.state].shift_sym;
+ shift(&p, tok_copy(*tk), states);
+ // FIXME need to report this error somehow
+ continue;
+ }
/* Error. We walk up the stack until we
* find a state which will accept TK_error.
* We then shift in TK_error and see what state
* Then we discard input tokens until
* we find one that is acceptable.
*/
+
+ err_tk = tok_copy(*tk);
p.next.sym = TK_error;
- while (shift(&p, tk, states) == 0
+ while (shift(&p, err_tk, states) == 0
&& p.tos > 0)
// discard this state
pop(&p, 1, do_free);
- tk = tok_copy(*tk);
+ if (p.tos == 0) {
+ free(err_tk);
+ // no state accepted TK_error
+ break;
+ }
while (search(&states[p.next.state], tk->num) < 0 &&
tk->num != TK_eof) {
free(tk);
tk = tok_copy(token_next(tokens));
+ if (tk->num == TK_in)
+ p.next.indents += 1;
+ if (tk->num == TK_out) {
+ if (p.next.indents == 0)
+ break;
+ p.next.indents -= 1;
+ }
}
if (p.tos == 0 && tk->num == TK_eof)
break;
[TK_newline] = "NEWLINE",
[TK_eof] = "$eof",
};
+	/* Print one stack frame for tracing as "(state[.:]indents[s][n]) ":
+	 * ':' before the indent count means the frame started indented,
+	 * '.' means it did not; 's' marks a state that starts a line-like
+	 * section, 'n' marks a frame where a newline token is permitted.
+	 */
+	static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
+	{
+		fprintf(trace, "(%d", f->state);
+		if (f->indents)
+			fprintf(trace, "%c%d", f->starts_indented?':':'.',
+				f->indents);
+		if (states[f->state].starts_line)
+			fprintf(trace, "s");
+		if (f->newline_permitted)
+			fprintf(trace, "n");
+		fprintf(trace, ") ");
+	}
+
void parser_trace(FILE *trace, struct parser *p,
struct token *tk, const struct state states[],
const char *non_term[], int knowns)
int i;
for (i = 0; i < p->tos; i++) {
int sym = p->stack[i].sym;
- fprintf(trace, "(%d) ", p->stack[i].state);
+ parser_trace_state(trace, &p->stack[i], states);
if (sym < TK_reserved &&
reserved_words[sym] != NULL)
fputs(reserved_words[sym], trace);
trace);
fputs(" ", trace);
}
- fprintf(trace, "(%d) [", p->next.state);
+ parser_trace_state(trace, &p->next, states);
+ fprintf(trace, " [");
if (tk->num < TK_reserved &&
reserved_words[tk->num] != NULL)
fputs(reserved_words[tk->num], trace);