write out C code files which can be compiled to make a parser.
"2D support" means that indentation and line breaks can be significant.
+Indent tokens (IN and OUT) and end-of-line tokens (EOL and NEWLINE) can
+be used to describe the grammar and the parser can selectively ignore
+these where they aren't relevant.
There are several distinct sections.
struct production {
unsigned short precedence;
enum assoc assoc;
- char line_like;
## production fields
};
struct grammar {
Productions are comprised primarily of symbols - terminal and
non-terminal. We do not make a syntactic distinction between the two,
-though non-terminals must be identifiers. Non-terminal symbols are
-those which appear in the head of a production, terminal symbols are
-those which don't. There are also "virtual" symbols used for precedence
-marking discussed later, and sometimes we won't know what type a symbol
-is yet.
+though non-terminals must be identifiers while terminals can also be
+marks. Non-terminal symbols are those which appear in the head of a
+production, terminal symbols are those which don't. There are also
+"virtual" symbols used for precedence marking discussed later, and
+sometimes we won't know what type a symbol is yet.
To help with code safety it is possible to declare the terminal symbols.
If this is done, then any symbol used in a production that does not
Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
table of known symbols and the resulting parser will report them as
`TK_reserved + N`. A small set of identifiers are reserved for the
-different token types that `scanner` can report.
+different token types that `scanner` can report, and an even smaller set
+are reserved for a special token that the parser can generate (`EOL`) as
+will be described later. This latter set cannot use predefined numbers,
+so they are marked as `isspecial` for now and will get assigned a number
+with the non-terminals later.
###### declarations
{ TK_out, "OUT" },
{ TK_newline, "NEWLINE" },
{ TK_eof, "$eof" },
+ { -1, "EOL" },
};
+
###### symbol fields
short num;
+ unsigned int isspecial:1;
Note that `TK_eof` and the two `TK_*_comment` tokens cannot be
recognised. The former is automatically expected at the end of the text
s = sym_find(g, t);
s->type = Terminal;
s->num = reserved_words[i].num;
+ s->isspecial = 1;
}
}
production can declare that it inherits the precedence of a given
virtual symbol.
-This use for `$$` precludes it from being used as a symbol in the
+This use of `$$` precludes it from being used as a symbol in the
described language. Two other symbols: `${` and `}$` are also
unavailable.
found += 1;
t = token_next(ts);
}
- if (found == 0)
+ if (found == 0) {
err = "No symbols given on precedence/TERM line";
goto abort;
+ }
return NULL;
abort:
while (t.num != TK_newline && t.num != TK_eof)
tk = token_next(state);
while (tk.num == TK_ident || tk.num == TK_mark) {
struct symbol *bs = sym_find(g, tk.txt);
- if (bs->type == Unknown) {
- if (!g->terminals_declared)
- bs->type = Terminal;
- }
if (bs->type == Virtual) {
err = "Virtual symbol not permitted in production";
goto abort;
goto abort;
}
vs = sym_find(g, tk.txt);
- if (vs->num == TK_newline)
- p.line_like = 1;
- else if (vs->num == TK_out)
- p.line_like = 2;
- else if (vs->precedence == 0) {
+ if (vs->precedence == 0) {
err = "symbol after $$ must have precedence";
goto abort;
} else {
goto abort;
}
token_close(state);
- if (g->terminals_declared) {
- struct symbol *s;
- int errs = 0;
- for (s = g->syms; s; s = s->next) {
- if (s->type != Unknown)
- continue;
- errs += 1;
- fprintf(stderr, "Token %.*s not declared\n",
- s->name.len, s->name.txt);
- }
- if (errs) {
- free(g); // FIXME free content
- g = NULL;
+
+ struct symbol *s;
+ for (s = g->syms; s; s = s->next) {
+ if (s->type != Unknown)
+ continue;
+ if (!g->terminals_declared) {
+ s->type = Terminal;
+ continue;
}
+ err = "not declared";
+ fprintf(stderr, "Token %.*s not declared\n",
+ s->name.len, s->name.txt);
+ }
+ if (err) {
+ free(g); // FIXME free content
+ g = NULL;
}
return g;
abort:
list of unique sets, each assigned a number.
Once we have the data structures in hand to manage these sets and lists,
-we can start setting the 'nullable' flag, build the 'FIRST' sets, and
-then create the item sets which define the various states.
+we can start setting the 'nullable' flag, build the 'FIRST' and 'FOLLOW'
+sets, and then create the item sets which define the various states.
### Symbol sets.
}
}
-### Setting `line_like`
-
-In order to be able to ignore newline tokens when not relevant, but
-still include them in the parse when needed, we will need to know
-which states can start a "line-like" section of code. We ignore
-newlines when there is an indent since the most recent start of a
-line-like symbol.
-
-A "line_like" symbol is simply any symbol that can derive a NEWLINE.
-If a symbol cannot derive a NEWLINE, then it is only part of a line -
-so is word-like. If it can derive a NEWLINE, then we consider it to
-be like a line.
-
-Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
-is the head of a production that contains a line_like symbol is also a
-line-like symbol. We use a new field `line_like` to record this
-attribute of symbols, and compute it in a repetitive manner similar to
-`set_nullable`.
-
-###### symbol fields
- int line_like;
-
-###### functions
- static void set_line_like(struct grammar *g)
- {
- int check_again = 1;
- g->symtab[TK_newline]->line_like = 1;
- while (check_again) {
- int p;
- check_again = 0;
- for (p = 0; p < g->production_count; p++) {
- struct production *pr = g->productions[p];
- int s;
-
- if (pr->head->line_like)
- continue;
-
- for (s = 0 ; s < pr->body_size; s++) {
- if (pr->body[s]->line_like) {
- pr->head->line_like = 1;
- check_again = 1;
- break;
- }
- }
- }
- }
- }
-
### Building the `first` sets
When calculating what can follow a particular non-terminal, we will need
need to be considered for completion again. So a `completed` flag is
needed.
-For correct handling of `TK_newline` when parsing, we will need to
-know which states (itemsets) can occur at the start of a line, so we
-will record a `starts_line` flag too whenever DOT is at the start of a
-`line_like` symbol.
-
-Finally, for handling `TK_out` we need to know whether productions in the
-current state started *before* the most recent indent. A state
-doesn't usually keep details of individual productions, so we need to
-add one extra detail. `min_prefix` is the smallest non-zero number of
-symbols *before* DOT in any production in an itemset. This will allow
-us to determine if the the most recent indent is sufficiently recent
-to cancel it against a `TK_out`. If it was seen longer ago than the
-`min_prefix`, and if the current state cannot be reduced, then the
-indented section must have ended in the middle of a syntactic unit, so
-an error must be signaled.
-
And now we can build the list of itemsets. The lookup routine returns
both a success flag and a pointer to where in the list an insert should
happen, so we don't need to search a second time.
enum assoc assoc;
unsigned short precedence;
char completed;
- char starts_line;
- int min_prefix;
};
###### grammar fields
item.
We also collect a set of all symbols which follow "DOT" (in `done`) as
-this is used in the next stage. If any of these symbols are flagged as
-`line_like`, then this state must be a `starts_line` state so now is a
-good time to record that.
+this is used in the next stage.
When itemsets are created we assign a precedence to the itemset from the
complete item, if there is one. We ignore the possibility of there
struct symbol *s;
struct symset LA = INIT_SYMSET;
unsigned short sn = 0;
- struct symset LAnl = INIT_SYMSET;
- unsigned short snnl = 0;
- if (is->min_prefix == 0 ||
- (bs > 0 && bs < is->min_prefix))
- is->min_prefix = bs;
if (bs == pr->body_size)
continue;
s = pr->body[bs];
* not Right-associative, so we mustn't shift it.
*/
continue;
- if (symset_find(&done, s->num) < 0) {
+ if (symset_find(&done, s->num) < 0)
symset_add(&done, s->num, 0);
- }
+
if (s->type != Nonterminal)
continue;
- if (s->line_like)
- is->starts_line = 1;
check_again = 1;
if (type >= LALR) {
// Need the LA set.
}
sn = save_set(g, LA);
LA = set_find(g, sn);
- if (symset_find(&LA, TK_newline))
- symset_add(&LAnl, TK_newline, 0);
- snnl = save_set(g, LAnl);
- LAnl = set_find(g, snnl);
}
/* Add productions for this symbol */
int itm = item_num(p2, 0);
int pos = symset_find(&is->items, itm);
if (pos < 0) {
- if (g->productions[p2]->line_like)
- symset_add(&is->items, itm, snnl);
- else
- symset_add(&is->items, itm, sn);
+ symset_add(&is->items, itm, sn);
/* Will have re-ordered, so start
* from beginning again */
i = -1;
struct symset tmp = INIT_SYMSET;
struct symset *la = &LA;
- if (g->productions[p2]->line_like)
- la = &LAnl;
symset_union(&tmp, &ss);
if (symset_union(&tmp, la)) {
is->items.data[pos] = save_set(g, tmp);
Once we have built everything we allocate arrays for the two lists:
symbols and itemsets. This allows more efficient access during
-reporting. The symbols are grouped as terminals and then non-terminals,
-and we record the changeover point in `first_nonterm`.
+reporting. The symbols are grouped as terminals, then non-terminals,
+then virtual, with the start of non-terminals recorded as `first_nonterm`.
+Special terminals -- meaning just EOL -- are included with the
+non-terminals so that they are not expected by the scanner.
###### grammar fields
struct symbol **symtab;
struct itemset *is;
int snum = TK_reserved;
for (s = g->syms; s; s = s->next)
- if (s->num < 0 && s->type == Terminal) {
+ if (s->num < 0 && s->type == Terminal && !s->isspecial) {
s->num = snum;
snum++;
}
g->symtab[s->num] = s;
set_nullable(g);
- set_line_like(g);
if (type >= SLR)
build_first(g);
Firstly we have the complete list of symbols, together with the
"FIRST" set if that was generated. We add a mark to each symbol to
-show if it is considered to be "line-like" (`<`), or if it is nullable (`.`).
+show if it is nullable (`.`).
###### functions
if (!s)
continue;
- printf(" %c%c%3d%c: ",
+ printf(" %c%3d%c: ",
s->nullable ? '.':' ',
- s->line_like ? '<':' ',
s->num, symtypes[s->type]);
prtxt(s->name);
if (s->precedence)
printf(" [%d%s]", s->precedence,
assoc_names[s->assoc]);
}
- if (pr->line_like == 1)
- printf(" $$NEWLINE");
- else if (pr->line_like)
- printf(" $$OUT");
printf("\n");
}
for (s = 0; s < g->states; s++) {
int j;
struct itemset *is = g->statetab[s];
- printf(" Itemset %d:%s min prefix=%d",
- s, is->starts_line?" (startsline)":"", is->min_prefix);
+ printf(" Itemset %d:", s);
if (is->precedence)
printf(" %d%s", is->precedence, assoc_names[is->assoc]);
printf("\n");
which maps terminals to items that could be reduced when the terminal
is in look-ahead. We report when we get conflicts between the two.
-As a special case, if we find a SHIFT/REDUCE conflict, on the NEWLINE
-terminal, we ignore it. NEWLINES are handled specially with its own
-rules for when to shift and when to reduce. Conflicts are expected,
-but handled internally.
-
static int conflicts_slr(struct grammar *g, enum grammar_type type)
{
int i;
int k;
for (k = 0; k < la.cnt; k++) {
int pos = symset_find(&shifts, la.syms[k]);
- if (pos >= 0 && la.syms[k] != TK_newline) {
+ if (pos >= 0) {
printf(" State %d has SHIFT/REDUCE conflict on ", i);
cnt++;
prtxt(g->symtab[la.syms[k]]->name);
known words added and then is used with the `code_node` to initialize the
scanner.
-`parse_XX` then calls the library function `parser_run` to actually complete
-the parse. This needs the `states` table and functions to call the various
-pieces of code provided in the grammar file, so they are generated first.
+`parse_XX` then calls the library function `parser_run` to actually
+complete the parse. This needs the `states` table, the `reductions`
+table and functions to call the various pieces of code provided in the
+grammar file, so they are generated first.
###### parser_generate
gen_non_term(f, g);
gen_goto(f, g);
gen_states(f, g);
+ gen_reductions(f, g);
gen_reduce(f, g, file, pre_reduce);
gen_free(f, g);
fprintf(f, "\tconfig->words_marks = known;\n");
fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
fprintf(f, "\ttokens = token_open(code, config);\n");
- fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
+ fprintf(f, "\tvoid *rv = parser_run(tokens, states, reductions, do_reduce, do_free, trace, non_term, config);\n");
fprintf(f, "\ttoken_close(tokens);\n");
fprintf(f, "\treturn rv;\n");
fprintf(f, "}\n\n");
for (i = TK_reserved;
i < g->num_syms;
i++)
- if (g->symtab[i]->type == Nonterminal)
+ if (g->symtab[i]->type == Nonterminal ||
+ g->symtab[i]->isspecial)
fprintf(f, "\t\"%.*s\",\n", g->symtab[i]->name.len,
g->symtab[i]->name.txt);
fprintf(f, "};\n\n");
}
-### States and the goto tables.
+### States, reductions, and the go to tables.
-For each state we record the goto table and details of the reducible
-production if there is one.
-Some of the details of the reducible production are stored in the
-`do_reduce` function to come later. Here we store the production
-number, the body size (useful for stack management), and the resulting
-symbol (useful for knowing how to free data later).
+For each state we record the go to table and the reducible production if
+there is one, the details of which are in a separate table of
+reductions. Some of the details of the reducible production are stored
+in the `do_reduce` function to come later. In the go to table we store
+the production number and in the reductions table: the body size (useful
+for stack management), the resulting symbol (useful for knowing how to
+free data later), and the size of the resulting asn object (useful for
+preallocating space).
The go to table is stored in a simple array of `sym` and corresponding
`state`.
short sym;
short state;
};
+ struct reduction {
+ short size;
+ short sym;
+ short result_size;
+ };
struct state {
short go_to_cnt;
const struct lookup * go_to;
short reduce_prod;
- short reduce_size;
- short reduce_sym;
- char starts_line;
- char newline_only;
- short min_prefix;
};
###### functions
}
}
+ static void gen_reductions(FILE *f, struct grammar *g)
+ {
+ int i;
+ fprintf(f, "#line 0 \"gen_reductions\"\n");
+ fprintf(f, "static const struct reduction reductions[] = {\n");
+ for (i = 0; i < g->production_count; i++) {
+ struct production *pr = g->productions[i];
+ struct symbol *hd = pr->head;
+ fprintf(f, "\t{%d, %d, ", pr->body_size, hd->num);
+ if (hd->struct_name.txt == NULL)
+ fprintf(f, "0 },\n");
+ else
+ fprintf(f, "sizeof(struct %.*s%s) },\n",
+ hd->struct_name.len,
+ hd->struct_name.txt,
+ hd->isref ? "*" : "");
+ }
+ fprintf(f, "};\n\n");
+ }
+
static void gen_states(FILE *f, struct grammar *g)
{
int i;
}
}
if (is->go_to.cnt)
- fprintf(f, "\t[%d] = { %d, goto_%d, ",
- i, is->go_to.cnt, i);
+ fprintf(f, "\t[%d] = { %d, goto_%d, %d },\n",
+ i, is->go_to.cnt, i, prod);
else
- fprintf(f, "\t[%d] = { 0, NULL, ", i);
- if (prod >= 0) {
- struct production *pr = g->productions[prod];
- fprintf(f, "%d, %d, %d, %d, %d, %d },\n", prod,
- pr->body_size,
- pr->head->num,
- is->starts_line,
- pr->line_like,
- is->min_prefix);
- } else
- fprintf(f, "-1, -1, -1, %d, 0, %d },\n",
- is->starts_line, is->min_prefix);
+ fprintf(f, "\t[%d] = { 0, NULL, %d },\n", i, prod);
}
fprintf(f, "};\n\n");
}
`do_reduce` which runs the code that was included with the production,
if any.
-This code needs to be able to store data somewhere. Rather than
-requiring `do_reduce` to `malloc` that "somewhere", we pass in a large
-buffer and have `do_reduce` return the size to be saved.
+This code needs to be able to store data somewhere so we record the size
+of the data expected with each state so it can be allocated before
+`do_reduce` is called.
In order for the code to access "global" context, we pass in the
"config" pointer that was passed to the parser function. If the `struct
fprintf(f, "static void do_free(short sym, void *asn)\n");
fprintf(f, "{\n");
fprintf(f, "\tif (!asn) return;\n");
- fprintf(f, "\tif (sym < %d) {\n", g->first_nonterm);
+ fprintf(f, "\tif (sym < %d", g->first_nonterm);
+ /* Need to handle special terminals too */
+ for (i = 0; i < g->num_syms; i++) {
+ struct symbol *s = g->symtab[i];
+ if (i >= g->first_nonterm && s->type == Terminal &&
+ s->isspecial)
+ fprintf(f, " || sym == %d", s->num);
+ }
+ fprintf(f, ") {\n");
fprintf(f, "\t\tfree(asn);\n\t\treturn;\n\t}\n");
fprintf(f, "\tswitch(sym) {\n");
Having analysed the grammar and generated all the tables, we only need
the shift/reduce engine to bring it all together.
-### Goto table lookup
+### Go to table lookup
-The parser generator has nicely provided us with goto tables sorted by
+The parser generator has nicely provided us with go to tables sorted by
symbol number. We need a binary search function to find a symbol in the
table.
### Memory allocation
The `scanner` returns tokens in a local variable - we want them in allocated
-memory so they can live in the `asn_stack`. Similarly the `asn` produced by
-a reduce is in a large buffer. Both of these require some allocation and
-copying, hence `memdup` and `tok_copy`.
+memory so they can live in the `asn_stack`. So we provide `tok_copy` to
+make an allocated copy as required.
###### parser includes
#include <memory.h>
###### parser functions
- void *memdup(void *m, int len)
- {
- void *ret;
- if (len == 0)
- return NULL;
- ret = malloc(len);
- memcpy(ret, m, len);
- return ret;
- }
-
static struct token *tok_copy(struct token tk)
{
struct token *new = malloc(sizeof(*new));
So we dynamically grow an array as required but never bother to
shrink it down again.
-We keep the stack as two separate allocations. One, `asn_stack` stores
+We keep the stack as two separate allocations. One, `asn_stack`, stores
the "abstract syntax nodes" which are created by each reduction. When
we call `do_reduce` we need to pass an array of the `asn`s of the body
of the production, and by keeping a separate `asn` stack, we can just
pass a pointer into this stack.
The other allocation stores all other stack fields of which there are
-several. The `state` is the most important one and guides the parsing
+two. The `state` is the most important one and guides the parsing
process. The `sym` is nearly unnecessary as it is implicit in the
state. However when we want to free entries from the `asn_stack`, it
helps to know what type they are so we can call the right freeing
function. The symbol leads us to the right free function through
`do_free`.
-The `indents` count tracks the line indents with-in the symbol or
-immediately follow it. These are used to allow indent information to
-guide parsing and error recovery.
-
-`since_newline` tracks how many stack frames since the last
-start-of-line (whether indented or not). So if `since_newline` is
-zero, then this symbol is at the start of a line. Similarly
-`since_indent` counts the number of states since an indent, it is zero
-precisely when `indents` is not zero.
-
-`newline_permitted` keeps track of whether newlines should be ignored
-or not.
-
The stack is most properly seen as alternating states and symbols -
states, like the 'DOT' in items, are between symbols. Each frame in
our stack holds a state and the symbol that was before it. The
struct parser {
struct frame {
short state;
- short newline_permitted;
-
short sym;
- short indents;
- short since_newline;
- short since_indent;
} *stack;
void **asn_stack;
int stack_size;
int tos;
+
+ ## parser state
};
#### Shift and pop
reduce a production we will pop off frames corresponding to the body
symbols, then push on a frame for the head of the production. This last
is exactly the same process as shifting in a terminal so we use the same
-function for both. In both cases we provide the symbol, the number of
-indents the symbol contains (which will be zero for a terminal symbol)
-and a flag indicating the the symbol was at (or was reduced from a
-symbol which was at) the start of a line. The state is deduced from the
-current top-of-stack state and the new symbol.
+function for both. In both cases we provide the symbol. The state is
+deduced from the current top-of-stack state and the new symbol.
-To simplify other code we arrange for `shift` to fail if there is no `goto`
+To simplify other code we arrange for `shift` to fail if there is no `go to`
state for the symbol. This is useful in basic parsing due to our design
that we shift when we can, and reduce when we cannot. So the `shift`
function reports if it could.
So `shift` finds the next state. If that succeeds it extends the
allocations if needed and pushes all the information onto the stacks.
-Newlines are permitted after a `starts_line` state until an internal
-indent. If the new frame has neither a `starts_line` state nor an
-indent, newlines are permitted if the previous stack frame permitted
-them.
+An extra complication is added to `shift` by the `EOL` token. This
+token must be generated when a `NEWLINE` is seen, but an `EOL` is
+expected. When this happens, the `NEWLINE` is NOT consumed, so multiple
+EOL can appear before a NEWLINE. To indicate that the token was shifted
+but not consumed, `shift` can return the special value `2`. The token
+number for `EOL` cannot be statically declared, so when the parser
+starts we need to look through the array of non-terminals to find the
+EOL.
+
+###### parser state
+ int tk_eol;
+
+###### find eol
+ p.tk_eol = 0;
+ while (strcmp(non_term[p.tk_eol], "EOL") != 0)
+ p.tk_eol += 1;
+ p.tk_eol += TK_reserved + config->known_count;
+
###### parser functions
static int shift(struct parser *p,
- short sym, short indents, short start_of_line,
- void *asn,
+ short sym, void *asn,
const struct state states[])
{
- // Push an entry onto the stack
struct frame next = {0};
+ int ret;
int newstate = p->tos
? search(&states[p->stack[p->tos-1].state],
sym)
: 0;
- if (newstate < 0)
+ if (newstate >= 0)
+ ret = 1;
+ else if (sym != TK_newline)
return 0;
+ else {
+ // have a NEWLINE, might need an EOL
+ sym = p->tk_eol;
+ newstate = p->tos
+ ? search(&states[p->stack[p->tos-1].state],
+ sym)
+ : 0;
+ if (newstate < 0)
+ return 0;
+ ret = 2;
+ asn = tok_copy(*(struct token*)asn);
+ }
+
if (p->tos >= p->stack_size) {
p->stack_size += 10;
p->stack = realloc(p->stack, p->stack_size
* sizeof(p->asn_stack[0]));
}
next.sym = sym;
- next.indents = indents;
next.state = newstate;
- if (states[newstate].starts_line)
- next.newline_permitted = 1;
- else if (indents)
- next.newline_permitted = 0;
- else if (p->tos)
- next.newline_permitted =
- p->stack[p->tos-1].newline_permitted;
- else
- next.newline_permitted = 0;
-
- if (!start_of_line) {
- if (p->tos)
- next.since_newline = p->stack[p->tos-1].since_newline + 1;
- else
- next.since_newline = 1;
- }
- if (indents)
- next.since_indent = 0;
- else if (p->tos)
- next.since_indent = p->stack[p->tos-1].since_indent + 1;
- else
- next.since_indent = 1;
p->stack[p->tos] = next;
p->asn_stack[p->tos] = asn;
p->tos++;
- return 1;
+ return ret;
}
`pop` primarily moves the top of stack (`tos`) back down the required
-amount and frees any `asn` entries that need to be freed. It also
-collects a summary of the indents and line starts in the symbols that
-are being removed. It is called _after_ we reduce a production, just
-before we `shift` the nonterminal in.
+amount and frees any `asn` entries that need to be freed. It is called
+_after_ we reduce a production, just before we `shift` the nonterminal
+in.
###### parser functions
- static int pop(struct parser *p, int num,
- short *start_of_line,
- void(*do_free)(short sym, void *asn))
+ static void pop(struct parser *p, int num,
+ void(*do_free)(short sym, void *asn))
{
int i;
- short indents = 0;
- int sol = 0;
p->tos -= num;
- for (i = 0; i < num; i++) {
- sol |= !p->stack[p->tos+i].since_newline;
- indents += p->stack[p->tos+i].indents;
- do_free(p->stack[p->tos+i].sym,
- p->asn_stack[p->tos+i]);
- }
- if (start_of_line)
- *start_of_line = sol;
- return indents;
+ for (i = 0; i < num; i++)
+ do_free(p->stack[p->tos+i].sym, p->asn_stack[p->tos+i]);
}
### The heart of the parser.
Now we have the parser. For each token we might shift it, trigger a
-reduction, or start error handling. 2D tokens (IN, OUT, EOL) also need
-to be handled.
-
-We return whatever `asn` was returned by reducing production zero.
-
-If we can neither shift nor reduce we have an error to handle. We pop
-single entries off the stack until we can shift the `TK_error` symbol,
-then drop input tokens until we find one we can shift into the new error
-state. We need to ensure that something is dropped or shifted after an
-error, or we could get into an infinite loop, only shifting in
-`TK_error`, then reducing. So we track if there has been a shift since
-the last error, and if not the next error always discards one token.
-
-When we find `TK_in` and `TK_out` tokens which report indents we need
-to handle them directly as the grammar cannot express what we want to
-do with them.
-
-`TK_in` tokens are easy: we simply update indent count in the top stack frame to
-record how many indents there are following the previous token.
-
-`TK_out` tokens must be canceled against an indent count
-within the stack. If we can reduce some symbols that are all since
-the most recent indent, then we do that first. If the minimum prefix
-of the current state then extends back before the most recent indent,
-that indent can be cancelled. If the minimum prefix is shorter then
-the indent had ended prematurely and we must start error handling, which
-is still a work-in-progress.
-
-`TK_newline` tokens are ignored unless the top stack frame records
-that they are permitted. In that case they will not be considered for
-shifting if it is possible to reduce some symbols that are all since
-the most recent start of line. This is how a newline forcibly
-terminates any line-like structure - we try to reduce down to at most
-one symbol for each line where newlines are allowed.
-A consequence of this is that a rule like
-
-###### Example: newlines - broken
-
- Newlines ->
- | NEWLINE Newlines
- IfStatement -> Newlines if ....
-
-cannot work, as the NEWLINE will never be shifted as the empty string
-will be reduced first. Optional sets of newlines need to be include
-in the thing that preceed:
-
-###### Example: newlines - works
-
- If -> if
- | NEWLINE If
- IfStatement -> If ....
-
-Here the NEWLINE will be shifted because nothing can be reduced until
-the `if` is seen.
-
-When during error handling we discard tokens read in, we want to keep
-discarding until we see one that is recognised. If we had a full set
-of LR(1) grammar states, this would mean looking in the look-ahead set,
-but we don't keep a full look-ahead set. We only record the subset
-that leads to SHIFT. We can, however, deduce the look-ahead set by
-looking at the SHIFT subsets for all states that we can get to by
-reducing zero or more times. So we need a little function which
-checks if a given token is in any of these look-ahead sets.
+reduction, or start error handling. 2D tokens (IN, OUT, NEWLINE, EOL)
+might also be ignored. Ignoring tokens is combined with shifting.
-###### parser includes
- #include "parser.h"
+###### parser vars
-###### parser_run
+ struct parser p = { 0 };
+ struct token *tk = NULL;
+ int accepted = 0;
- static int in_lookahead(struct token *tk, const struct state *states, int state)
- {
- while (state >= 0) {
- if (search(&states[state], tk->num) >= 0)
- return 1;
- if (states[state].reduce_prod < 0)
- return 0;
- state = search(&states[state], states[state].reduce_sym);
+###### heart of parser
+
+ shift(&p, TK_eof, NULL, states);
+ while (!accepted && p.tos > 0) {
+ struct frame *tos = &p.stack[p.tos-1];
+ if (!tk)
+ tk = tok_copy(token_next(tokens));
+ parser_trace(trace, &p,
+ tk, states, non_term, config->known_count);
+
+ ## try shift or ignore
+ ## try reduce
+ ## handle error
+ }
+
+Indents are ignored unless they can be shifted onto the stack
+immediately or nothing can be shifted (in which case we reduce, and try
+again). The end of an indented section - the OUT token - is ignored
+precisely when the indent was ignored. To keep track of this we need a
+small stack of flags, which is easily stored as bits in an `unsigned
+long`. This will never overflow as the scanner only allows 20 levels
+of indentation.
+
+###### parser state
+ unsigned long ignored_indents;
+
+NEWLINE/EOL is ignored when in an indented section of text which was not
+explicitly expected by the grammar. So if the most recent indent is
+ignored, so is any EOL token.
+
+For other tokens, we shift the next token if that is possible, otherwise
+we try to reduce a production.
+
+###### try shift or ignore
+
+ if ((tk->num == TK_newline || tk->num == TK_out) &&
+ (p.ignored_indents & 1)) {
+ /* indented, so ignore OUT and NEWLINE */
+ if (tk->num == TK_out)
+ p.ignored_indents >>= 1;
+ free(tk);
+ tk = NULL;
+ parser_trace_action(trace, "Ignore");
+ continue;
+ }
+
+ switch (shift(&p, tk->num, tk, states)) {
+ case 1:
+ if (tk->num == TK_out)
+ p.ignored_indents >>= 1;
+ if (tk->num == TK_in)
+ p.ignored_indents <<= 1;
+
+ tk = NULL;
+ /* fallthrough */
+ case 2:
+ parser_trace_action(trace, tk ? "ShiftEOL" : "Shift");
+ ## did shift
+ continue;
+ }
+
+ if (tk->num == TK_in && states[p.stack[p.tos-1].state].go_to_cnt > 0) {
+ /* No indent expected here and reduce is not mandatory, so ignore IN */
+ free(tk);
+ tk = NULL;
+ p.ignored_indents <<= 1;
+ p.ignored_indents |= 1;
+ parser_trace_action(trace, "Ignore");
+ continue;
+ }
+
+We have already discussed the bulk of the handling of a "reduce" action,
+with the `pop()` and `shift()` functions doing much of the work. There
+is a little more complexity needed to manage storage for the asn (Abstract
+Syntax Node), and also a test of whether the reduction is permitted.
+
+When we try to shift the result of reducing production-zero, it will
+fail because there is no next state. In this case the asn will not have
+been stored on the stack, so it gets stored in the `ret` variable, and we
+report that the input has been accepted.
+
+###### parser vars
+
+ void *ret = NULL;
+
+###### try reduce
+
+ if (states[tos->state].reduce_prod >= 0) {
+ void **body;
+ void *res;
+ const struct state *nextstate = &states[tos->state];
+ int prod = nextstate->reduce_prod;
+ int size = reductions[prod].size;
+ int res_size = reductions[prod].result_size;
+
+ body = p.asn_stack + (p.tos - size);
+ res = res_size ? calloc(1, res_size) : NULL;
+ res_size = do_reduce(prod, body, config, res);
+ if (res_size != reductions[prod].result_size)
+ abort();
+ pop(&p, size, do_free);
+ if (!shift(&p, reductions[prod].sym, res, states)) {
+ accepted = 1;
+ ret = res;
+ parser_trace_action(trace, "Accept");
+ } else
+ parser_trace_action(trace, "Reduce");
+ continue;
+ }
+
+If we can neither shift nor reduce we have an error to handle. There
+are two possible responses to an error: we can pop single frames off the
+stack until we find a frame that can shift `TK_error`, or we can discard
+the current look-ahead symbol.
+
+When we first see an error we do the first of these and set a flag to
+record that we are processing an error. If the normal shift/reduce
+tests fail to find that anything can be done from that state, we start
+dropping tokens until either we manage to shift one, or reach end-of-file.
+
+###### parser vars
+
+ int in_err = 0;
+
+###### did shift
+
+ in_err = 0;
+
+###### handle error
+
+ if (in_err) {
+ parser_trace_action(trace, "DISCARD");
+ if (tk->num == TK_eof)
+ break;
+ free(tk);
+ tk = NULL;
+ } else {
+ struct token *err_tk;
+
+ parser_trace_action(trace, "ERROR");
+
+ err_tk = tok_copy(*tk);
+ while (p.tos > 0 && shift(&p, TK_error, err_tk, states) == 0)
+ // discard this state
+ pop(&p, 1, do_free);
+ if (p.tos == 0) {
+ free(err_tk);
+ // no state accepted TK_error
+ break;
}
- return 0;
+ in_err = 1;
}
+###### parser includes
+ #include "parser.h"
+
+###### parser_run
+
void *parser_run(struct token_state *tokens,
const struct state states[],
+ const struct reduction reductions[],
int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
FILE *trace, const char *non_term[],
struct token_config *config)
{
- struct parser p = { 0 };
- struct token *tk = NULL;
- int accepted = 0;
- int shift_since_err = 1;
- void *ret = NULL;
-
- shift(&p, TK_eof, 0, 1, NULL, states);
- while (!accepted && p.tos > 0) {
- struct token *err_tk;
- struct frame *tos = &p.stack[p.tos-1];
- if (!tk)
- tk = tok_copy(token_next(tokens));
- parser_trace(trace, &p,
- tk, states, non_term, config->known_count);
-
- if (tk->num == TK_in) {
- tos->indents += 1;
- tos->since_newline = 0;
- tos->since_indent = 0;
- if (!states[tos->state].starts_line)
- tos->newline_permitted = 0;
- free(tk);
- tk = NULL;
- parser_trace_action(trace, "Record");
- continue;
- }
- if (tk->num == TK_out) {
- if (states[tos->state].reduce_size >= 0 &&
- states[tos->state].reduce_size <= tos->since_indent)
- goto force_reduce;
- if (states[tos->state].min_prefix >= tos->since_indent) {
- // OK to cancel
- struct frame *in = tos - tos->since_indent;
- in->indents -= 1;
- if (in->indents == 0) {
- /* Reassess since_indent and newline_permitted */
- if (in > p.stack) {
- in->since_indent = in[-1].since_indent + 1;
- in->newline_permitted = in[-1].newline_permitted;
- } else {
- in->since_indent = 0;
- in->newline_permitted = 0;
- }
- if (states[in->state].starts_line)
- in->newline_permitted = 1;
- while (in < tos) {
- in += 1;
- in->since_indent = in[-1].since_indent + 1;
- if (states[in->state].starts_line)
- in->newline_permitted = 1;
- else
- in->newline_permitted = in[-1].newline_permitted;
- }
- }
- free(tk);
- tk = NULL;
- parser_trace_action(trace, "Cancel");
- continue;
- }
- // fall through to error handling as both SHIFT and REDUCE
- // will fail.
- }
- if (tk->num == TK_newline) {
- if (!tos->newline_permitted) {
- free(tk);
- tk = NULL;
- parser_trace_action(trace, "Discard");
- continue;
- }
- if (tos->since_newline > 1 &&
- states[tos->state].reduce_size >= 0 &&
- states[tos->state].reduce_size <= tos->since_newline)
- goto force_reduce;
- }
- if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
- shift_since_err = 1;
- tk = NULL;
- parser_trace_action(trace, "Shift");
- continue;
- }
- force_reduce:
- if (states[tos->state].reduce_prod >= 0 &&
- states[tos->state].newline_only &&
- !(tk->num == TK_newline ||
- tk->num == TK_eof ||
- tk->num == TK_out ||
- (tos->indents == 0 && tos->since_newline == 0))) {
- /* Anything other than newline or out or eof
- * in an error unless we are already at start
- * of line, as this production must end at EOL.
- */
- } else if (states[tos->state].reduce_prod >= 0) {
- void **body;
- void *res;
- const struct state *nextstate = &states[tos->state];
- int prod = nextstate->reduce_prod;
- int size = nextstate->reduce_size;
- int bufsize;
- static char buf[16*1024];
- short indents, start_of_line;
-
- body = p.asn_stack + (p.tos - size);
-
- bufsize = do_reduce(prod, body, config, buf);
-
- indents = pop(&p, size, &start_of_line,
- do_free);
- res = memdup(buf, bufsize);
- memset(buf, 0, bufsize);
- if (!shift(&p, nextstate->reduce_sym,
- indents, start_of_line,
- res, states)) {
- if (prod != 0) abort();
- accepted = 1;
- ret = res;
- }
- parser_trace_action(trace, "Reduce");
- continue;
- }
- /* Error. We walk up the stack until we
- * find a state which will accept TK_error.
- * We then shift in TK_error and see what state
- * that takes us too.
- * Then we discard input tokens until
- * we find one that is acceptable.
- */
- parser_trace_action(trace, "ERROR");
- short indents = 0, start_of_line;
-
- err_tk = tok_copy(*tk);
- while (p.tos > 0 &&
- shift(&p, TK_error, 0, 0,
- err_tk, states) == 0)
- // discard this state
- indents += pop(&p, 1, &start_of_line, do_free);
- if (p.tos == 0) {
- free(err_tk);
- // no state accepted TK_error
- break;
- }
- if (!shift_since_err) {
- /* must discard at least one token to avoid
- * infinite loop.
- */
- if (tk->num == TK_eof)
- break;
- free(tk);
- tk = tok_copy(token_next(tokens));
- }
- shift_since_err = 0;
- tos = &p.stack[p.tos-1];
- while (!in_lookahead(tk, states, tos->state) &&
- tk->num != TK_eof) {
- free(tk);
- tk = tok_copy(token_next(tokens));
- shift_since_err = 1;
- if (tk->num == TK_in)
- indents += 1;
- if (tk->num == TK_out) {
- if (indents == 0)
- break;
- indents -= 1;
- // FIXME update since_indent here
- }
- }
- tos->indents += indents;
- }
+ ## parser vars
+
+ ## find eol
+
+ ## heart of parser
+
free(tk);
- pop(&p, p.tos, NULL, do_free);
+ pop(&p, p.tos, do_free);
free(p.asn_stack);
free(p.stack);
return ret;
###### exported functions
void *parser_run(struct token_state *tokens,
const struct state states[],
+ const struct reduction reductions[],
int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
FILE *trace, const char *non_term[],
[TK_newline] = "NEWLINE",
[TK_eof] = "$eof",
};
- static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
- {
- fprintf(trace, "(%d", f->state);
- if (states[f->state].starts_line)
- fprintf(trace, "s");
- if (f->newline_permitted)
- fprintf(trace, "n%d", f->since_newline);
- fprintf(trace, ") ");
- }
void parser_trace(FILE *trace, struct parser *p,
struct token *tk, const struct state states[],
} else
fputs(non_term[sym - TK_reserved - knowns],
trace);
- if (f->indents)
- fprintf(trace, ".%d", f->indents);
- if (f->since_newline == 0)
- fputs("/", trace);
fputs(" ", trace);
}
- parser_trace_state(trace, f, states);
+ fprintf(trace, "(%d) ", f->state);
}
fprintf(trace, "[");
if (tk->num < TK_reserved &&