5. `parser_run` is a library function intended to be linked together
with the generated parser tables to complete the implementation of
a parser.
-6. `calc.cgm` is a test grammar for a simple calculator.
+6. Finally `calc` is a test grammar for a simple calculator. The
+ `parsergen` program built from the C code in this file can extract
+ that grammar directly from this file and process it.
+
###### File: parsergen.c
#include <unistd.h>
## parser includes
## parser functions
## parser_run
-###### File: calc.cgm
- ## demo grammar
###### File: parsergen.mk
CFLAGS += -Wall -g
all :: parsergen calc
- parsergen.c parsergen.mk calc.cgm libparser.c parser.h : parsergen.mdc
+ parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
./md2c parsergen.mdc
## Reading the grammar
literally copied into the generated `.c` and `.h`. files. The last
contains the grammar. This is tokenised with "[scanner][]".
+If the `--tag` option is given, then any top level header that doesn't
+start with the tag is ignored, and the tag is stripped from the rest. So
+`--tag Foo`
+means that the three needed sections must be `Foo: header`, `Foo: code`,
+and `Foo: grammar`.
+
[mdcode]: mdcode.html
[scanner]: scanner.html
`mdcode`. `scanner` does provide `text_dump` which is useful for strings
which might contain control characters.
+`strip_tag` is a bit like `strncmp`, but adds a test for a colon,
+because that is what we need to detect tags.
+
###### functions
static int text_is(struct text t, char *s)
{
printf("%.*s", t.len, t.txt);
}
+ /* strip_tag: test whether the text *t starts with "tag" followed
+  * immediately by a colon.  If it does, advance t->txt and shrink
+  * t->len past the tag, the colon, and any following spaces, and
+  * return 1.  Otherwise *t is left unchanged and 0 is returned.
+  * Like strncmp, but with the added colon check needed to detect
+  * "--tag"-style section prefixes such as "Foo: grammar".
+  */
+ static int strip_tag(struct text *t, char *tag)
+ {
+ int skip = strlen(tag) + 1;
+ /* need room for tag plus ':' plus at least one more char */
+ if (skip >= t->len ||
+ strncmp(t->txt, tag, skip-1) != 0 ||
+ t->txt[skip-1] != ':')
+ return 0;
+ /* also swallow any spaces after the colon */
+ while (skip < t->len && t->txt[skip] == ' ')
+ skip++;
+ t->len -= skip;
+ t->txt += skip;
+ return 1;
+ }
+
+
### Symbols
Productions are comprised primarily of symbols - terminal and
p->head->type = Nonterminal;
array_add(&p->body, &p->body_size, head);
array_add(&p->body, &p->body_size, sym_find(g, eof));
- g->start = p->head->num;
p->head->first_production = g->production_count;
array_add(&g->productions, &g->production_count, p);
Now we are ready to read in the grammar.
-###### grammar fields
- int start; // the 'start' symbol.
-
###### grammar_read
static struct grammar *grammar_read(struct code_node *code)
{
};
struct token_state *state = token_open(code, &conf);
- struct token tk = token_next(state);
+ struct token tk;
struct symbol *head = NULL;
struct grammar *g;
char *err = NULL;
else {
head->type = Nonterminal;
head->struct_name = g->current_type;
- if (g->start == 0) {
+ if (g->production_count == 0) {
## create production zero
}
head->first_production = g->production_count;
}
}
+### Setting `can_eol`
+
+In order to be able to ignore newline tokens when not relevant, but
+still include them in the parse when needed, we will need to know
+which states can start a "line-like" section of code. We ignore
+newlines when there is an indent since the most recent start of a
+line-like section.
+
+To know what is line-like, we first need to know which symbols can end
+a line-like section, which is precisely those which can end with a
+newline token. These symbols don't necessarily always end with a
+newline, but they can. Hence they are not described as "lines" but
+only "line-like".
+
+Clearly the `TK_newline` token can end with a newline. Any symbol
+which is the head of a production that contains a line-ending symbol
+followed only by nullable symbols is also a line-ending symbol. We
+use a new field `can_eol` to record this attribute of symbols, and
+compute it in a repetitive manner similar to `set_nullable`.
+
+###### symbol fields
+ int can_eol;
+
+###### functions
+ /* set_can_eol: compute the 'can_eol' attribute for every symbol.
+  * A symbol "can eol" if some string it derives can end with a
+  * newline token.  Seed: TK_newline itself.  Then repeat until no
+  * change: a production head can eol if, scanning its body from the
+  * end, we reach a can_eol symbol having crossed only nullable
+  * symbols.  Same fixed-point style as set_nullable.
+  */
+ static void set_can_eol(struct grammar *g)
+ {
+ int check_again = 1;
+ g->symtab[TK_newline]->can_eol = 1;
+ while (check_again) {
+ int p;
+ check_again = 0;
+ for (p = 0; p < g->production_count; p++) {
+ struct production *pr = g->productions[p];
+ int s;
+
+ /* already known to end with a newline - skip */
+ if (pr->head->can_eol)
+ continue;
+
+ /* walk body right-to-left over nullable symbols */
+ for (s = pr->body_size - 1; s >= 0; s--) {
+ if (pr->body[s]->can_eol) {
+ pr->head->can_eol = 1;
+ check_again = 1;
+ break;
+ }
+ if (!pr->body[s]->nullable)
+ break;
+ }
+ }
+ }
+ }
+
+
### Building the `first` sets
When calculating what can follow a particular non-terminal, we will need to
-know what the "first" terminal in an subsequent non-terminal might be. So
+know what the "first" terminal in any subsequent non-terminal might be. So
we calculate the `first` set for every non-terminal and store them in an
array. We don't bother recording the "first" set for terminals as they are
trivial.
struct symset items;
struct symset go_to;
char completed;
+ char starts_line;
};
###### grammar fields
them to a data structure, of freeing them.
static int add_itemset(struct grammar *g, struct symset ss,
- enum grammar_type type)
+ enum grammar_type type, int starts_line)
{
struct itemset **where, *is;
int i;
is->items = ss;
is->next = *where;
is->go_to = INIT_DATASET;
+ is->starts_line = starts_line;
*where = is;
return is->state;
}
sn = save_set(g, LA);
LA = set_find(g, sn);
}
+
/* Add productions for this symbol */
for (p2 = s->first_production;
p2 < g->production_count &&
###### derive itemsets
// Now we have a completed itemset, so we need to
- // create all the 'next' itemset and create them
+ // compute all the 'next' itemsets and create them
// if they don't exist.
for (i = 0; i < done.cnt; i++) {
int j;
unsigned short state;
+ int starts_line = 0;
+ struct symbol *sym = g->symtab[done.syms[i]];
struct symset newitemset = INIT_SYMSET;
if (type >= LALR)
newitemset = INIT_DATASET;
+ if (sym->can_eol ||
+ (sym->nullable && is->starts_line))
+ starts_line = 1;
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
if (bp == pr->body_size)
continue;
- if (pr->body[bp]->num != done.syms[i])
+ if (pr->body[bp] != sym)
continue;
if (type >= LALR)
la = is->items.data[j];
}
}
}
- state = add_itemset(g, newitemset, type);
+ state = add_itemset(g, newitemset, type, starts_line);
if (symset_find(&is->go_to, done.syms[i]) < 0)
symset_add(&is->go_to, done.syms[i], state);
}
}
// production 0, offset 0 (with no data)
symset_add(&first, item_num(0, 0), la);
- add_itemset(g, first, type);
+ add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
for (again = 0, is = g->items;
is;
is = is->next ?: again ? (again = 0, g->items) : NULL) {
for (s = g->syms; s; s = s->next)
g->symtab[s->num] = s;
- if (type >= SLR) {
- set_nullable(g);
+ set_nullable(g);
+ set_can_eol(g);
+ if (type >= SLR)
build_first(g);
- }
+
if (type == SLR)
build_follow(g);
if (!s)
continue;
- printf(" %c%3d%c: ",
- s->nullable ? '*':' ',
+ printf(" %c%c%3d%c: ",
+ s->nullable ? '.':' ',
+ s->can_eol ? '>':' ',
s->num, symtypes[s->type]);
prtxt(s->name);
if (s->precedence)
for (s = 0; s < g->states; s++) {
int j;
struct itemset *is = g->statetab[s];
- printf(" Itemset %d:\n", s);
+ printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
for (j = 0; j < is->items.cnt; j++) {
report_item(g, is->items.syms[j]);
if (is->items.data != NO_DATA)
short reduce_size;
short reduce_sym;
short shift_sym;
+ short starts_line;
};
}
if (prod >= 0)
- fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0 },\n",
+ fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
i, is->go_to.cnt, i, prod,
g->productions[prod]->body_size,
- g->productions[prod]->head->num);
+ g->productions[prod]->head->num,
+ is->starts_line);
else
- fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d },\n",
- i, is->go_to.cnt, i, shift_sym);
+ fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
+ i, is->go_to.cnt, i, shift_sym,
+ is->starts_line);
}
fprintf(f, "};\n\n");
}
{ "SLR", 0, NULL, 'S' },
{ "LALR", 0, NULL, 'L' },
{ "LR1", 0, NULL, '1' },
+ { "tag", 1, NULL, 't' },
{ "report", 0, NULL, 'R' },
{ "output", 1, NULL, 'o' },
{ NULL, 0, NULL, 0 }
};
- const char *options = "05SL1Ro:";
+ const char *options = "05SL1t:Ro:";
###### process arguments
int opt;
char *outfile = NULL;
char *infile;
char *name;
+ char *tag = NULL;
int report = 1;
enum grammar_type type = LR05;
while ((opt = getopt_long(argc, argv, options,
report = 2; break;
case 'o':
outfile = optarg; break;
+ case 't':
+ tag = optarg; break;
default:
fprintf(stderr, "Usage: parsergen ...\n");
exit(1);
struct code_node *code = NULL;
struct code_node *gram = NULL;
for (s = table; s; s = s->next) {
- if (text_is(s->section, "header"))
+ struct text sec = s->section;
+ if (tag && !strip_tag(&sec, tag))
+ continue;
+ if (text_is(sec, "header"))
hdr = s->code;
- else if (text_is(s->section, "code"))
+ else if (text_is(sec, "code"))
code = s->code;
- else if (text_is(s->section, "grammar"))
+ else if (text_is(sec, "grammar"))
gram = s->code;
else {
fprintf(stderr, "Unknown content section: %.*s\n",
short sym;
short starts_indented;
short indents;
+ short newline_permitted;
} *stack, next;
void **asn_stack;
int stack_size;
function reports if it could.
So `shift` finds the next state. If that succeed it extends the allocations
-if needed and pushed all the information onto the stacks.
+if needed and pushes all the information onto the stacks.
###### parser functions
p->next.state = newstate;
p->next.indents = 0;
p->next.starts_indented = 0;
+ // if new state doesn't start a line, we inherit newline_permitted status
+ if (states[newstate].starts_line)
+ p->next.newline_permitted = 1;
return 1;
}
if (num) {
p->next.state = p->stack[p->tos].state;
p->next.starts_indented = p->stack[p->tos].starts_indented;
+ p->next.newline_permitted = p->stack[p->tos].newline_permitted;
+ if (p->next.indents > p->next.starts_indented)
+ p->next.newline_permitted = 0;
}
}
int accepted = 0;
void *ret;
+ p.next.newline_permitted = states[0].starts_line;
while (!accepted) {
struct token *err_tk;
if (!tk)
(p.stack[p.tos-1].indents == 1 &&
states[p.next.state].reduce_size > 1)) {
p.stack[p.tos-1].indents -= 1;
+ if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
+ // no internal indent any more, reassess 'newline_permitted'
+ if (states[p.stack[p.tos-1].state].starts_line)
+ p.stack[p.tos-1].newline_permitted = 1;
+ else if (p.tos > 1)
+ p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
+ }
free(tk);
tk = NULL;
continue;
// fall through and force a REDUCE (as 'shift'
// will fail).
}
+ if (p.next.sym == TK_newline) {
+ if (!p.tos || ! p.stack[p.tos-1].newline_permitted) {
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ }
if (shift(&p, tk, states)) {
tk = NULL;
continue;
[TK_newline] = "NEWLINE",
[TK_eof] = "$eof",
};
+ /* parser_trace_state: print one stack frame for tracing, as
+  * "(state[:|.indents][s][n]) ".  ':' marks an indent at the start
+  * of the frame, '.' an internal indent; 's' means the state starts
+  * a line-like section, 'n' means newline tokens are permitted.
+  */
+ static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
+ {
+ fprintf(trace, "(%d", f->state);
+ if (f->indents)
+ fprintf(trace, "%c%d", f->starts_indented?':':'.',
+ f->indents);
+ if (states[f->state].starts_line)
+ fprintf(trace, "s");
+ if (f->newline_permitted)
+ fprintf(trace, "n");
+ fprintf(trace, ") ");
+ }
+
+
void parser_trace(FILE *trace, struct parser *p,
struct token *tk, const struct state states[],
const char *non_term[], int knowns)
int i;
for (i = 0; i < p->tos; i++) {
int sym = p->stack[i].sym;
- fprintf(trace, "(%d) ", p->stack[i].state);
+ parser_trace_state(trace, &p->stack[i], states);
if (sym < TK_reserved &&
reserved_words[sym] != NULL)
fputs(reserved_words[sym], trace);
trace);
fputs(" ", trace);
}
- fprintf(trace, "(%d) [", p->next.state);
+ parser_trace_state(trace, &p->next, states);
+ fprintf(trace, " [");
if (tk->num < TK_reserved &&
reserved_words[tk->num] != NULL)
fputs(reserved_words[tk->num], trace);
something like this.
###### File: parsergen.mk
- calc.c : parsergen calc.cgm
- ./parsergen -o calc calc.cgm
+ calc.c calc.h : parsergen parsergen.mdc
+ ./parsergen --tag calc -o calc parsergen.mdc
calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
$(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
-###### demo grammar
-
- # header
- ~~~~~
+# calc: header
#include "number.h"
// what do we use for a demo-grammar? A calculator of course.
int err;
};
- ~~~~~
- # code
- ~~~~~
+# calc: code
#include <stdlib.h>
#include <unistd.h>
exit(0);
}
- ~~~~~
- # grammar
- ~~~~~
+# calc: grammar
Session -> Session Line
| Line