5. `parser_run` is a library function intended to be linked together
with the generated parser tables to complete the implementation of
a parser.
-6. `calc.cgm` is a test grammar for a simple calculator.
+6. Finally `calc` is a test grammar for a simple calculator. The
+ `parsergen` program built from the C code in this file can extract
+ that grammar directly from this file and process it.
+
###### File: parsergen.c
#include <unistd.h>
## parser includes
## parser functions
## parser_run
-###### File: calc.cgm
- ## demo grammar
###### File: parsergen.mk
CFLAGS += -Wall -g
all :: parsergen calc
- parsergen.c parsergen.mk calc.cgm libparser.c parser.h : parsergen.mdc
+ parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
./md2c parsergen.mdc
## Reading the grammar
literally copied into the generated `.c` and `.h`. files. The last
contains the grammar. This is tokenised with "[scanner][]".
+If the `--tag` option is given, then any top level header that doesn't
+start with the tag is ignored, and the tag is stripped from the rest. So
+`--tag Foo`
+means that the three needed sections must be `Foo: header`, `Foo: code`,
+and `Foo: grammar`.
+
[mdcode]: mdcode.html
[scanner]: scanner.html
_ad hoc_ parsing as we don't have a parser generator yet.
The precedence and associativity can be set for each production, but
-can be inherited from symbols. The data type is potentially defined
-for each non-terminal and describes what C structure is used to store
-information about each symbol.
+can be inherited from symbols. The data type (either structure or a
+reference to a structure) is potentially defined for each non-terminal
+and describes what C structure is used to store information about each
+symbol.
###### declarations
enum assoc {Left, Right, Non};
struct symbol {
struct text struct_name;
+ int isref;
enum assoc assoc;
unsigned short precedence;
## symbol fields
`mdcode`. `scanner` does provide `text_dump` which is useful for strings
which might contain control characters.
+`strip_tag` is a bit like `strncmp`, but adds a test for a colon,
+because that is what we need to detect tags.
+
###### functions
static int text_is(struct text t, char *s)
{
printf("%.*s", t.len, t.txt);
}
+ static int strip_tag(struct text *t, char *tag)
+ {
+ int skip = strlen(tag) + 1;
+ if (skip >= t->len ||
+ strncmp(t->txt, tag, skip-1) != 0 ||
+ t->txt[skip-1] != ':')
+ return 0;
+ while (skip < t->len && t->txt[skip] == ' ')
+ skip++;
+ t->len -= skip;
+ t->txt += skip;
+ return 1;
+ }
+
### Symbols
Productions are comprised primarily of symbols - terminal and
int num_syms;
###### functions
- static int text_cmp(struct text a, struct text b)
- {
- int len = a.len;
- if (a.len > b.len)
- len = b.len;
- int cmp = strncmp(a.txt, b.txt, len);
- if (cmp)
- return cmp;
- else
- return a.len - b.len;
- }
-
static struct symbol *sym_find(struct grammar *g, struct text s)
{
struct symbol **l = &g->syms;
precedence, otherwise it specifies a data type.
The data type name is simply stored and applied to the head of all
-subsequent productions. It must be the name of a structure, so `$expr`
-maps to `struct expr`.
-
-Any productions given before the first data type will have no data type
-and can carry no information. In order to allow other non-terminals to
-have no type, the data type `$void` can be given. This does *not* mean
-that `struct void` will be used, but rather than no type will be
-associated with future non-terminals.
+subsequent productions. It must be the name of a structure optionally
+preceded by an asterisk which means a reference or "pointer". So
+`$expression` maps to `struct expression` and `$*statement` maps to
+`struct statement *`.
+
+Any productions given before the first data type declaration will have
+no data type associated with them and can carry no information. In
+order to allow other non-terminals to have no type, the data type
+`$void` can be given. This does *not* mean that `struct void` will be
+used, but rather that no type will be associated with future
+non-terminals.
The precedence line must contain a list of symbols - typically
terminal symbols, but not necessarily. It can only contain symbols
###### grammar fields
struct text current_type;
+ int type_isref;
int prec_levels;
###### declarations
static const char *known[] = { "$$", "${", "}$" };
###### functions
- static char *dollar_line(struct token_state *ts, struct grammar *g)
+ static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
{
struct token t = token_next(ts);
char *err;
assoc = Non;
else {
g->current_type = t.txt;
+ g->type_isref = isref;
if (text_is(t.txt, "void"))
g->current_type.txt = NULL;
t = token_next(ts);
return NULL;
}
+ if (isref) {
+ err = "$* cannot be followed by a precedence";
+ goto abort;
+ }
+
// This is a precedence line, need some symbols.
found = 0;
g->prec_levels += 1;
be in one `code_node` of the literate code. The `}$` must be
at the end of a line.
-Text in the code fragment will undergo substitutions where `$N` for
-some numeric `N` will be replaced with a variable holding the parse
-information for the particular symbol in the production. `$0` is the
-head of the production, `$1` is the first symbol of the body, etc.
-The type of `$N` for a terminal symbol is `struct token`. For
-a non-terminal, it is whatever has been declared for that symbol.
+Text in the code fragment will undergo substitutions where `$N` or
+`$<N`, for some numeric `N`, will be replaced with a variable holding
+the parse information for the particular symbol in the production.
+`$0` is the head of the production, `$1` is the first symbol of the
+body, etc. The type of `$N` for a terminal symbol is `struct token`.
+For a non-terminal, it is whatever has been declared for that symbol.
+The `<` may be included for symbols declared as storing a reference
+(not a structure) and means that the reference is being moved out, so
+it will not automatically be freed.
While building productions we will need to add to an array which needs to
grow dynamically.
struct production *p = calloc(1,sizeof(*p));
struct text start = {"$start",6};
struct text eof = {"$eof",4};
+ struct text code = {"$0 = $<1;", 9};
p->head = sym_find(g, start);
p->head->type = Nonterminal;
+ p->head->struct_name = g->current_type;
+ p->head->isref = g->type_isref;
+ if (g->current_type.txt)
+ p->code = code;
array_add(&p->body, &p->body_size, head);
array_add(&p->body, &p->body_size, sym_find(g, eof));
- g->start = p->head->num;
p->head->first_production = g->production_count;
array_add(&g->productions, &g->production_count, p);
Now we are ready to read in the grammar.
-###### grammar fields
- int start; // the 'start' symbol.
-
###### grammar_read
static struct grammar *grammar_read(struct code_node *code)
{
};
struct token_state *state = token_open(code, &conf);
- struct token tk = token_next(state);
+ struct token tk;
struct symbol *head = NULL;
struct grammar *g;
char *err = NULL;
else {
head->type = Nonterminal;
head->struct_name = g->current_type;
- if (g->start == 0) {
+ head->isref = g->type_isref;
+ if (g->production_count == 0) {
## create production zero
}
head->first_production = g->production_count;
err = "First production must have a head";
} else if (tk.num == TK_mark
&& text_is(tk.txt, "$")) {
- err = dollar_line(state, g);
+ err = dollar_line(state, g, 0);
+ } else if (tk.num == TK_mark
+ && text_is(tk.txt, "$*")) {
+ err = dollar_line(state, g, 1);
} else {
err = "Unrecognised token at start of line.";
}
}
}
+### Setting `can_eol`
+
+In order to be able to ignore newline tokens when not relevant, but
+still include them in the parse when needed, we will need to know
+which states can start a "line-like" section of code. We ignore
+newlines when there is an indent since the most recent start of a
+line-like section.
+
+To know what is line-like, we first need to know which symbols can end
+a line-like section, which is precisely those which can end with a
+newline token. These symbols don't necessarily always end with a
+newline, but they can. Hence they are not described as "lines" but
+only "line-like".
+
+Clearly the `TK_newline` token can end with a newline. Any symbol
+which is the head of a production that contains a line-ending symbol
+followed only by nullable symbols is also a line-ending symbol. We
+use a new field `can_eol` to record this attribute of symbols, and
+compute it in a repetitive manner similar to `set_nullable`.
+
+###### symbol fields
+ int can_eol;
+
+###### functions
+ static void set_can_eol(struct grammar *g)
+ {
+ int check_again = 1;
+ g->symtab[TK_newline]->can_eol = 1;
+ while (check_again) {
+ int p;
+ check_again = 0;
+ for (p = 0; p < g->production_count; p++) {
+ struct production *pr = g->productions[p];
+ int s;
+
+ if (pr->head->can_eol)
+ continue;
+
+ for (s = pr->body_size - 1; s >= 0; s--) {
+ if (pr->body[s]->can_eol) {
+ pr->head->can_eol = 1;
+ check_again = 1;
+ break;
+ }
+ if (!pr->body[s]->nullable)
+ break;
+ }
+ }
+ }
+ }
+
### Building the `first` sets
When calculating what can follow a particular non-terminal, we will need to
-know what the "first" terminal in an subsequent non-terminal might be. So
+know what the "first" terminal in any subsequent non-terminal might be. So
we calculate the `first` set for every non-terminal and store them in an
array. We don't bother recording the "first" set for terminals as they are
trivial.
struct symset items;
struct symset go_to;
char completed;
+ char starts_line;
};
###### grammar fields
}
Adding an itemset may require merging the LA sets if LALR analysis is
-happening. If any new LA set add symbol that weren't in the old LA set, we
+happening. If any new LA set adds any symbols that weren't in the old LA set, we
clear the `completed` flag so that the dependants of this itemset will be
recalculated and their LA sets updated.
them to a data structure, of freeing them.
static int add_itemset(struct grammar *g, struct symset ss,
- enum grammar_type type)
+ enum grammar_type type, int starts_line)
{
struct itemset **where, *is;
int i;
is->items = ss;
is->next = *where;
is->go_to = INIT_DATASET;
+ is->starts_line = starts_line;
*where = is;
return is->state;
}
#### The build
-To build all the itemsets, we first insert the initial itemset made from the
-start symbol, complete each itemset, and then generate new itemsets from old
-until no new ones can be made.
+To build all the itemsets, we first insert the initial itemset made
+from production zero, complete each itemset, and then generate new
+itemsets from old until no new ones can be made.
Completing an itemset means finding all the items where "DOT" is followed by
a nonterminal and adding "DOT=0" items for every production from that
sn = save_set(g, LA);
LA = set_find(g, sn);
}
+
/* Add productions for this symbol */
for (p2 = s->first_production;
p2 < g->production_count &&
###### derive itemsets
// Now we have a completed itemset, so we need to
- // create all the 'next' itemset and create them
+ // compute all the 'next' itemsets and create them
// if they don't exist.
for (i = 0; i < done.cnt; i++) {
int j;
unsigned short state;
+ int starts_line = 0;
+ struct symbol *sym = g->symtab[done.syms[i]];
struct symset newitemset = INIT_SYMSET;
if (type >= LALR)
newitemset = INIT_DATASET;
+ if (sym->can_eol ||
+ (sym->nullable && is->starts_line))
+ starts_line = 1;
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
if (bp == pr->body_size)
continue;
- if (pr->body[bp]->num != done.syms[i])
+ if (pr->body[bp] != sym)
continue;
if (type >= LALR)
la = is->items.data[j];
}
}
}
- state = add_itemset(g, newitemset, type);
+ state = add_itemset(g, newitemset, type, starts_line);
if (symset_find(&is->go_to, done.syms[i]) < 0)
symset_add(&is->go_to, done.syms[i], state);
}
}
// production 0, offset 0 (with no data)
symset_add(&first, item_num(0, 0), la);
- add_itemset(g, first, type);
+ add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
for (again = 0, is = g->items;
is;
is = is->next ?: again ? (again = 0, g->items) : NULL) {
for (s = g->syms; s; s = s->next)
g->symtab[s->num] = s;
- if (type >= SLR) {
- set_nullable(g);
+ set_nullable(g);
+ set_can_eol(g);
+ if (type >= SLR)
build_first(g);
- }
+
if (type == SLR)
build_follow(g);
The purpose of the report is to give the grammar developer insight into
how the grammar parser will work. It is basically a structured dump of
-all the tables that have been generated, plus an description of any conflicts.
+all the tables that have been generated, plus a description of any conflicts.
###### grammar_report
static int grammar_report(struct grammar *g, enum grammar_type type)
return report_conflicts(g, type);
}
-Firstly we have the complete list of symbols, together with the "FIRST"
-set if that was generated.
+Firstly we have the complete list of symbols, together with the
+"FIRST" set if that was generated. We add a mark to each symbol to
+show if it can end in a newline (`>`), or if it is nullable (`.`).
###### functions
if (!s)
continue;
- printf(" %c%3d%c: ",
- s->nullable ? '*':' ',
+ printf(" %c%c%3d%c: ",
+ s->nullable ? '.':' ',
+ s->can_eol ? '>':' ',
s->num, symtypes[s->type]);
prtxt(s->name);
if (s->precedence)
}
}
-Then we have to follow sets if they were computed.
+Then we have the follow sets if they were computed.
static void report_follow(struct grammar *g)
{
for (s = 0; s < g->states; s++) {
int j;
struct itemset *is = g->statetab[s];
- printf(" Itemset %d:\n", s);
+ printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
for (j = 0; j < is->items.cnt; j++) {
report_item(g, is->items.syms[j]);
if (is->items.data != NO_DATA)
}
LR0 conflicts are any state which have both a reducible item and
-a shiftable item.
+a shiftable item, or two reducible items.
LR05 conflicts only occurs if two possibly reductions exist,
as shifts always over-ride reductions.
}
SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
-look ahead, or if a symbol in a look-ahead can be shifted. The differ only
+look ahead, or if a symbol in a look-ahead can be shifted. They differ only
in the source of the look ahead set.
-We build a dataset mapping terminal to item for possible SHIFTs and then
-another for possible REDUCE operations. We report when we get conflicts
-between the two.
+We build two datasets to reflect the "action" table: one which maps
+terminals to items where that terminal could be shifted and another
+which maps terminals to items that could be reduced when the terminal
+is in look-ahead. We report when we get conflicts between the two.
static int conflicts_slr(struct grammar *g, enum grammar_type type)
{
## Generating the parser
-The export part of the parser is the `parse_XX` function, where the name
+The exported part of the parser is the `parse_XX` function, where the name
`XX` is based on the name of the parser files.
This takes a `code_node`, a partially initialized `token_config`, and an
known words added and then is used with the `code_node` to initialize the
scanner.
-`parse_XX` then call the library function `parser_run` to actually complete
+`parse_XX` then calls the library function `parser_run` to actually complete
the parse. This needs the `states` table and function to call the various
pieces of code provided in the grammar file, so they are generated first.
fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
fprintf(f, "\ttokens = token_open(code, config);\n");
- fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config->known_count);\n");
+ fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
fprintf(f, "\ttoken_close(tokens);\n");
fprintf(f, "\treturn rv;\n");
fprintf(f, "}\n\n");
}
-### Table words table
+### Known words table
-The know words is simply an array of terminal symbols.
+The known words table is simply an array of terminal symbols.
The table of nonterminals used for tracing is a similar array.
###### functions
short reduce_size;
short reduce_sym;
short shift_sym;
+ short starts_line;
};
}
if (prod >= 0)
- fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0 },\n",
+ fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
i, is->go_to.cnt, i, prod,
g->productions[prod]->body_size,
- g->productions[prod]->head->num);
+ g->productions[prod]->head->num,
+ is->starts_line);
else
- fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d },\n",
- i, is->go_to.cnt, i, shift_sym);
+ fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
+ i, is->go_to.cnt, i, shift_sym,
+ is->starts_line);
}
fprintf(f, "};\n\n");
}
`do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
`do_reduce` return the size to be saved.
+In order for the code to access "global" context, we pass in the
+"config" pointer that was passed to the parser function. If the `struct
+token_config` is embedded in some larger structure, the reducing code
+can access the larger structure using pointer manipulation.
+
The code fragment requires translation when written out. Any `$N` needs to
be converted to a reference either to that buffer (if `$0`) or to the
-structure returned by a previous reduction. These pointer need to be cast
+structure returned by a previous reduction. These pointers need to be cast
to the appropriate type for each access. All this is handling in
`gen_code`.
+`gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
+This applies only to symbols with references (or pointers), not those with structures.
+The `<` implies that the reference is being moved out, so the object will not be
+automatically freed. This is equivalent to assigning `NULL` to the pointer.
###### functions
static void gen_code(struct production *p, FILE *f, struct grammar *g)
{
char *c;
+ char *used = calloc(1, p->body_size);
+ int i;
+
fprintf(f, "\t\t\t");
for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
int n;
+ int use = 0;
if (*c != '$') {
fputc(*c, f);
if (*c == '\n')
continue;
}
c++;
+ if (*c == '<') {
+ use = 1;
+ c++;
+ }
if (*c < '0' || *c > '9') {
+ if (use)
+ fputc('<', f);
fputc(*c, f);
continue;
}
n = n * 10 + *c - '0';
}
if (n == 0)
- fprintf(f, "(*(struct %.*s*)ret)",
+ fprintf(f, "(*(struct %.*s*%s)ret)",
p->head->struct_name.len,
- p->head->struct_name.txt);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
else if (n > p->body_size)
fprintf(f, "$%d", n);
else if (p->body[n-1]->type == Terminal)
n-1);
else if (p->body[n-1]->struct_name.txt == NULL)
fprintf(f, "$%d", n);
- else
- fprintf(f, "(*(struct %.*s*)body[%d])",
+ else {
+ fprintf(f, "(*(struct %.*s*%s)body[%d])",
p->body[n-1]->struct_name.len,
- p->body[n-1]->struct_name.txt, n-1);
+ p->body[n-1]->struct_name.txt,
+ p->body[n-1]->isref ? "*":"", n-1);
+ used[n-1] = use;
+ }
}
fputs("\n", f);
+ for (i = 0; i < p->body_size; i++) {
+ if (p->body[i]->struct_name.txt &&
+ p->body[i]->isref &&
+ used[i])
+ // assume this has been copied out
+ fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
+ }
+ free(used);
}
###### functions
{
int i;
fprintf(f, "#line 0 \"gen_reduce\"\n");
- fprintf(f, "static int do_reduce(int prod, void **body, void *ret)\n");
+ fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
fprintf(f, "{\n");
fprintf(f, "\tint ret_size = 0;\n");
gen_code(p, f, g);
if (p->head->struct_name.txt)
- fprintf(f, "\t\tret_size = sizeof(struct %.*s);\n",
+ fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
p->head->struct_name.len,
- p->head->struct_name.txt);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
fprintf(f, "\t\tbreak;\n");
}
It is particularly important to have fine control over freeing during error
recovery where individual stack frames might need to be freed.
-For this, the grammar author required to defined a `free_XX` function for
-each structure that is used by a non-terminal. `do_free` all call whichever
+For this, the grammar author is required to define a `free_XX` function for
+each structure that is used by a non-terminal. `do_free` will call whichever
is appropriate given a symbol number, and will call `free` (as is
-appropriate for tokens` on any terminal symbol.
+appropriate for tokens on any terminal symbol.
###### functions
continue;
fprintf(f, "\tcase %d:\n", s->num);
- fprintf(f, "\t\tfree_%.*s(asn);\n",
- s->struct_name.len,
- s->struct_name.txt);
+ if (s->isref) {
+ fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
+ fprintf(f, "\t\tfree(asn);\n");
+ } else
+ fprintf(f, "\t\tfree_%.*s(asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
fprintf(f, "\t\tbreak;\n");
}
fprintf(f, "\t}\n}\n\n");
{ "SLR", 0, NULL, 'S' },
{ "LALR", 0, NULL, 'L' },
{ "LR1", 0, NULL, '1' },
+ { "tag", 1, NULL, 't' },
{ "report", 0, NULL, 'R' },
{ "output", 1, NULL, 'o' },
{ NULL, 0, NULL, 0 }
};
- const char *options = "05SL1Ro:";
+ const char *options = "05SL1t:Ro:";
###### process arguments
int opt;
char *outfile = NULL;
char *infile;
char *name;
+ char *tag = NULL;
int report = 1;
enum grammar_type type = LR05;
while ((opt = getopt_long(argc, argv, options,
report = 2; break;
case 'o':
outfile = optarg; break;
+ case 't':
+ tag = optarg; break;
default:
fprintf(stderr, "Usage: parsergen ...\n");
exit(1);
To be able to run `mdcode` and `scanner` on the grammar we need to memory
map it.
-One we have extracted the code (with `mdcode`) we expect to file three
-sections: header, code, and grammar. Anything else is an error.
+Once we have extracted the code (with `mdcode`) we expect to find three
+sections: header, code, and grammar. Anything else that is not
+excluded by the `--tag` option is an error.
"header" and "code" are optional, though it is hard to build a working
parser with neither. "grammar" must be provided.
struct code_node *code = NULL;
struct code_node *gram = NULL;
for (s = table; s; s = s->next) {
- if (text_is(s->section, "header"))
+ struct text sec = s->section;
+ if (tag && !strip_tag(&sec, tag))
+ continue;
+ if (text_is(sec, "header"))
hdr = s->code;
- else if (text_is(s->section, "code"))
+ else if (text_is(sec, "code"))
code = s->code;
- else if (text_is(s->section, "grammar"))
+ else if (text_is(sec, "grammar"))
gram = s->code;
else {
fprintf(stderr, "Unknown content section: %.*s\n",
}
And that about wraps it up. We need to set the locale so that UTF-8 is
-recognised properly, and link with `libicuuc` is `libmdcode` requires that.
+recognised properly, and link with `libicuuc` as `libmdcode` requires that.
###### File: parsergen.mk
parsergen : parsergen.o libscanner.o libmdcode.o
## The SHIFT/REDUCE parser
-Having analysed the grammar and generated all the table, we only need the
+Having analysed the grammar and generated all the tables, we only need the
shift/reduce engine to bring it all together.
### Goto table lookup
production, and by keeping a separate `asn` stack, we can just pass a
pointer into this stack.
-The other allocation stores all other stack fields of which there are two.
+The other allocation stores all other stack fields of which there are six.
The `state` is the most important one and guides the parsing process. The
`sym` is nearly unnecessary. However when we want to free entries from the
`asn_stack`, it helps to know what type they are so we can call the right
freeing function. The symbol leads us to the right free function through
`do_free`.
+The `indents` count and the `starts_indented` flag track the line
+indents in the symbol. These are used to allow indent information to
+guide parsing and error recovery.
+
+`newline_permitted` keeps track of whether newlines should be ignored
+or not, and `starts_line` records if this state started on a newline.
+
As well as the stack of frames we have a `next` frame which is
assembled from the incoming token and other information prior to
pushing it onto the stack.
struct frame {
short state;
short sym;
+ short starts_indented;
+ short indents;
+ short newline_permitted;
} *stack, next;
void **asn_stack;
int stack_size;
function reports if it could.
So `shift` finds the next state. If that succeed it extends the allocations
-if needed and pushed all the information onto the stacks.
+if needed and pushes all the information onto the stacks.
###### parser functions
p->asn_stack[p->tos] = asn;
p->tos++;
p->next.state = newstate;
+ p->next.indents = 0;
+ p->next.starts_indented = 0;
+ // if new state doesn't start a line, we inherit newline_permitted status
+ if (states[newstate].starts_line)
+ p->next.newline_permitted = 1;
return 1;
}
{
int i;
p->tos -= num;
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
+ p->next.indents += p->stack[p->tos+i].indents;
do_free(p->stack[p->tos+i].sym,
p->asn_stack[p->tos+i]);
+ }
- if (num)
+ if (num) {
p->next.state = p->stack[p->tos].state;
+ p->next.starts_indented = p->stack[p->tos].starts_indented;
+ p->next.newline_permitted = p->stack[p->tos].newline_permitted;
+ if (p->next.indents > p->next.starts_indented)
+ p->next.newline_permitted = 0;
+ }
}
### Memory allocation
single entries off the stack until we can shift the `TK_error` symbol, then
drop input tokens until we find one we can shift into the new error state.
+When we find `TK_in` and `TK_out` tokens which report indents we need
+to handle them directly as the grammar cannot express what we want to
+do with them.
+
+`TK_in` tokens are easy: we simply update the `next` stack frame to
+record how many indents there are and that the next token started with
+an indent.
+
+`TK_out` tokens must either be counted off against any pending indent,
+or must force reductions until there is a pending indent which isn't
+at the start of a production.
+
+`TK_newline` tokens are ignored precisely if there has been an indent
+since the last state which could have been at the start of a line.
###### parser includes
#include "parser.h"
###### parser_run
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns)
+ FILE *trace, const char *non_term[],
+ struct token_config *config)
{
struct parser p = { 0 };
struct token *tk = NULL;
int accepted = 0;
void *ret;
+ p.next.newline_permitted = states[0].starts_line;
while (!accepted) {
struct token *err_tk;
if (!tk)
tk = tok_copy(token_next(tokens));
p.next.sym = tk->num;
if (trace)
- parser_trace(trace, &p, tk, states, non_term, knowns);
+ parser_trace(trace, &p, tk, states, non_term, config->known_count);
+ if (p.next.sym == TK_in) {
+ p.next.starts_indented = 1;
+ p.next.indents = 1;
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ if (p.next.sym == TK_out) {
+ if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
+ (p.stack[p.tos-1].indents == 1 &&
+ states[p.next.state].reduce_size > 1)) {
+ p.stack[p.tos-1].indents -= 1;
+ if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
+ // no internal indent any more, reassess 'newline_permitted'
+ if (states[p.stack[p.tos-1].state].starts_line)
+ p.stack[p.tos-1].newline_permitted = 1;
+ else if (p.tos > 1)
+ p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
+ }
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ // fall through and force a REDUCE (as 'shift'
+ // will fail).
+ }
+ if (p.next.sym == TK_newline) {
+ if (!p.tos || ! p.stack[p.tos-1].newline_permitted) {
+ free(tk);
+ tk = NULL;
+ continue;
+ }
+ }
if (shift(&p, tk, states)) {
tk = NULL;
continue;
body = p.asn_stack +
(p.tos - states[p.next.state].reduce_size);
- bufsize = do_reduce(prod, body, buf);
+ bufsize = do_reduce(prod, body, config, buf);
pop(&p, size, do_free);
shift(&p, memdup(buf, bufsize), states);
accepted = 1;
continue;
}
+ if (tk->num == TK_out) {
+ // Indent problem - synthesise tokens to get us
+ // out of here.
+ fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
+ p.next.sym = states[p.next.state].shift_sym;
+ shift(&p, tok_copy(*tk), states);
+ // FIXME need to report this error somehow
+ continue;
+ }
/* Error. We walk up the stack until we
* find a state which will accept TK_error.
* We then shift in TK_error and see what state
tk->num != TK_eof) {
free(tk);
tk = tok_copy(token_next(tokens));
+ if (tk->num == TK_in)
+ p.next.indents += 1;
+ if (tk->num == TK_out) {
+ if (p.next.indents == 0)
+ break;
+ p.next.indents -= 1;
+ }
}
if (p.tos == 0 && tk->num == TK_eof)
break;
###### exported functions
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns);
+ FILE *trace, const char *non_term[],
+ struct token_config *config);
### Tracing
[TK_newline] = "NEWLINE",
[TK_eof] = "$eof",
};
+ static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
+ {
+ fprintf(trace, "(%d", f->state);
+ if (f->indents)
+ fprintf(trace, "%c%d", f->starts_indented?':':'.',
+ f->indents);
+ if (states[f->state].starts_line)
+ fprintf(trace, "s");
+ if (f->newline_permitted)
+ fprintf(trace, "n");
+ fprintf(trace, ") ");
+ }
+
void parser_trace(FILE *trace, struct parser *p,
struct token *tk, const struct state states[],
const char *non_term[], int knowns)
int i;
for (i = 0; i < p->tos; i++) {
int sym = p->stack[i].sym;
- fprintf(trace, "(%d) ", p->stack[i].state);
+ parser_trace_state(trace, &p->stack[i], states);
if (sym < TK_reserved &&
reserved_words[sym] != NULL)
fputs(reserved_words[sym], trace);
trace);
fputs(" ", trace);
}
- fprintf(trace, "(%d) [", p->next.state);
+ parser_trace_state(trace, &p->next, states);
+ fprintf(trace, " [");
if (tk->num < TK_reserved &&
reserved_words[tk->num] != NULL)
fputs(reserved_words[tk->num], trace);
work to perform arbitrary rational number calculations.
This calculator takes one expression, or an equality test per line. The
-results are printed and in any equality test fails, the program exits with
+results are printed and if any equality test fails, the program exits with
an error.
-Embedding mdcode inside mdcode is rather horrible. I'd like to find a
-better approach, but as the grammar file must have 3 components I need
-something like this.
-
###### File: parsergen.mk
- calc.c : parsergen calc.cgm
- ./parsergen -o calc calc.cgm
+ calc.c calc.h : parsergen parsergen.mdc
+ ./parsergen --tag calc -o calc parsergen.mdc
calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
$(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
-###### demo grammar
-
- # header
- ~~~~~
+# calc: header
#include "number.h"
// what do we use for a demo-grammar? A calculator of course.
int err;
};
- ~~~~~
- # code
- ~~~~~
+# calc: code
#include <stdlib.h>
#include <unistd.h>
.word_cont = "",
};
parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
+ while (s) {
+ struct section *t = s->next;
+ code_free(s->code);
+ free(s);
+ s = t;
+ }
exit(0);
}
- ~~~~~
- # grammar
- ~~~~~
+# calc: grammar
Session -> Session Line
| Line