_ad hoc_ parsing as we don't have a parser generator yet.
The precedence and associativity can be set for each production, but
-can be inherited from symbols. The data type is potentially defined
-for each non-terminal and describes what C structure is used to store
-information about each symbol.
+can be inherited from symbols. The data type (either structure or a
+reference to a structure) is potentially defined for each non-terminal
+and describes what C structure is used to store information about each
+symbol.
###### declarations
enum assoc {Left, Right, Non};
struct symbol {
struct text struct_name;
+ int isref;
enum assoc assoc;
unsigned short precedence;
## symbol fields
int num_syms;
###### functions
- static int text_cmp(struct text a, struct text b)
- {
- int len = a.len;
- if (a.len > b.len)
- len = b.len;
- int cmp = strncmp(a.txt, b.txt, len);
- if (cmp)
- return cmp;
- else
- return a.len - b.len;
- }
-
static struct symbol *sym_find(struct grammar *g, struct text s)
{
struct symbol **l = &g->syms;
precedence, otherwise it specifies a data type.
The data type name is simply stored and applied to the head of all
-subsequent productions. It must be the name of a structure, so `$expr`
-maps to `struct expr`.
-
-Any productions given before the first data type will have no data type
-and can carry no information. In order to allow other non-terminals to
-have no type, the data type `$void` can be given. This does *not* mean
-that `struct void` will be used, but rather than no type will be
-associated with future non-terminals.
+subsequent productions. It must be the name of a structure optionally
+preceded by an asterisk which means a reference or "pointer". So
+`$expression` maps to `struct expression` and `$*statement` maps to
+`struct statement *`.
+
+Any productions given before the first data type declaration will have
+no data type associated with them and can carry no information. In
+order to allow other non-terminals to have no type, the data type
+`$void` can be given. This does *not* mean that `struct void` will be
+used, but rather that no type will be associated with future
+non-terminals.
The precedence line must contain a list of symbols - typically
terminal symbols, but not necessarily. It can only contain symbols
###### grammar fields
struct text current_type;
+ int type_isref;
int prec_levels;
###### declarations
static const char *known[] = { "$$", "${", "}$" };
###### functions
- static char *dollar_line(struct token_state *ts, struct grammar *g)
+ static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
{
struct token t = token_next(ts);
char *err;
assoc = Non;
else {
g->current_type = t.txt;
+ g->type_isref = isref;
if (text_is(t.txt, "void"))
g->current_type.txt = NULL;
t = token_next(ts);
return NULL;
}
+ if (isref) {
+ err = "$* cannot be followed by a precedence";
+ goto abort;
+ }
+
// This is a precedence line, need some symbols.
found = 0;
g->prec_levels += 1;
be in one `code_node` of the literate code. The `}$` must be
at the end of a line.
-Text in the code fragment will undergo substitutions where `$N` for
-some numeric `N` will be replaced with a variable holding the parse
-information for the particular symbol in the production. `$0` is the
-head of the production, `$1` is the first symbol of the body, etc.
-The type of `$N` for a terminal symbol is `struct token`. For
-a non-terminal, it is whatever has been declared for that symbol.
+Text in the code fragment will undergo substitutions where `$N` or
+`$<N`, for some numeric `N`, will be replaced with a variable holding
+the parse information for the particular symbol in the production.
+`$0` is the head of the production, `$1` is the first symbol of the
+body, etc. The type of `$N` for a terminal symbol is `struct token`.
+For a non-terminal, it is whatever has been declared for that symbol.
+The `<` may be included for symbols declared as storing a reference
+(not a structure) and means that the reference is being moved out, so
+it will not automatically be freed.
While building productions we will need to add to an array which needs to
grow dynamically.
struct production *p = calloc(1,sizeof(*p));
struct text start = {"$start",6};
struct text eof = {"$eof",4};
+ struct text code = {"$0 = $<1;", 9};
p->head = sym_find(g, start);
p->head->type = Nonterminal;
+ p->head->struct_name = g->current_type;
+ p->head->isref = g->type_isref;
+ if (g->current_type.txt)
+ p->code = code;
array_add(&p->body, &p->body_size, head);
array_add(&p->body, &p->body_size, sym_find(g, eof));
p->head->first_production = g->production_count;
else {
head->type = Nonterminal;
head->struct_name = g->current_type;
+ head->isref = g->type_isref;
if (g->production_count == 0) {
## create production zero
}
err = "First production must have a head";
} else if (tk.num == TK_mark
&& text_is(tk.txt, "$")) {
- err = dollar_line(state, g);
+ err = dollar_line(state, g, 0);
+ } else if (tk.num == TK_mark
+ && text_is(tk.txt, "$*")) {
+ err = dollar_line(state, g, 1);
} else {
err = "Unrecognised token at start of line.";
}
}
Adding an itemset may require merging the LA sets if LALR analysis is
-happening. If any new LA set add symbol that weren't in the old LA set, we
+happening. If any new LA set adds any symbols that weren't in the old LA set, we
clear the `completed` flag so that the dependants of this itemset will be
recalculated and their LA sets updated.
#### The build
-To build all the itemsets, we first insert the initial itemset made from the
-start symbol, complete each itemset, and then generate new itemsets from old
-until no new ones can be made.
+To build all the itemsets, we first insert the initial itemset made
+from production zero, complete each itemset, and then generate new
+itemsets from old until no new ones can be made.
Completing an itemset means finding all the items where "DOT" is followed by
a nonterminal and adding "DOT=0" items for every production from that
The purpose of the report is to give the grammar developer insight into
how the grammar parser will work. It is basically a structured dump of
-all the tables that have been generated, plus an description of any conflicts.
+all the tables that have been generated, plus a description of any conflicts.
###### grammar_report
static int grammar_report(struct grammar *g, enum grammar_type type)
return report_conflicts(g, type);
}
-Firstly we have the complete list of symbols, together with the "FIRST"
-set if that was generated.
+Firstly we have the complete list of symbols, together with the
+"FIRST" set if that was generated. We add a mark to each symbol to
+show if it can end in a newline (`>`), or if it is nullable (`.`).
###### functions
}
}
-Then we have to follow sets if they were computed.
+Then we have the follow sets if they were computed.
static void report_follow(struct grammar *g)
{
}
LR0 conflicts are any state which have both a reducible item and
-a shiftable item.
+a shiftable item, or two reducible items.
LR05 conflicts only occur if two possible reductions exist,
as shifts always over-ride reductions.
}
SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
-look ahead, or if a symbol in a look-ahead can be shifted. The differ only
+look ahead, or if a symbol in a look-ahead can be shifted. They differ only
in the source of the look ahead set.
-We build a dataset mapping terminal to item for possible SHIFTs and then
-another for possible REDUCE operations. We report when we get conflicts
-between the two.
+We build two datasets to reflect the "action" table: one which maps
+terminals to items where that terminal could be shifted and another
+which maps terminals to items that could be reduced when the terminal
+is in look-ahead. We report when we get conflicts between the two.
static int conflicts_slr(struct grammar *g, enum grammar_type type)
{
## Generating the parser
-The export part of the parser is the `parse_XX` function, where the name
+The exported part of the parser is the `parse_XX` function, where the name
`XX` is based on the name of the parser files.
This takes a `code_node`, a partially initialized `token_config`, and an
known words added and then is used with the `code_node` to initialize the
scanner.
-`parse_XX` then call the library function `parser_run` to actually complete
+`parse_XX` then calls the library function `parser_run` to actually complete
the parse. This needs the `states` table and function to call the various
pieces of code provided in the grammar file, so they are generated first.
fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
fprintf(f, "\ttokens = token_open(code, config);\n");
- fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config->known_count);\n");
+ fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
fprintf(f, "\ttoken_close(tokens);\n");
fprintf(f, "\treturn rv;\n");
fprintf(f, "}\n\n");
}
-### Table words table
+### Known words table
-The know words is simply an array of terminal symbols.
+The known words table is simply an array of terminal symbols.
The table of nonterminals used for tracing is a similar array.
###### functions
`do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
`do_reduce` return the size to be saved.
+In order for the code to access "global" context, we pass in the
+"config" pointer that was passed to the parser function. If the `struct
+token_config` is embedded in some larger structure, the reducing code
+can access the larger structure using pointer manipulation.
+
The code fragment requires translation when written out. Any `$N` needs to
be converted to a reference either to that buffer (if `$0`) or to the
-structure returned by a previous reduction. These pointer need to be cast
+structure returned by a previous reduction. These pointers need to be cast
to the appropriate type for each access. All this is handled in
`gen_code`.
+`gen_code` also allows symbol references to contain a '`<`' as in '`$<2`'.
+This applies only to symbols with references (or pointers), not those with structures.
+The `<` implies that the reference is being moved out, so the object will not be
+automatically freed. This is equivalent to assigning `NULL` to the pointer.
###### functions
static void gen_code(struct production *p, FILE *f, struct grammar *g)
{
char *c;
+ char *used = calloc(1, p->body_size);
+ int i;
+
fprintf(f, "\t\t\t");
for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
int n;
+ int use = 0;
if (*c != '$') {
fputc(*c, f);
if (*c == '\n')
continue;
}
c++;
+ if (*c == '<') {
+ use = 1;
+ c++;
+ }
if (*c < '0' || *c > '9') {
+ if (use)
+ fputc('<', f);
fputc(*c, f);
continue;
}
n = n * 10 + *c - '0';
}
if (n == 0)
- fprintf(f, "(*(struct %.*s*)ret)",
+ fprintf(f, "(*(struct %.*s*%s)ret)",
p->head->struct_name.len,
- p->head->struct_name.txt);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
else if (n > p->body_size)
fprintf(f, "$%d", n);
else if (p->body[n-1]->type == Terminal)
n-1);
else if (p->body[n-1]->struct_name.txt == NULL)
fprintf(f, "$%d", n);
- else
- fprintf(f, "(*(struct %.*s*)body[%d])",
+ else {
+ fprintf(f, "(*(struct %.*s*%s)body[%d])",
p->body[n-1]->struct_name.len,
- p->body[n-1]->struct_name.txt, n-1);
+ p->body[n-1]->struct_name.txt,
+ p->body[n-1]->isref ? "*":"", n-1);
+ used[n-1] = use;
+ }
}
fputs("\n", f);
+ for (i = 0; i < p->body_size; i++) {
+ if (p->body[i]->struct_name.txt &&
+ p->body[i]->isref &&
+ used[i])
+ // assume this has been copied out
+ fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
+ }
+ free(used);
}
###### functions
{
int i;
fprintf(f, "#line 0 \"gen_reduce\"\n");
- fprintf(f, "static int do_reduce(int prod, void **body, void *ret)\n");
+ fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
fprintf(f, "{\n");
fprintf(f, "\tint ret_size = 0;\n");
gen_code(p, f, g);
if (p->head->struct_name.txt)
- fprintf(f, "\t\tret_size = sizeof(struct %.*s);\n",
+ fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
p->head->struct_name.len,
- p->head->struct_name.txt);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
fprintf(f, "\t\tbreak;\n");
}
It is particularly important to have fine control over freeing during error
recovery where individual stack frames might need to be freed.
-For this, the grammar author required to defined a `free_XX` function for
-each structure that is used by a non-terminal. `do_free` all call whichever
-For this, the grammar author is required to define a `free_XX` function for
+each structure that is used by a non-terminal. `do_free` will call whichever
is appropriate given a symbol number, and will call `free` (as is
-appropriate for tokens` on any terminal symbol.
+appropriate for tokens) on any terminal symbol.
###### functions
continue;
fprintf(f, "\tcase %d:\n", s->num);
- fprintf(f, "\t\tfree_%.*s(asn);\n",
- s->struct_name.len,
- s->struct_name.txt);
+ if (s->isref) {
+ fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
+ fprintf(f, "\t\tfree(asn);\n");
+ } else
+ fprintf(f, "\t\tfree_%.*s(asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
fprintf(f, "\t\tbreak;\n");
}
fprintf(f, "\t}\n}\n\n");
To be able to run `mdcode` and `scanner` on the grammar we need to memory
map it.
-One we have extracted the code (with `mdcode`) we expect to file three
-sections: header, code, and grammar. Anything else is an error.
+Once we have extracted the code (with `mdcode`) we expect to find three
+sections: header, code, and grammar. Anything else that is not
+excluded by the `--tag` option is an error.
"header" and "code" are optional, though it is hard to build a working
parser with neither. "grammar" must be provided.
}
And that about wraps it up. We need to set the locale so that UTF-8 is
-recognised properly, and link with `libicuuc` is `libmdcode` requires that.
+recognised properly, and link with `libicuuc` as `libmdcode` requires that.
###### File: parsergen.mk
parsergen : parsergen.o libscanner.o libmdcode.o
## The SHIFT/REDUCE parser
-Having analysed the grammar and generated all the table, we only need the
+Having analysed the grammar and generated all the tables, we only need the
shift/reduce engine to bring it all together.
### Goto table lookup
production, and by keeping a separate `asn` stack, we can just pass a
pointer into this stack.
-The other allocation stores all other stack fields of which there are four.
+The other allocation stores all other stack fields of which there are six.
The `state` is the most important one and guides the parsing process. The
`sym` is nearly unnecessary. However when we want to free entries from the
`asn_stack`, it helps to know what type they are so we can call the right
indents in the symbol. These are used to allow indent information to
guide parsing and error recovery.
+`newline_permitted` keeps track of whether newlines should be ignored
+or not, and `starts_line` records if this state started on a newline.
+
As well as the stack of frames we have a `next` frame which is
assembled from the incoming token and other information prior to
pushing it onto the stack.
or must force reductions until there is a pending indent which isn't
at the start of a production.
+`TK_newline` tokens are ignored precisely if there has been an indent
+since the last state which could have been at the start of a line.
+
###### parser includes
#include "parser.h"
###### parser_run
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns)
+ FILE *trace, const char *non_term[],
+ struct token_config *config)
{
struct parser p = { 0 };
struct token *tk = NULL;
int accepted = 0;
- void *ret;
+ void *ret = NULL;
p.next.newline_permitted = states[0].starts_line;
while (!accepted) {
tk = tok_copy(token_next(tokens));
p.next.sym = tk->num;
if (trace)
- parser_trace(trace, &p, tk, states, non_term, knowns);
+ parser_trace(trace, &p, tk, states, non_term, config->known_count);
if (p.next.sym == TK_in) {
p.next.starts_indented = 1;
}
if (states[p.next.state].reduce_prod >= 0) {
void **body;
+ void *res;
int prod = states[p.next.state].reduce_prod;
int size = states[p.next.state].reduce_size;
int bufsize;
body = p.asn_stack +
(p.tos - states[p.next.state].reduce_size);
- bufsize = do_reduce(prod, body, buf);
+ bufsize = do_reduce(prod, body, config, buf);
pop(&p, size, do_free);
- shift(&p, memdup(buf, bufsize), states);
- if (prod == 0)
+ res = memdup(buf, bufsize);
+ memset(buf, 0, bufsize);
+ if (!shift(&p, res, states)) {
+ if (prod != 0) abort();
accepted = 1;
+ ret = res;
+ }
continue;
}
if (tk->num == TK_out) {
break;
}
free(tk);
- if (accepted)
- ret = p.asn_stack[0];
- else
- pop(&p, p.tos, do_free);
+ pop(&p, p.tos, do_free);
free(p.asn_stack);
free(p.stack);
return ret;
###### exported functions
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns);
+ FILE *trace, const char *non_term[],
+ struct token_config *config);
### Tracing
work to perform arbitrary rational number calculations.
This calculator takes one expression, or an equality test per line. The
-results are printed and in any equality test fails, the program exits with
+results are printed and if any equality test fails, the program exits with
an error.
-Embedding mdcode inside mdcode is rather horrible. I'd like to find a
-better approach, but as the grammar file must have 3 components I need
-something like this.
-
###### File: parsergen.mk
calc.c calc.h : parsergen parsergen.mdc
./parsergen --tag calc -o calc parsergen.mdc
.word_cont = "",
};
parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
+ while (s) {
+ struct section *t = s->next;
+ code_free(s->code);
+ free(s);
+ s = t;
+ }
exit(0);
}