-# LR(1) Parser Generator #
+# LR(1) Parser Generator with 2D support #
This parser generator takes a grammar description combined with code
fragments, analyses it, and can report details about the analysis and
write out C code files which can be compiled to make a parser.
+"2D support" means that indentation and line breaks can be significant.
+
There are several distinct sections.
1. `grammar_read` will read a grammar from a literate-code file and
5. `parser_run` is a library function intended to be linked together
with the generated parser tables to complete the implementation of
a parser.
-6. `calc.cgm` is a test grammar for a simple calculator.
+6. Finally `calc` is a test grammar for a simple calculator. The
+ `parsergen` program built from the C code in this file can extract
+ that grammar directly from this file and process it.
###### File: parsergen.c
#include <unistd.h>
## parser includes
## parser functions
## parser_run
-###### File: calc.cgm
- ## demo grammar
###### File: parsergen.mk
CFLAGS += -Wall -g
all :: parsergen calc
- parsergen.c parsergen.mk calc.cgm libparser.c parser.h : parsergen.mdc
+ parsergen.c parsergen.mk libparser.c parser.h : parsergen.mdc
./md2c parsergen.mdc
## Reading the grammar
-The grammar must be stored in a markdown literate code file as parsed
-by "[mdcode][]". It must have three top level (i.e. unreferenced)
-sections: `header`, `code`, and `grammar`. The first two will be
-literally copied into the generated `.c` and `.h`. files. The last
-contains the grammar. This is tokenised with "[scanner][]".
+The grammar must be stored in a markdown literate code file as parsed by
+"[mdcode][]". It may have up to four top level (i.e. unreferenced)
+sections: `header`, `code`, `grammar` and `reduce`. The first two, if
+present, will be literally copied into the generated `.c` and `.h`
+files. The third is required and contains the grammar which is
+tokenised with "[scanner][]". `reduce` can provide variable
+declarations and common initialization code for all the reduction code
+fragments in the grammar.
+
+If the `--tag` option is given, then any top level header that doesn't
+start with the tag is ignored, and the tag is stripped from the rest. So
+`--tag Foo` means that the four sections expected are `Foo: header`,
+`Foo: code`, `Foo: grammar` and `Foo: reduce`. The tag `calc` is used
+to extract the sample calculator from this file.
[mdcode]: mdcode.html
[scanner]: scanner.html
#include "scanner.h"
The grammar contains production sets, precedence/associativity
-declarations, and data type declarations. These are all parsed with
-_ad hoc_ parsing as we don't have a parser generator yet.
+declarations, general terminal declarations, and data type declarations.
+These are all parsed with _ad hoc_ parsing as we don't have a parser
+generator yet.
-The precedence and associativity can be set for each production, but
-can be inherited from symbols. The data type is potentially defined
-for each non-terminal and describes what C structure is used to store
-information about each symbol.
+The precedence and associativity can be set for each production, and
+can be inherited from symbols. The data type (either structure or a
+reference to a structure) is potentially defined for each non-terminal
+and describes what C structure is used to store information about each
+symbol.
###### declarations
enum assoc {Left, Right, Non};
struct symbol {
struct text struct_name;
+ int isref;
enum assoc assoc;
unsigned short precedence;
## symbol fields
struct production {
unsigned short precedence;
enum assoc assoc;
+ char line_like;
## production fields
};
struct grammar {
## grammar fields
};
-The strings reported by `mdcode` and `scanner` are `struct text` which have
-length rather than being null terminated. To help with printing and
+The strings reported by `mdcode` and `scanner` are `struct text` which
+have length rather than being nul terminated. To help with printing and
comparing we define `text_is` and `prtxt`, which should possibly go in
-`mdcode`. `scanner` does provide `text_dump` which is useful for strings
-which might contain control characters.
+`mdcode`. `scanner` does provide `text_dump` which is useful for
+strings which might contain control characters.
+
+`strip_tag` is a bit like `strncmp`, but adds a test for a colon,
+because that is what we need to detect tags.
###### functions
static int text_is(struct text t, char *s)
printf("%.*s", t.len, t.txt);
}
+	/* Test whether *t begins with the given tag followed immediately
+	 * by a colon ("tag:").  If so, remove the tag, the colon and any
+	 * spaces that follow it from the front of *t and return 1;
+	 * otherwise leave *t unchanged and return 0.
+	 */
+	static int strip_tag(struct text *t, char *tag)
+	{
+		int skip = strlen(tag) + 1;	/* length of "tag" plus the ':' */
+		if (skip >= t->len ||
+		    strncmp(t->txt, tag, skip-1) != 0 ||
+		    t->txt[skip-1] != ':')
+			return 0;
+		/* Also consume any spaces after the colon. */
+		while (skip < t->len && t->txt[skip] == ' ')
+			skip++;
+		t->len -= skip;
+		t->txt += skip;
+		return 1;
+	}
+
### Symbols
Productions are comprised primarily of symbols - terminal and
marking discussed later, and sometimes we won't know what type a symbol
is yet.
+To help with code safety it is possible to declare the terminal symbols.
+If this is done, then any symbol used in a production that does not
+appear in the head of any production and is not declared as a terminal
+is treated as an error.
+
###### forward declarations
enum symtype { Unknown, Virtual, Terminal, Nonterminal };
char *symtypes = "UVTN";
###### symbol fields
enum symtype type;
+###### grammar fields
+ int terminals_declared;
Symbols can be either `TK_ident` or `TK_mark`. They are saved in a
table of known symbols and the resulting parser will report them as
different token types that `scanner` can report.
###### declarations
- static char *reserved_words[] = {
- [TK_error] = "ERROR",
- [TK_number] = "NUMBER",
- [TK_ident] = "IDENTIFIER",
- [TK_mark] = "MARK",
- [TK_string] = "STRING",
- [TK_multi_string] = "MULTI_STRING",
- [TK_in] = "IN",
- [TK_out] = "OUT",
- [TK_newline] = "NEWLINE",
- [TK_eof] = "$eof",
+
+	/* Tokens pre-defined by the scanner, paired with the reserved
+	 * names that grammars use to refer to them.  These are entered
+	 * into the symbol table as Terminal symbols before the grammar
+	 * is read, so productions can name them directly.
+	 */
+	static struct { int num; char *name; } reserved_words[] = {
+		{ TK_error,        "ERROR" },
+		{ TK_number,       "NUMBER" },
+		{ TK_ident,        "IDENTIFIER" },
+		{ TK_mark,         "MARK" },
+		{ TK_string,       "STRING" },
+		{ TK_multi_string, "MULTI_STRING" },
+		{ TK_in,           "IN" },
+		{ TK_out,          "OUT" },
+		{ TK_newline,      "NEWLINE" },
+		{ TK_eof,          "$eof" },
};
###### symbol fields
short num;
int num_syms;
###### functions
- static int text_cmp(struct text a, struct text b)
- {
- int len = a.len;
- if (a.len > b.len)
- len = b.len;
- int cmp = strncmp(a.txt, b.txt, len);
- if (cmp)
- return cmp;
- else
- return a.len - b.len;
- }
-
static struct symbol *sym_find(struct grammar *g, struct text s)
{
struct symbol **l = &g->syms;
for (i = 0; i < entries; i++) {
struct text t;
struct symbol *s;
- t.txt = reserved_words[i];
- if (!t.txt)
- continue;
+ t.txt = reserved_words[i].name;
t.len = strlen(t.txt);
s = sym_find(g, t);
s->type = Terminal;
- s->num = i;
+ s->num = reserved_words[i].num;
}
}
### Data types and precedence.
-Data type specification and precedence specification are both
-introduced by a dollar sign at the start of the line. If the next
-word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
+Data type specification, precedence specification, and declaration of
+terminals are all introduced by a dollar sign at the start of the line.
+If the next word is `LEFT`, `RIGHT` or `NON`, then the line specifies a
+precedence, if it is `TERM` then the line declares terminals without
precedence, otherwise it specifies a data type.
The data type name is simply stored and applied to the head of all
-subsequent productions. It must be the name of a structure, so `$expr`
-maps to `struct expr`.
-
-Any productions given before the first data type will have no data type
-and can carry no information. In order to allow other non-terminals to
-have no type, the data type `$void` can be given. This does *not* mean
-that `struct void` will be used, but rather than no type will be
-associated with future non-terminals.
-
-The precedence line must contain a list of symbols - typically
-terminal symbols, but not necessarily. It can only contain symbols
-that have not been seen yet, so precedence declaration must precede
-the productions that they affect.
-
-A precedence line may also contain a "virtual" symbol which is an
-identifier preceded by `$$`. Virtual terminals carry precedence
-information but are not included in the grammar. A production can
-declare that it inherits the precedence of a given virtual symbol.
+subsequent productions. It must be the name of a structure optionally
+preceded by an asterisk which means a reference or "pointer". So
+`$expression` maps to `struct expression` and `$*statement` maps to
+`struct statement *`.
+
+Any productions given before the first data type declaration will have
+no data type associated with them and can carry no information. In
+order to allow other non-terminals to have no type, the data type
+`$void` can be given. This does *not* mean that `struct void` will be
+used, but rather that no type will be associated with subsequent
+non-terminals.
+
+The precedence line must contain a list of symbols, either terminal
+symbols or virtual symbols. It can only contain symbols that have not
+been seen yet, so precedence declaration must precede the productions
+that they affect.
+
+A "virtual" symbol is an identifier preceded by `$$`. Virtual symbols
+carry precedence information but are not included in the grammar. A
+production can declare that it inherits the precedence of a given
+virtual symbol.
This use for `$$` precludes it from being used as a symbol in the
described language. Two other symbols: `${` and `}$` are also
listed and may be inherited by any production which uses the symbol. A
production inherits from the last symbol which has a precedence.
+The symbols on the first precedence line have the lowest precedence.
+Subsequent lines introduce symbols with higher precedence and so bind
+more tightly.
+
+Note that a declaration line (or "dollar line") can start with either of
+two different marks: "$" or "$*". The `dollar_line()` function defined
+here is told which was found in the `isref` argument.
+
###### grammar fields
struct text current_type;
+ int type_isref;
int prec_levels;
###### declarations
static const char *known[] = { "$$", "${", "}$" };
###### functions
- static char *dollar_line(struct token_state *ts, struct grammar *g)
+ static char *dollar_line(struct token_state *ts, struct grammar *g, int isref)
{
struct token t = token_next(ts);
char *err;
enum assoc assoc;
+ int term = 0;
int found;
if (t.num != TK_ident) {
assoc = Right;
else if (text_is(t.txt, "NON"))
assoc = Non;
- else {
+ else if (text_is(t.txt, "TERM")) {
+ term = 1;
+ g->terminals_declared = 1;
+ } else {
g->current_type = t.txt;
+ g->type_isref = isref;
if (text_is(t.txt, "void"))
g->current_type.txt = NULL;
t = token_next(ts);
return NULL;
}
- // This is a precedence line, need some symbols.
+ if (isref) {
+ err = "$* cannot be followed by a precedence";
+ goto abort;
+ }
+
+ // This is a precedence or TERM line, need some symbols.
found = 0;
g->prec_levels += 1;
t = token_next(ts);
err = "$$ must be followed by a word";
goto abort;
}
+ if (term) {
+ err = "Virtual symbols not permitted on $TERM line";
+ goto abort;
+ }
} else if (t.num != TK_ident &&
t.num != TK_mark) {
err = "Illegal token in precedence line";
}
s = sym_find(g, t.txt);
if (s->type != Unknown) {
- err = "Symbols in precedence line must not already be known.";
+ err = "Symbols in precedence/TERM line must not already be known.";
goto abort;
}
s->type = type;
- s->precedence = g->prec_levels;
- s->assoc = assoc;
+ if (!term) {
+ s->precedence = g->prec_levels;
+ s->assoc = assoc;
+ }
found += 1;
+ t = token_next(ts);
}
if (found == 0)
- err = "No symbols given on precedence line";
+ err = "No symbols given on precedence/TERM line";
goto abort;
return NULL;
abort:
be in one `code_node` of the literate code. The `}$` must be
at the end of a line.
-Text in the code fragment will undergo substitutions where `$N` for
-some numeric `N` will be replaced with a variable holding the parse
-information for the particular symbol in the production. `$0` is the
-head of the production, `$1` is the first symbol of the body, etc.
-The type of `$N` for a terminal symbol is `struct token`. For
-a non-terminal, it is whatever has been declared for that symbol.
+Text in the code fragment will undergo substitutions where `$N` or
+`$<N`, for some numeric `N` (or non-numeric indicator as described
+later), will be replaced with a variable holding the parse information
+for the particular symbol in the production. `$0` is the head of the
+production, `$1` is the first symbol of the body, etc. The type of `$N`
+for a terminal symbol is `struct token`. For a non-terminal, it is
+whatever has been declared for that symbol. The `<` may be included and
+means that the value (usually a reference) is being moved out, so it
+will not automatically be freed. The effect of using `<` is that the
+variable is cleared to all-zeros.
While building productions we will need to add to an array which needs to
grow dynamically.
return code;
}
-Now we have all the bit we need to parse a full production.
+Now we have all the bits we need to parse a full production.
###### includes
#include <memory.h>
struct symbol **body;
int body_size;
struct text code;
+ int code_line;
###### symbol fields
int first_production;
tk = token_next(state);
while (tk.num == TK_ident || tk.num == TK_mark) {
struct symbol *bs = sym_find(g, tk.txt);
- if (bs->type == Unknown)
- bs->type = Terminal;
+ if (bs->type == Unknown) {
+ if (!g->terminals_declared)
+ bs->type = Terminal;
+ }
if (bs->type == Virtual) {
err = "Virtual symbol not permitted in production";
goto abort;
goto abort;
}
vs = sym_find(g, tk.txt);
- if (vs->type != Virtual) {
- err = "symbol after $$ must be virtual";
+ if (vs->num == TK_newline)
+ p.line_like = 1;
+ else if (vs->num == TK_out)
+ p.line_like = 2;
+ else if (vs->precedence == 0) {
+ err = "symbol after $$ must have precedence";
goto abort;
+ } else {
+ p.precedence = vs->precedence;
+ p.assoc = vs->assoc;
}
- p.precedence = vs->precedence;
- p.assoc = vs->assoc;
tk = token_next(state);
}
if (tk.num == TK_open) {
+ p.code_line = tk.line;
p.code = collect_code(state, tk);
if (p.code.txt == NULL) {
err = "code fragment not closed properly";
}
tk = token_next(state);
}
+
if (tk.num != TK_newline && tk.num != TK_eof) {
err = "stray tokens at end of line";
goto abort;
abort:
while (tk.num != TK_newline && tk.num != TK_eof)
tk = token_next(state);
+ free(p.body);
return err;
}
-With the ability to parse production and dollar-lines, we have nearly all
+With the ability to parse productions and dollar-lines, we have nearly all
that we need to parse a grammar from a `code_node`.
-The head of the first production will effectively be the `start` symbol of
-the grammar. However it won't _actually_ be so. Processing the grammar is
-greatly simplified if the real start symbol only has a single production,
-and expects `$eof` as the final terminal. So when we find the first
-explicit production we insert an extra production as production zero which
-looks like
+The head of the first production will effectively be the `start` symbol
+of the grammar. However it won't _actually_ be so. Processing the
+grammar is greatly simplified if the real start symbol only has a single
+production, and expects `$eof` as the final terminal. So when we find
+the first explicit production we insert an extra implicit production as
+production zero which looks like
###### Example: production 0
$start -> START $eof
struct production *p = calloc(1,sizeof(*p));
struct text start = {"$start",6};
struct text eof = {"$eof",4};
+ struct text code = {"$0 = $<1;", 9};
p->head = sym_find(g, start);
p->head->type = Nonterminal;
+ p->head->struct_name = g->current_type;
+ p->head->isref = g->type_isref;
+ if (g->current_type.txt)
+ p->code = code;
array_add(&p->body, &p->body_size, head);
array_add(&p->body, &p->body_size, sym_find(g, eof));
- g->start = p->head->num;
p->head->first_production = g->production_count;
array_add(&g->productions, &g->production_count, p);
-Now we are ready to read in the grammar.
+Now we are ready to read in the grammar. We ignore comments
+and strings so that the marks which introduce them can be
+used as terminals in the grammar. We don't ignore numbers
+even though we don't allow them as that causes the scanner
+to produce errors that the parser is better positioned to handle.
+We also ignore indentation, but we expect and use newlines.
-###### grammar fields
- int start; // the 'start' symbol.
+To allow comments in the grammar, we explicitly check for "//" as the
+first token on the line and those lines are skipped. "//" can still be
+used as a terminal anywhere that a terminal is expected.
###### grammar_read
static struct grammar *grammar_read(struct code_node *code)
else {
head->type = Nonterminal;
head->struct_name = g->current_type;
- if (g->start == 0) {
+ head->isref = g->type_isref;
+ if (g->production_count == 0) {
## create production zero
}
head->first_production = g->production_count;
err = "First production must have a head";
} else if (tk.num == TK_mark
&& text_is(tk.txt, "$")) {
- err = dollar_line(state, g);
+ err = dollar_line(state, g, 0);
+ } else if (tk.num == TK_mark
+ && text_is(tk.txt, "$*")) {
+ err = dollar_line(state, g, 1);
+ } else if (tk.num == TK_mark
+ && text_is(tk.txt, "//")) {
+ while (tk.num != TK_newline &&
+ tk.num != TK_eof)
+ tk = token_next(state);
} else {
err = "Unrecognised token at start of line.";
}
goto abort;
}
token_close(state);
+ if (g->terminals_declared) {
+ struct symbol *s;
+ int errs = 0;
+ for (s = g->syms; s; s = s->next) {
+ if (s->type != Unknown)
+ continue;
+ errs += 1;
+ fprintf(stderr, "Token %.*s not declared\n",
+ s->name.len, s->name.txt);
+ }
+ if (errs) {
+ free(g); // FIXME free content
+ g = NULL;
+ }
+ }
return g;
abort:
fprintf(stderr, "Error at line %d: %s\n",
tk.line, err);
token_close(state);
- free(g);
+ free(g); // FIXME free content
return NULL;
}
## Analysing the grammar
The central task in analysing the grammar is to determine a set of
-states to drive the parsing process. This will require finding
-various sets of symbols and of "items". Some of these sets will need
-to attach information to each element in the set, so they are more
-like maps.
+states to drive the parsing process. This will require finding various
+sets of symbols and of "items". Some of these sets will need to attach
+information to each element in the set, so they are more like maps.
-Each "item" may have a 'look-ahead' or `LA` set associated with
-it. Many of these will be the same as each other. To avoid memory
-wastage, and to simplify some comparisons of sets, these sets will be
-stored in a list of unique sets, each assigned a number.
+Each "item" may have a 'look-ahead' or `LA` set associated with it.
+Many of these will be the same as each other. To avoid memory wastage,
+and to simplify some comparisons of sets, these sets will be stored in a
+list of unique sets, each assigned a number.
-Once we have the data structures in hand to manage these sets and
-lists, we can start setting the 'nullable' flag, build the 'FIRST'
-sets, and then create the item sets which define the various states.
+Once we have the data structures in hand to manage these sets and lists,
+we can start setting the 'nullable' flag, build the 'FIRST' sets, and
+then create the item sets which define the various states.
### Symbol sets.
things we store, so they are called symbol sets or "symsets".
A symset has a size, an array of shorts, and an optional array of data
-values, which are also shorts. If the array of data is not present,
-we store an impossible pointer, as `NULL` is used to indicate that no
+values, which are also shorts. If the array of data is not present, we
+store an impossible pointer, as `NULL` is used to indicate that no
memory has been allocated yet;
###### declarations
const struct symset INIT_SYMSET = { 0, NULL, NO_DATA };
const struct symset INIT_DATASET = { 0, NULL, NULL };
-The arrays of shorts are allocated in blocks of 8 and are kept sorted
-by using an insertion sort. We don't explicitly record the amount of
-allocated space as it can be derived directly from the current `cnt` using
-`((cnt - 1) | 7) + 1`.
+The arrays of shorts are allocated in blocks of 8 and are kept sorted by
+using an insertion sort. We don't explicitly record the amount of
+allocated space as it can be derived directly from the current `cnt`
+using `((cnt - 1) | 7) + 1`.
###### functions
static void symset_add(struct symset *s, unsigned short key, unsigned short val)
}
We will often want to form the union of two symsets. When we do, we
-will often want to know if anything changed (as they might mean there
-is more work to do). So `symset_union` reports whether anything was
-added to the first set. We use a slow quadratic approach as these
-sets don't really get very big. If profiles shows this to be a problem is
-can be optimised later.
+will often want to know if anything changed (as that might mean there is
+more work to do). So `symset_union` reports whether anything was added
+to the first set. We use a slow quadratic approach as these sets are
+rarely large. If profiling shows this to be a problem it can be
+optimised later.
static int symset_union(struct symset *a, struct symset *b)
{
Some symsets are simply stored somewhere appropriate in the `grammar`
data structure, others needs a bit of indirection. This applies
particularly to "LA" sets. These will be paired with "items" in an
-"itemset". As itemsets will be stored in a symset, the "LA" set needs to be
-stored in the `data` field, so we need a mapping from a small (short)
-number to an LA `symset`.
+"itemset". As itemsets will be stored in a symset, the "LA" set needs
+to be stored in the `data` field, so we need a mapping from a small
+(short) number to an LA `symset`.
As mentioned earlier this involves creating a list of unique symsets.
return s->num;
}
-Finding a set by number is currently performed by a simple linear search.
-If this turns out to hurt performance, we can store the sets in a dynamic
-array like the productions.
+Finding a set by number is currently performed by a simple linear
+search. If this turns out to hurt performance, we can store the sets in
+a dynamic array like the productions.
static struct symset set_find(struct grammar *g, int num)
{
return sl->ss;
}
-
### Setting `nullable`
We set `nullable` on the head symbol for any production for which all
}
}
-### Setting `can_eol`
+### Setting `line_like`
In order to be able to ignore newline tokens when not relevant, but
still include them in the parse when needed, we will need to know
which states can start a "line-like" section of code. We ignore
newlines when there is an indent since the most recent start of a
-line-like section.
+line-like symbol.
-To know what is line-like, we first need to know which symbols can end
-a line-like section, which is precisely those which can end with a
-newline token. These symbols don't necessarily alway end with a
-newline, but they can. Hence they are not described as "lines" but
-only "line-like".
+A "line_like" symbol is simply any symbol that can derive a NEWLINE.
+If a symbol cannot derive a NEWLINE, then it is only part of a line -
+so is word-like. If it can derive a NEWLINE, then we consider it to
+be like a line.
-Clearly the `TK_newline` token can end with a newline. Any symbol
-which is the head of a production that contains a line-ending symbol
-followed only by nullable symbols is also a line-ending symbol. We
-use a new field `can_eol` to record this attribute of symbols, and
-compute it in a repetitive manner similar to `set_nullable`.
+Clearly the `TK_newline` token can derive a NEWLINE. Any symbol which
+is the head of a production that contains a line_like symbol is also a
+line-like symbol. We use a new field `line_like` to record this
+attribute of symbols, and compute it in a repetitive manner similar to
+`set_nullable`.
###### symbol fields
- int can_eol;
+ int line_like;
###### functions
- static void set_can_eol(struct grammar *g)
+ static void set_line_like(struct grammar *g)
{
int check_again = 1;
- g->symtab[TK_newline]->can_eol = 1;
+ g->symtab[TK_newline]->line_like = 1;
while (check_again) {
int p;
check_again = 0;
struct production *pr = g->productions[p];
int s;
- if (pr->head->can_eol)
+ if (pr->head->line_like)
continue;
- for (s = pr->body_size - 1; s >= 0; s--) {
- if (pr->body[s]->can_eol) {
- pr->head->can_eol = 1;
+ for (s = 0 ; s < pr->body_size; s++) {
+ if (pr->body[s]->line_like) {
+ pr->head->line_like = 1;
check_again = 1;
break;
}
- if (!pr->body[s]->nullable)
- break;
}
}
}
### Building the `first` sets
-When calculating what can follow a particular non-terminal, we will need to
-know what the "first" terminal in any subsequent non-terminal might be. So
-we calculate the `first` set for every non-terminal and store them in an
-array. We don't bother recording the "first" set for terminals as they are
-trivial.
+When calculating what can follow a particular non-terminal, we will need
+to know what the "first" terminal in any subsequent non-terminal might
+be. So we calculate the `first` set for every non-terminal and store
+them in an array. We don't bother recording the "first" set for
+terminals as they are trivial.
-As the "first" for one symbol might depend on the "first" of another,
-we repeat the calculations until no changes happen, just like with
-`set_nullable`. This makes use of the fact that `symset_union`
-reports if any change happens.
+As the "first" for one symbol might depend on the "first" of another, we
+repeat the calculations until no changes happen, just like with
+`set_nullable`. This makes use of the fact that `symset_union` reports
+if any change happens.
-The core of this which finds the "first" of part of a production body
-will be reused for computing the "follow" sets, so we split it out
-into a separate function.
+The core of this, which finds the "first" of part of a production body,
+will be reused for computing the "follow" sets, so we split that out
+into a separate function, `add_first`, which also reports if it got all
+the way to the end of the production without finding a non-nullable
+symbol.
###### grammar fields
struct symset *first;
### Building the `follow` sets.
-There are two different situations when we will want to generate "follow"
-sets. If we are doing an SLR analysis, we want to generate a single
-"follow" set for each non-terminal in the grammar. That is what is
-happening here. If we are doing an LALR or LR analysis we will want
-to generate a separate "LA" set for each item. We do that later
-in state generation.
+There are two different situations when we will want to generate
+"follow" sets. If we are doing an SLR analysis, we want to generate a
+single "follow" set for each non-terminal in the grammar. That is what
+is happening here. If we are doing an LALR or LR analysis we will want
+to generate a separate "LA" set for each item. We do that later in
+state generation.
There are two parts to generating a "follow" set. Firstly we look at
-every place that any non-terminal appears in the body of any
-production, and we find the set of possible "first" symbols after
-there. This is done using `add_first` above and only needs to be done
-once as the "first" sets are now stable and will not change.
+every place that any non-terminal appears in the body of any production,
+and we find the set of possible "first" symbols after there. This is
+done using `add_first` above and only needs to be done once as the
+"first" sets are now stable and will not change.
###### follow code
depends on "follow" itself we need to repeatedly perform the process
until no further changes happen.
+Rather than 2 nested loops, one that repeats the whole process until
+there is no change, and one that iterates of the set of productions, we
+combine these two functions into a single loop.
+
###### follow code
- for (again = 0, p = 0;
+ for (check_again = 0, p = 0;
p < g->production_count;
p < g->production_count-1
- ? p++ : again ? (again = 0, p = 0)
+ ? p++ : check_again ? (check_again = 0, p = 0)
: p++) {
struct production *pr = g->productions[p];
int b;
break;
if (symset_union(&g->follow[bs->num],
&g->follow[pr->head->num]))
- again = 1;
+ check_again = 1;
if (!bs->nullable)
break;
}
###### functions
static void build_follow(struct grammar *g)
{
- int s, again, p;
+ int s, check_again, p;
g->follow = calloc(g->num_syms, sizeof(g->follow[0]));
for (s = 0; s < g->num_syms; s++)
g->follow[s] = INIT_SYMSET;
There are three different levels of detail that can be chosen for
building the itemsets and states for the LR grammar. They are:
-1. LR(0) or SLR(1), where no look-ahead is considered.
+1. LR(0), LR(0.5), or SLR(1), where no look-ahead is included in the
+ itemsets - look-ahead, if used at all, is simply the 'follow' sets
+ already computed,
2. LALR(1) where we build look-ahead sets with each item and merge
the LA sets when we find two paths to the same "kernel" set of items.
3. LR(1) where different look-ahead for any item in the set means
- a different state must be created.
+ a different item set must be created.
###### forward declarations
enum grammar_type { LR0, LR05, SLR, LALR, LR1 };
list.
An item is a pair of numbers: the production number and the position of
-"DOT", which is an index into the body of the production.
-As the numbers are not enormous we can combine them into a single "short"
-and store them in a `symset` - 4 bits for "DOT" and 12 bits for the
-production number (so 4000 productions with maximum of 15 symbols in the
+"DOT", which is an index into the body of the production. As the
+numbers are not enormous we can combine them into a single "short" and
+store them in a `symset` - 5 bits for "DOT" and 11 bits for the
+production number (so 2000 productions with maximum of 31 symbols in the
body).
Comparing the itemsets will be a little different to comparing symsets
the first "0".
###### declarations
+	/* Pack a production number (low 11 bits) and a DOT position into
+	 * one unsigned short.  DOT is stored as (31-dot) in the top 5
+	 * bits — presumably so items with DOT further advanced compare
+	 * as smaller and sort first; confirm against the itemset
+	 * comparison code.
+	 */
-	static inline unsigned short item_num(int production, int index)
+	static inline unsigned short item_num(int production, int dot)
	{
-		return production | ((31-index) << 11);
+		return production | ((31-dot) << 11);
	}
+	/* Extract the production number from a packed item. */
	static inline int item_prod(unsigned short item)
	{
		return item & 0x7ff;
	}
-	static inline int item_index(unsigned short item)
+	static inline int item_dot(unsigned short item)
	{
+		/* Undo the (31-dot) encoding applied by item_num(). */
		return (31-(item >> 11)) & 0x1f;
	}
for (i = 0;
i < a.cnt && i < b.cnt &&
- item_index(a.syms[i]) > 0 &&
- item_index(b.syms[i]) > 0;
+ item_dot(a.syms[i]) > 0 &&
+ item_dot(b.syms[i]) > 0;
i++) {
int diff = a.syms[i] - b.syms[i];
if (diff)
return diff;
}
}
- if (i == a.cnt || item_index(a.syms[i]) == 0)
+ if (i == a.cnt || item_dot(a.syms[i]) == 0)
av = -1;
else
av = a.syms[i];
- if (i == b.cnt || item_index(b.syms[i]) == 0)
+ if (i == b.cnt || item_dot(b.syms[i]) == 0)
bv = -1;
else
bv = b.syms[i];
a.data[i] - b.data[i];
}
+It will be helpful to know if an itemset has been "completed" or not,
+particularly for LALR where itemsets get merged, at which point they
+need to be considered for completion again. So a `completed` flag is
+needed.
+
+For correct handling of `TK_newline` when parsing, we will need to
+know which states (itemsets) can occur at the start of a line, so we
+will record a `starts_line` flag too whenever DOT is at the start of a
+`line_like` symbol.
+
+Finally, for handling `TK_out` we need to know whether productions in the
+current state started *before* the most recent indent. A state
+doesn't usually keep details of individual productions, so we need to
+add one extra detail. `min_prefix` is the smallest non-zero number of
+symbols *before* DOT in any production in an itemset. This will allow
+us to determine if the most recent indent is sufficiently recent
+to cancel it against a `TK_out`. If it was seen longer ago than the
+`min_prefix`, and if the current state cannot be reduced, then the
+indented section must have ended in the middle of a syntactic unit, so
+an error must be signaled.
+
And now we can build the list of itemsets. The lookup routine returns
-both a success flag and a pointer to where in the list an insert
-should happen, so we don't need to search a second time.
+both a success flag and a pointer to where in the list an insert should
+happen, so we don't need to search a second time.
###### declarations
struct itemset {
short state;
struct symset items;
struct symset go_to;
+ enum assoc assoc;
+ unsigned short precedence;
char completed;
char starts_line;
+ int min_prefix;
};
###### grammar fields
}
Adding an itemset may require merging the LA sets if LALR analysis is
-happening. If any new LA set add symbol that weren't in the old LA set, we
-clear the `completed` flag so that the dependants of this itemset will be
-recalculated and their LA sets updated.
+happening. If any new LA set adds any symbols that weren't in the old
+LA set, we clear the `completed` flag so that the dependants of this
+itemset will be recalculated and their LA sets updated.
`add_itemset` must consume the symsets it is passed, either by adding
them to a data structure, of freeing them.
static int add_itemset(struct grammar *g, struct symset ss,
- enum grammar_type type, int starts_line)
+ enum assoc assoc, unsigned short precedence,
+ enum grammar_type type)
{
struct itemset **where, *is;
int i;
is->state = g->states;
g->states += 1;
is->items = ss;
+ is->assoc = assoc;
+ is->precedence = precedence;
is->next = *where;
is->go_to = INIT_DATASET;
- is->starts_line = starts_line;
*where = is;
return is->state;
}
#### The build
-To build all the itemsets, we first insert the initial itemset made from the
-start symbol, complete each itemset, and then generate new itemsets from old
-until no new ones can be made.
+To build all the itemsets, we first insert the initial itemset made from
+production zero, complete each itemset, and then generate new itemsets
+from old until no new ones can be made.
-Completing an itemset means finding all the items where "DOT" is followed by
-a nonterminal and adding "DOT=0" items for every production from that
-non-terminal - providing each item hasn't already been added.
+Completing an itemset means finding all the items where "DOT" is
+followed by a nonterminal and adding "DOT=0" items for every production
+from that non-terminal - providing each item hasn't already been added.
+When we add such an item it might get inserted before the current
+one, so to make sure we process it we reset the loop counter to the
+start.
If LA sets are needed, the LA set for each new item is found using
-`add_first` which was developed earlier for `FIRST` and `FOLLOW`. This may
-be supplemented by the LA set for the item which produce the new item.
-
-We also collect a set of all symbols which follow "DOT" (in `done`) as this
-is used in the next stage.
-
-NOTE: precedence handling should happen here - I haven't written this yet
-though.
+`add_first` which was developed earlier for `FIRST` and `FOLLOW`. This
+may be supplemented by the LA set for the item which produced the new
+item.
+
+We also collect a set of all symbols which follow "DOT" (in `done`) as
+this is used in the next stage. If any of these symbols are flagged as
+`line_like`, then this state must be a `starts_line` state so now is a
+good time to record that.
+
+When itemsets are created we assign a precedence to the itemset from the
+complete item, if there is one. We ignore the possibility of there
+being two and don't (currently) handle precedence in such grammars.
+When completing a grammar we ignore any item where DOT is followed by a
+terminal with a precedence lower than that for the itemset. Unless the
+terminal has right associativity, we also ignore items where the
+terminal has the same precedence. The result is that unwanted items are
+still in the itemset, but the terminal doesn't get into the go to set,
+so the item is ineffective.
###### complete itemset
for (i = 0; i < is->items.cnt; i++) {
int p = item_prod(is->items.syms[i]);
- int bs = item_index(is->items.syms[i]);
+ int bs = item_dot(is->items.syms[i]);
struct production *pr = g->productions[p];
int p2;
struct symbol *s;
struct symset LA = INIT_SYMSET;
unsigned short sn = 0;
+ struct symset LAnl = INIT_SYMSET;
+ unsigned short snnl = 0;
+ if (is->min_prefix == 0 ||
+ (bs > 0 && bs < is->min_prefix))
+ is->min_prefix = bs;
if (bs == pr->body_size)
continue;
s = pr->body[bs];
- if (symset_find(&done, s->num) < 0)
+ if (s->precedence && is->precedence &&
+ is->precedence > s->precedence)
+ /* This terminal has a low precedence and
+ * shouldn't be shifted
+ */
+ continue;
+ if (s->precedence && is->precedence &&
+ is->precedence == s->precedence && s->assoc != Right)
+ /* This terminal has a matching precedence and is
+ * not Right-associative, so we mustn't shift it.
+ */
+ continue;
+ if (symset_find(&done, s->num) < 0) {
symset_add(&done, s->num, 0);
+ }
if (s->type != Nonterminal)
continue;
- again = 1;
+ if (s->line_like)
+ is->starts_line = 1;
+ check_again = 1;
if (type >= LALR) {
// Need the LA set.
int to_end;
}
sn = save_set(g, LA);
LA = set_find(g, sn);
+ if (symset_find(&LA, TK_newline))
+ symset_add(&LAnl, TK_newline, 0);
+ snnl = save_set(g, LAnl);
+ LAnl = set_find(g, snnl);
}
/* Add productions for this symbol */
int itm = item_num(p2, 0);
int pos = symset_find(&is->items, itm);
if (pos < 0) {
- symset_add(&is->items, itm, sn);
+ if (g->productions[p2]->line_like)
+ symset_add(&is->items, itm, snnl);
+ else
+ symset_add(&is->items, itm, sn);
/* Will have re-ordered, so start
* from beginning again */
i = -1;
} else if (type >= LALR) {
struct symset ss = set_find(g, is->items.data[pos]);
struct symset tmp = INIT_SYMSET;
+ struct symset *la = &LA;
+ if (g->productions[p2]->line_like)
+ la = &LAnl;
symset_union(&tmp, &ss);
- if (symset_union(&tmp, &LA)) {
+ if (symset_union(&tmp, la)) {
is->items.data[pos] = save_set(g, tmp);
i = -1;
- }else
+ } else
symset_free(tmp);
}
}
}
-For each symbol we found (and placed in `done`) we collect all the items for
-which this symbol is next, and create a new itemset with "DOT" advanced over
-the symbol. This is then added to the collection of itemsets (or merged
-with a pre-existing itemset).
+For each symbol we found (and placed in `done`) we collect all the items
+for which this symbol is next, and create a new itemset with "DOT"
+advanced over the symbol. This is then added to the collection of
+itemsets (or merged with a pre-existing itemset).
###### derive itemsets
// Now we have a completed itemset, so we need to
for (i = 0; i < done.cnt; i++) {
int j;
unsigned short state;
- int starts_line = 0;
struct symbol *sym = g->symtab[done.syms[i]];
+ enum assoc assoc = Non;
+ unsigned short precedence = 0;
struct symset newitemset = INIT_SYMSET;
if (type >= LALR)
newitemset = INIT_DATASET;
- if (sym->can_eol ||
- (sym->nullable && is->starts_line))
- starts_line = 1;
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
- int bp = item_index(itm);
+ int bp = item_dot(itm);
struct production *pr = g->productions[p];
unsigned short la = 0;
int pos;
if (type >= LALR)
la = is->items.data[j];
pos = symset_find(&newitemset, pr->head->num);
+ if (bp + 1 == pr->body_size &&
+ pr->precedence > 0 &&
+ pr->precedence > precedence) {
+ // new itemset is reducible and has a precedence.
+ precedence = pr->precedence;
+ assoc = pr->assoc;
+ }
if (pos < 0)
symset_add(&newitemset, item_num(p, bp+1), la);
else if (type >= LALR) {
}
}
}
- state = add_itemset(g, newitemset, type, starts_line);
+ state = add_itemset(g, newitemset, assoc, precedence, type);
if (symset_find(&is->go_to, done.syms[i]) < 0)
symset_add(&is->go_to, done.syms[i], state);
}
-All that is left is to crate the initial itemset from production zero, and
+All that is left is to create the initial itemset from production zero, and
with `TK_eof` as the LA set.
###### functions
{
struct symset first = INIT_SYMSET;
struct itemset *is;
- int again;
+ int check_again;
unsigned short la = 0;
if (type >= LALR) {
// LA set just has eof
}
// production 0, offset 0 (with no data)
symset_add(&first, item_num(0, 0), la);
- add_itemset(g, first, type, g->productions[0]->body[0]->can_eol);
- for (again = 0, is = g->items;
+ add_itemset(g, first, Non, 0, type);
+ for (check_again = 0, is = g->items;
is;
- is = is->next ?: again ? (again = 0, g->items) : NULL) {
+ is = is->next ?: check_again ? (check_again = 0, g->items) : NULL) {
int i;
struct symset done = INIT_SYMSET;
if (is->completed)
continue;
is->completed = 1;
- again = 1;
+ check_again = 1;
## complete itemset
## derive itemsets
symset_free(done);
a report.
Once we have built everything we allocate arrays for the two lists:
-symbols and itemsets. This allows more efficient access during reporting.
-The symbols are grouped as terminals and non-terminals and we record the
-changeover point in `first_nonterm`.
+symbols and itemsets. This allows more efficient access during
+reporting. The symbols are grouped as terminals and then non-terminals,
+and we record the changeover point in `first_nonterm`.
###### grammar fields
struct symbol **symtab;
snum++;
}
g->first_nonterm = snum;
+ for (s = g->syms; s; s = s->next)
+ if (s->num < 0 && s->type != Virtual) {
+ s->num = snum;
+ snum++;
+ }
for (s = g->syms; s; s = s->next)
if (s->num < 0) {
s->num = snum;
g->symtab[s->num] = s;
set_nullable(g);
- set_can_eol(g);
+ set_line_like(g);
if (type >= SLR)
build_first(g);
The purpose of the report is to give the grammar developer insight into
how the grammar parser will work. It is basically a structured dump of
-all the tables that have been generated, plus an description of any conflicts.
+all the tables that have been generated, plus a description of any conflicts.
###### grammar_report
static int grammar_report(struct grammar *g, enum grammar_type type)
return report_conflicts(g, type);
}
-Firstly we have the complete list of symbols, together with the "FIRST"
-set if that was generated.
+Firstly we have the complete list of symbols, together with the
+"FIRST" set if that was generated. We add a mark to each symbol to
+show if it is considered to be "line-like" (`<`), or if it is nullable (`.`).
###### functions
printf(" %c%c%3d%c: ",
s->nullable ? '.':' ',
- s->can_eol ? '>':' ',
+ s->line_like ? '<':' ',
s->num, symtypes[s->type]);
prtxt(s->name);
if (s->precedence)
}
}
-Then we have to follow sets if they were computed.
+Then we have the follow sets if they were computed.
static void report_follow(struct grammar *g)
{
static void report_item(struct grammar *g, int itm)
{
int p = item_prod(itm);
- int dot = item_index(itm);
+ int dot = item_dot(itm);
struct production *pr = g->productions[p];
int i;
if (dot == pr->body_size)
printf(" .");
printf(" [%d]", p);
- if (pr->precedence)
+ if (pr->precedence && dot == pr->body_size)
printf(" (%d%s)", pr->precedence,
assoc_names[pr->assoc]);
+ if (dot < pr->body_size &&
+ pr->body[dot]->precedence) {
+ struct symbol *s = pr->body[dot];
+ printf(" [%d%s]", s->precedence,
+ assoc_names[s->assoc]);
+ }
+ if (pr->line_like == 1)
+ printf(" $$NEWLINE");
+ else if (pr->line_like)
+ printf(" $$OUT");
printf("\n");
}
Then the go to sets:
-
static void report_goto(struct grammar *g, struct symset gt)
{
int i;
for (s = 0; s < g->states; s++) {
int j;
struct itemset *is = g->statetab[s];
- printf(" Itemset %d:%s\n", s, is->starts_line?" (startsline)":"");
+ printf(" Itemset %d:%s min prefix=%d",
+ s, is->starts_line?" (startsline)":"", is->min_prefix);
+ if (is->precedence)
+ printf(" %d%s", is->precedence, assoc_names[is->assoc]);
+ printf("\n");
for (j = 0; j < is->items.cnt; j++) {
report_item(g, is->items.syms[j]);
if (is->items.data != NO_DATA)
### Reporting conflicts
Conflict detection varies a lot among different analysis levels. However
-LR0 and LR0.5 are quite similar - having no follow sets, and SLR, LALR and
+LR0 and LR0.5 are quite similar - having no follow sets - and SLR, LALR and
LR1 are also similar as they have FOLLOW or LA sets.
###### functions
}
LR0 conflicts are any state which have both a reducible item and
-a shiftable item.
+a shiftable item, or two reducible items.
-LR05 conflicts only occurs if two possibly reductions exist,
+LR05 conflicts only occur if two possible reductions exist,
as shifts always over-ride reductions.
###### conflict functions
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
- int bp = item_index(itm);
+ int bp = item_dot(itm);
struct production *pr = g->productions[p];
if (bp == pr->body_size) {
}
SLR, LALR, and LR1 conflicts happen if two reducible items have over-lapping
-look ahead, or if a symbol in a look-ahead can be shifted. The differ only
+look ahead, or if a symbol in a look-ahead can be shifted. They differ only
in the source of the look ahead set.
-We build a dataset mapping terminal to item for possible SHIFTs and then
-another for possible REDUCE operations. We report when we get conflicts
-between the two.
+We build two datasets to reflect the "action" table: one which maps
+terminals to items where that terminal could be shifted and another
+which maps terminals to items that could be reduced when the terminal
+is in look-ahead. We report when we get conflicts between the two.
+
+As a special case, if we find a SHIFT/REDUCE conflict on the NEWLINE
+terminal, we ignore it. NEWLINE is handled specially with its own
+rules for when to shift and when to reduce. Conflicts are expected,
+but handled internally.
static int conflicts_slr(struct grammar *g, enum grammar_type type)
{
for (j = 0; j < is->items.cnt; j++) {
unsigned short itm = is->items.syms[j];
int p = item_prod(itm);
- int bp = item_index(itm);
+ int bp = item_dot(itm);
struct production *pr = g->productions[p];
+ struct symbol *s;
- if (bp < pr->body_size &&
- pr->body[bp]->type == Terminal) {
- /* shiftable */
- int sym = pr->body[bp]->num;
- if (symset_find(&shifts, sym) < 0)
- symset_add(&shifts, sym, itm);
- }
+ if (bp >= pr->body_size ||
+ pr->body[bp]->type != Terminal)
+ /* not shiftable */
+ continue;
+
+ s = pr->body[bp];
+ if (s->precedence && is->precedence)
+ /* Precedence resolves this, so no conflict */
+ continue;
+
+ if (symset_find(&shifts, s->num) < 0)
+ symset_add(&shifts, s->num, itm);
}
- /* Now look for reduction and conflicts */
+ /* Now look for reductions and conflicts */
for (j = 0; j < is->items.cnt; j++) {
unsigned short itm = is->items.syms[j];
int p = item_prod(itm);
- int bp = item_index(itm);
+ int bp = item_dot(itm);
struct production *pr = g->productions[p];
if (bp < pr->body_size)
int k;
for (k = 0; k < la.cnt; k++) {
int pos = symset_find(&shifts, la.syms[k]);
- if (pos >= 0) {
+ if (pos >= 0 && la.syms[k] != TK_newline) {
printf(" State %d has SHIFT/REDUCE conflict on ", i);
- prtxt(g->symtab[la.syms[k]]->name);
+ cnt++;
+ prtxt(g->symtab[la.syms[k]]->name);
printf(":\n");
report_item(g, shifts.data[pos]);
report_item(g, itm);
- cnt++;
}
pos = symset_find(&reduce, la.syms[k]);
if (pos < 0) {
## Generating the parser
-The export part of the parser is the `parse_XX` function, where the name
+The exported part of the parser is the `parse_XX` function, where the name
`XX` is based on the name of the parser files.
This takes a `code_node`, a partially initialized `token_config`, and an
known words added and then is used with the `code_node` to initialize the
scanner.
-`parse_XX` then call the library function `parser_run` to actually complete
-the parse. This needs the `states` table and function to call the various
+`parse_XX` then calls the library function `parser_run` to actually complete
+the parse. This needs the `states` table and functions to call the various
pieces of code provided in the grammar file, so they are generated first.
###### parser_generate
- static void gen_parser(FILE *f, struct grammar *g, char *file, char *name)
+ static void gen_parser(FILE *f, struct grammar *g, char *file, char *name,
+ struct code_node *pre_reduce)
{
gen_known(f, g);
gen_non_term(f, g);
gen_goto(f, g);
gen_states(f, g);
- gen_reduce(f, g, file);
+ gen_reduce(f, g, file, pre_reduce);
gen_free(f, g);
fprintf(f, "#line 0 \"gen_parser\"\n");
fprintf(f, "\tstruct token_state *tokens;\n");
fprintf(f, "\tconfig->words_marks = known;\n");
fprintf(f, "\tconfig->known_count = sizeof(known)/sizeof(known[0]);\n");
- fprintf(f, "\tconfig->ignored |= (1 << TK_line_comment) | (1 << TK_block_comment);\n");
fprintf(f, "\ttokens = token_open(code, config);\n");
- fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config->known_count);\n");
+ fprintf(f, "\tvoid *rv = parser_run(tokens, states, do_reduce, do_free, trace, non_term, config);\n");
fprintf(f, "\ttoken_close(tokens);\n");
fprintf(f, "\treturn rv;\n");
fprintf(f, "}\n\n");
}
-### Table words table
+### Known words table
-The know words is simply an array of terminal symbols.
+The known words table is simply an array of terminal symbols.
The table of nonterminals used for tracing is a similar array.
###### functions
### States and the goto tables.
-For each state we record the goto table, the reducible production if
-there is one, or a symbol to shift for error recovery.
+For each state we record the goto table and details of the reducible
+production if there is one.
Some of the details of the reducible production are stored in the
-`do_reduce` function to come later. Here we store the production number,
-the body size (useful for stack management) and the resulting symbol (useful
-for knowing how to free data later).
+`do_reduce` function to come later. Here we store the production
+number, the body size (useful for stack management), and the resulting
+symbol (useful for knowing how to free data later).
The go to table is stored in a simple array of `sym` and corresponding
`state`.
short reduce_prod;
short reduce_size;
short reduce_sym;
- short shift_sym;
- short starts_line;
+ char starts_line;
+ char newline_only;
+ short min_prefix;
};
-
###### functions
static void gen_goto(FILE *f, struct grammar *g)
for (i = 0; i < g->states; i++) {
struct itemset *is = g->statetab[i];
int j, prod = -1, prod_len;
- int shift_sym = -1;
- int shift_len = 0, shift_remain = 0;
+
for (j = 0; j < is->items.cnt; j++) {
int itm = is->items.syms[j];
int p = item_prod(itm);
- int bp = item_index(itm);
+ int bp = item_dot(itm);
struct production *pr = g->productions[p];
- if (bp < pr->body_size) {
- if (shift_sym < 0 ||
- (shift_len == bp && shift_remain > pr->body_size - bp)) {
- shift_sym = pr->body[bp]->num;
- shift_len = bp;
- shift_remain = pr->body_size - bp;
- }
+ if (bp < pr->body_size)
continue;
- }
- /* This is what we reduce */
+ /* This is what we reduce - choose longest */
if (prod < 0 || prod_len < pr->body_size) {
prod = p;
prod_len = pr->body_size;
}
if (prod >= 0)
- fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, 0, %d },\n",
+ fprintf(f, "\t[%d] = { %d, goto_%d, %d, %d, %d, %d, %d, %d },\n",
i, is->go_to.cnt, i, prod,
g->productions[prod]->body_size,
g->productions[prod]->head->num,
- is->starts_line);
+ is->starts_line,
+ g->productions[prod]->line_like,
+ is->min_prefix);
else
- fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, %d },\n",
- i, is->go_to.cnt, i, shift_sym,
- is->starts_line);
+ fprintf(f, "\t[%d] = { %d, goto_%d, -1, -1, -1, %d, 0, %d },\n",
+ i, is->go_to.cnt, i,
+ is->starts_line, is->min_prefix);
}
fprintf(f, "};\n\n");
}
### The `do_reduce` function and the code
-When the parser engine decides to reduce a production, it calls `do_reduce`.
-This has two functions.
+When the parser engine decides to reduce a production, it calls
+`do_reduce` which runs the code that was included with the production,
+if any.
+
+This code needs to be able to store data somewhere. Rather than
+requiring `do_reduce` to `malloc` that "somewhere", we pass in a large
+buffer and have `do_reduce` return the size to be saved.
+
+In order for the code to access "global" context, we pass in the
+"config" pointer that was passed to the parser function. If the `struct
+token_config` is embedded in some larger structure, the reducing code
+can access the larger structure using pointer manipulation. Performing
+that pointer manipulation, and any other common code or variables for
+all reduce actions, can be provided in a code node called "reduce" which
+is passed around in `pre_reduce` which you might have already noticed.
+
+The code fragments require translation when written out. Any `$N` needs
+to be converted to a reference either to that buffer (if `$0`) or to the
+structure returned by a previous reduction. These pointers need to be
+cast to the appropriate type for each access. All this is handled in
+`gen_code`.
-Firstly, if a non-NULL `trace` file is passed, it prints out details of the
-production being reduced. Secondly it runs the code that was included with
-the production if any.
+`gen_code` also allows symbol references to contain a '`<`' as in
+'`$<2`'. This is particularly useful for references (or pointers), but
+can be used with structures too. The `<` implies that the value is
+being moved out, so the object will not be automatically freed. It is
+equivalent to assigning `NULL` to the pointer or filling a structure
+with zeros.
+
+Instead of a number `N`, the `$` or `$<` can be followed by some letters
+and possibly a number. A number by itself (other than zero) selects a
+symbol from the body of the production. A sequence of letters selects
+the shortest symbol in the body which contains those letters in the
+given order. If a number follows the letters, then a later occurrence
+of that symbol is chosen. So "`$AB2`" will refer to the structure
+attached to the second occurrence of the shortest symbol which contains
+an `A` followed by a `B`. If there is no unique shortest symbol, or if
+the number given is too large, then the symbol reference is not
+transformed, and will cause an error when the code is compiled.
-This code needs to be able to store data somewhere. Rather than requiring
-`do_reduce` to `malloc` that "somewhere", we pass in a large buffer and have
-`do_reduce` return the size to be saved.
+###### functions
-The code fragment requires translation when written out. Any `$N` needs to
-be converted to a reference either to that buffer (if `$0`) or to the
-structure returned by a previous reduction. These pointer need to be cast
-to the appropriate type for each access. All this is handling in
-`gen_code`.
+	// Return the index of the first occurrence of 'c' in 't' at or
+	// after position 's', or -1 if there is none.
+	static int textchr(struct text t, char c, int s)
+	{
+		int i;
+		for (i = s; i < t.len; i++)
+			if (t.txt[i] == c)
+				return i;
+		return -1;
+	}
+	// Test whether the 'slen' characters at 'seq' occur in 'name' as a
+	// subsequence: in the given order, but not necessarily adjacent.
+	// Returns 1 on a match, 0 otherwise.  An empty sequence matches.
+	static int subseq_match(char *seq, int slen, struct text name)
+	{
+		int st = 0;
+		while (slen > 0) {
+			// Find this character at or after where the previous
+			// one matched, then continue just past it.
+			st = textchr(name, *seq, st);
+			if (st < 0)
+				return 0;
+			slen -= 1;
+			seq += 1;
+			st += 1;
+		}
+		return 1;
+	}
-###### functions
+	// Resolve a symbol reference (the text after "$" or "$<") against
+	// production 'p'.  *namep points at the reference, 'len' is the
+	// bytes available.  A bare number N selects body symbol N ($0 is
+	// the head).  A run of letters selects the unique shortest body
+	// symbol containing those letters as a subsequence; an optional
+	// trailing number selects a later occurrence of that symbol.
+	// On success *namep is advanced past the reference and the 1-based
+	// body index (or 0) is returned; on failure -1 is returned and
+	// *namep is left unchanged.
+	static int choose_sym(char **namep, int len, struct production *p)
+	{
+		char *name = *namep;
+		char *nam = name;
+		int namlen;
+		int n = 0;
+		int i, s, slen;
+		char c;
+
+		c = *name;
+		while (len > 0 &&
+		       ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
+			name += 1;
+			len -= 1;
+			c = *name;
+		}
+		namlen = name-nam;
+		while (len > 0 && (c >= '0' && c <= '9')) {
+			name += 1;
+			len -= 1;
+			n = n * 10 + (c - '0');
+			c = *name;
+		}
+		if (namlen == 0) {
+			// Purely numeric reference; an empty reference fails.
+			if (name == *namep)
+				return -1;
+			*namep = name;
+			return n;
+		}
+		// Find the unique shortest body symbol matching the letters.
+		slen = 0; s = -1;
+		for (i = 0; i < p->body_size; i++) {
+			if (!subseq_match(nam, namlen, p->body[i]->name))
+				continue;
+			if (slen == 0 || p->body[i]->name.len < slen) {
+				s = i;
+				// Track the winning length; previously 'slen'
+				// was never set, so the last match, not the
+				// shortest, was chosen.
+				slen = p->body[i]->name.len;
+			}
+			if (s >= 0 && p->body[i] != p->body[s] &&
+			    p->body[i]->name.len == p->body[s]->name.len)
+				/* not unique, so s cannot be used */
+				s = -1;
+		}
+		if (s < 0)
+			return -1;
+		if (n == 0)
+			// No explicit occurrence number: take the first.
+			// (A stray ';' here used to force n to 1 always.)
+			n = 1;
+		for (i = 0; i < p->body_size; i++)
+			if (p->body[i] == p->body[s]) {
+				n -= 1;
+				if (n == 0)
+					break;
+			}
+		if (n > 0)
+			// Fewer occurrences exist than were requested, so the
+			// reference is not transformed.  (The old 'n > 1' test
+			// let n == 1 return an out-of-range body index.)
+			return -1;
+		*namep = name;
+		return i + 1;
+	}
static void gen_code(struct production *p, FILE *f, struct grammar *g)
{
char *c;
+ char *used = calloc(1, p->body_size);
+ int i;
+
fprintf(f, "\t\t\t");
for (c = p->code.txt; c < p->code.txt + p->code.len; c++) {
int n;
+ int use = 0;
if (*c != '$') {
fputc(*c, f);
if (*c == '\n')
continue;
}
c++;
- if (*c < '0' || *c > '9') {
+ if (*c == '<') {
+ use = 1;
+ c++;
+ }
+ n = choose_sym(&c, p->code.txt + p->code.len - c, p);
+ if (n < 0) {
+ fputc('$', f);
+ if (use)
+ fputc('<', f);
fputc(*c, f);
continue;
}
- n = *c - '0';
- while (c[1] >= '0' && c[1] <= '9') {
- c += 1;
- n = n * 10 + *c - '0';
- }
if (n == 0)
- fprintf(f, "(*(struct %.*s*)ret)",
+ fprintf(f, "(*(struct %.*s*%s)ret)",
p->head->struct_name.len,
- p->head->struct_name.txt);
- else if (n > p->body_size)
- fprintf(f, "$%d", n);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
else if (p->body[n-1]->type == Terminal)
fprintf(f, "(*(struct token *)body[%d])",
n-1);
else if (p->body[n-1]->struct_name.txt == NULL)
fprintf(f, "$%d", n);
- else
- fprintf(f, "(*(struct %.*s*)body[%d])",
+ else {
+ fprintf(f, "(*(struct %.*s*%s)body[%d])",
p->body[n-1]->struct_name.len,
- p->body[n-1]->struct_name.txt, n-1);
+ p->body[n-1]->struct_name.txt,
+ p->body[n-1]->isref ? "*":"", n-1);
+ used[n-1] = use;
+ }
+ c -= 1;
}
fputs("\n", f);
+ for (i = 0; i < p->body_size; i++) {
+ if (p->body[i]->struct_name.txt &&
+ used[i]) {
+ // assume this has been copied out
+ if (p->body[i]->isref)
+ fprintf(f, "\t\t*(void**)body[%d] = NULL;\n", i);
+ else
+ fprintf(f, "\t\tmemset(body[%d], 0, sizeof(struct %.*s));\n", i, p->body[i]->struct_name.len, p->body[i]->struct_name.txt);
+ }
+ }
+ free(used);
}
###### functions
- static void gen_reduce(FILE *f, struct grammar *g, char *file)
+ static void gen_reduce(FILE *f, struct grammar *g, char *file,
+ struct code_node *code)
{
int i;
- fprintf(f, "#line 0 \"gen_reduce\"\n");
- fprintf(f, "static int do_reduce(int prod, void **body, void *ret)\n");
+ fprintf(f, "#line 1 \"gen_reduce\"\n");
+ fprintf(f, "static int do_reduce(int prod, void **body, struct token_config *config, void *ret)\n");
fprintf(f, "{\n");
fprintf(f, "\tint ret_size = 0;\n");
+ if (code)
+ code_node_print(f, code, file);
+ fprintf(f, "#line 4 \"gen_reduce\"\n");
fprintf(f, "\tswitch(prod) {\n");
for (i = 0; i < g->production_count; i++) {
struct production *p = g->productions[i];
fprintf(f, "\tcase %d:\n", i);
- if (p->code.txt)
+ if (p->code.txt) {
+ fprintf(f, "#line %d \"%s\"\n", p->code_line, file);
gen_code(p, f, g);
+ }
if (p->head->struct_name.txt)
- fprintf(f, "\t\tret_size = sizeof(struct %.*s);\n",
+ fprintf(f, "\t\tret_size = sizeof(struct %.*s%s);\n",
p->head->struct_name.len,
- p->head->struct_name.txt);
+ p->head->struct_name.txt,
+ p->head->isref ? "*":"");
fprintf(f, "\t\tbreak;\n");
}
It is particularly important to have fine control over freeing during error
recovery where individual stack frames might need to be freed.
-For this, the grammar author required to defined a `free_XX` function for
-each structure that is used by a non-terminal. `do_free` all call whichever
+For this, the grammar author is required to define a `free_XX` function for
+each structure that is used by a non-terminal. `do_free` will call whichever
is appropriate given a symbol number, and will call `free` (as is
-appropriate for tokens` on any terminal symbol.
+appropriate for tokens) on any terminal symbol.
###### functions
continue;
fprintf(f, "\tcase %d:\n", s->num);
- fprintf(f, "\t\tfree_%.*s(asn);\n",
- s->struct_name.len,
- s->struct_name.txt);
+ if (s->isref) {
+ fprintf(f, "\t\tfree_%.*s(*(void**)asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
+ fprintf(f, "\t\tfree(asn);\n");
+ } else
+ fprintf(f, "\t\tfree_%.*s(asn);\n",
+ s->struct_name.len,
+ s->struct_name.txt);
fprintf(f, "\t\tbreak;\n");
}
fprintf(f, "\t}\n}\n\n");
{ "SLR", 0, NULL, 'S' },
{ "LALR", 0, NULL, 'L' },
{ "LR1", 0, NULL, '1' },
+ { "tag", 1, NULL, 't' },
{ "report", 0, NULL, 'R' },
{ "output", 1, NULL, 'o' },
{ NULL, 0, NULL, 0 }
};
- const char *options = "05SL1Ro:";
+ const char *options = "05SL1t:Ro:";
###### process arguments
int opt;
char *outfile = NULL;
char *infile;
char *name;
+ char *tag = NULL;
int report = 1;
enum grammar_type type = LR05;
while ((opt = getopt_long(argc, argv, options,
report = 2; break;
case 'o':
outfile = optarg; break;
+ case 't':
+ tag = optarg; break;
default:
fprintf(stderr, "Usage: parsergen ...\n");
exit(1);
To be able to run `mdcode` and `scanner` on the grammar we need to memory
map it.
-One we have extracted the code (with `mdcode`) we expect to file three
-sections: header, code, and grammar. Anything else is an error.
+Once we have extracted the code (with `mdcode`) we expect to find three
+or four sections: header, code, grammar, reduce.
+Anything else that is not excluded by the `--tag` option is an error.
-"header" and "code" are optional, though it is hard to build a working
-parser with neither. "grammar" must be provided.
+"header", "code", and "reduce" are optional, though it is hard to build
+a working parser without the first two. "grammar" must be provided.
###### includes
#include <fcntl.h>
struct code_node *hdr = NULL;
struct code_node *code = NULL;
struct code_node *gram = NULL;
+ struct code_node *pre_reduce = NULL;
for (s = table; s; s = s->next) {
- if (text_is(s->section, "header"))
+ struct text sec = s->section;
+ if (tag && !strip_tag(&sec, tag))
+ continue;
+ if (text_is(sec, "header"))
hdr = s->code;
- else if (text_is(s->section, "code"))
+ else if (text_is(sec, "code"))
code = s->code;
- else if (text_is(s->section, "grammar"))
+ else if (text_is(sec, "grammar"))
gram = s->code;
+ else if (text_is(sec, "reduce"))
+ pre_reduce = s->code;
else {
fprintf(stderr, "Unknown content section: %.*s\n",
s->section.len, s->section.txt);
rv |= 1;
}
-If a headers section is defined, we write it out and include a declaration
+If a "headers" section is defined, we write it out and include a declaration
for the `parse_XX` function so it can be used from separate code.
if (rv == 0 && hdr && outfile) {
if (f) {
if (code)
code_node_print(f, code, infile);
- gen_parser(f, g, infile, name);
+ gen_parser(f, g, infile, name, pre_reduce);
fclose(f);
} else {
fprintf(stderr, "Cannot create %s.c\n",
}
And that about wraps it up. We need to set the locale so that UTF-8 is
-recognised properly, and link with `libicuuc` is `libmdcode` requires that.
+recognised properly, and link with `libicuuc` as `libmdcode` requires that.
###### File: parsergen.mk
parsergen : parsergen.o libscanner.o libmdcode.o
## The SHIFT/REDUCE parser
-Having analysed the grammar and generated all the table, we only need the
-shift/reduce engine to bring it all together.
+Having analysed the grammar and generated all the tables, we only need
+the shift/reduce engine to bring it all together.
### Goto table lookup
return -1;
}
-### The state stack.
+### Memory allocation
-The core data structure for the parser is the stack. This tracks all the
-symbols that have been recognised or partially recognised.
+The `scanner` returns tokens in a local variable - we want them in allocated
+memory so they can live in the `asn_stack`. Similarly the `asn` produced by
+a reduce is in a large buffer. Both of these require some allocation and
+copying, hence `memdup` and `tok_copy`.
+
+###### parser includes
+ #include <memory.h>
+
+###### parser functions
+
+	// Duplicate 'len' bytes from 'm' into freshly allocated memory so
+	// the copy can live on the asn_stack.  Returns NULL when len is 0.
+	// NOTE(review): the malloc result is not checked; on allocation
+	// failure memcpy dereferences NULL - confirm OOM policy.
+	void *memdup(void *m, int len)
+	{
+		void *ret;
+		if (len == 0)
+			return NULL;
+		ret = malloc(len);
+		memcpy(ret, m, len);
+		return ret;
+	}
-The stack usually won't grow very large - maybe a few tens of entries. So
-we dynamically resize and array as required but never bother to shrink it
-down again.
+	// Copy a by-value token from the scanner into allocated memory so
+	// it can be stored on the asn_stack.
+	// NOTE(review): malloc result is not checked before the store.
+	static struct token *tok_copy(struct token tk)
+	{
+		struct token *new = malloc(sizeof(*new));
+		*new = tk;
+		return new;
+	}
-We keep the stack as two separate allocations. One, `asn_stack` stores the
-"abstract syntax nodes" which are created by each reduction. When we call
-`do_reduce` we need to pass an array of the `asn`s of the body of the
-production, and by keeping a separate `asn` stack, we can just pass a
-pointer into this stack.
+### The state stack.
-The other allocation stores all other stack fields of which there are four.
-The `state` is the most important one and guides the parsing process. The
-`sym` is nearly unnecessary. However when we want to free entries from the
-`asn_stack`, it helps to know what type they are so we can call the right
-freeing function. The symbol leads us to the right free function through
+The core data structure for the parser is the stack. This tracks all
+the symbols that have been recognised or partially recognised.
+
+The stack usually won't grow very large - maybe a few tens of entries.
+So we dynamically grow an array as required but never bother to
+shrink it down again.
+
+We keep the stack as two separate allocations. One, `asn_stack` stores
+the "abstract syntax nodes" which are created by each reduction. When
+we call `do_reduce` we need to pass an array of the `asn`s of the body
+of the production, and by keeping a separate `asn` stack, we can just
+pass a pointer into this stack.
+
+The other allocation stores all other stack fields of which there are
+several. The `state` is the most important one and guides the parsing
+process. The `sym` is nearly unnecessary as it is implicit in the
+state. However when we want to free entries from the `asn_stack`, it
+helps to know what type they are so we can call the right freeing
+function. The symbol leads us to the right free function through
`do_free`.
-The `indents` count and the `starts_indented` flag track the line
-indents in the symbol. These are used to allow indent information to
+The `indents` count tracks the line indents within the symbol or
+immediately following it. These are used to allow indent information to
guide parsing and error recovery.
-As well as the stack of frames we have a `next` frame which is
-assembled from the incoming token and other information prior to
-pushing it onto the stack.
+`since_newline` tracks how many stack frames since the last
+start-of-line (whether indented or not). So if `since_newline` is
+zero, then this symbol is at the start of a line. Similarly
+`since_indent` counts the number of states since an indent; it is zero
+precisely when `indents` is not zero.
+
+`newline_permitted` keeps track of whether newlines should be ignored
+or not.
+
+The stack is most properly seen as alternating states and symbols -
+states, like the 'DOT' in items, are between symbols. Each frame in
+our stack holds a state and the symbol that was before it. The
+bottom of stack holds the start state but no symbol, as nothing came
+before the beginning. As we need to store some value, `TK_eof` is used
+to mark the beginning of the file as well as the end.
###### parser functions
struct parser {
struct frame {
short state;
+ short newline_permitted;
+
short sym;
- short starts_indented;
short indents;
- short newline_permitted;
- } *stack, next;
+ short since_newline;
+ short since_indent;
+ } *stack;
void **asn_stack;
int stack_size;
int tos;
Two operations are needed on the stack - shift (which is like push) and pop.
Shift applies not only to terminals but also to non-terminals. When we
-reduce a production we will pop off entries corresponding to the body
-symbols, then push on an item for the head of the production. This last is
-exactly the same process as shifting in a terminal so we use the same
-function for both.
+reduce a production we will pop off frames corresponding to the body
+symbols, then push on a frame for the head of the production. This last
+is exactly the same process as shifting in a terminal so we use the same
+function for both. In both cases we provide the symbol, the number of
+indents the symbol contains (which will be zero for a terminal symbol)
+and a flag indicating that the symbol was at (or was reduced from a
+symbol which was at) the start of a line. The state is deduced from the
+current top-of-stack state and the new symbol.
To simplify other code we arrange for `shift` to fail if there is no `goto`
state for the symbol. This is useful in basic parsing due to our design
that we shift when we can, and reduce when we cannot. So the `shift`
function reports if it could.
-So `shift` finds the next state. If that succeed it extends the allocations
-if needed and pushes all the information onto the stacks.
+`shift` is also used to push state zero onto the stack, so if the
+stack is empty, it always chooses zero as the next state.
+
+So `shift` finds the next state. If that succeeds it extends the
+allocations if needed and pushes all the information onto the stacks.
+
+Newlines are permitted after a `starts_line` state until an internal
+indent. If the new frame has neither a `starts_line` state nor an
+indent, newlines are permitted if the previous stack frame permitted
+them.
###### parser functions
static int shift(struct parser *p,
+ short sym, short indents, short start_of_line,
void *asn,
const struct state states[])
{
// Push an entry onto the stack
- int newstate = search(&states[p->next.state], p->next.sym);
+ struct frame next = {0};
+ int newstate = p->tos
+ ? search(&states[p->stack[p->tos-1].state],
+ sym)
+ : 0;
if (newstate < 0)
return 0;
if (p->tos >= p->stack_size) {
p->asn_stack = realloc(p->asn_stack, p->stack_size
* sizeof(p->asn_stack[0]));
}
- p->stack[p->tos] = p->next;
+ next.sym = sym;
+ next.indents = indents;
+ next.state = newstate;
+ if (states[newstate].starts_line)
+ next.newline_permitted = 1;
+ else if (indents)
+ next.newline_permitted = 0;
+ else if (p->tos)
+ next.newline_permitted =
+ p->stack[p->tos-1].newline_permitted;
+ else
+ next.newline_permitted = 0;
+
+ if (!start_of_line) {
+ if (p->tos)
+ next.since_newline = p->stack[p->tos-1].since_newline + 1;
+ else
+ next.since_newline = 1;
+ }
+ if (indents)
+ next.since_indent = 0;
+ else if (p->tos)
+ next.since_indent = p->stack[p->tos-1].since_indent + 1;
+ else
+ next.since_indent = 1;
+
+ p->stack[p->tos] = next;
p->asn_stack[p->tos] = asn;
p->tos++;
- p->next.state = newstate;
- p->next.indents = 0;
- p->next.starts_indented = 0;
- // if new state doesn't start a line, we inherit newline_permitted status
- if (states[newstate].starts_line)
- p->next.newline_permitted = 1;
return 1;
}
-`pop` simply moves the top of stack (`tos`) back down the required amount
-and frees any `asn` entries that need to be freed. It is called _after_ we
-reduce a production, just before we `shift` the nonterminal in.
+`pop` primarily moves the top of stack (`tos`) back down the required
+amount and frees any `asn` entries that need to be freed. It also
+collects a summary of the indents and line starts in the symbols that
+are being removed. It is called _after_ we reduce a production, just
+before we `shift` the nonterminal in.
###### parser functions
- static void pop(struct parser *p, int num,
- void(*do_free)(short sym, void *asn))
+ static int pop(struct parser *p, int num,
+ short *start_of_line,
+ void(*do_free)(short sym, void *asn))
{
int i;
+ short indents = 0;
+ int sol = 0;
+
p->tos -= num;
for (i = 0; i < num; i++) {
- p->next.indents += p->stack[p->tos+i].indents;
+ sol |= !p->stack[p->tos+i].since_newline;
+ indents += p->stack[p->tos+i].indents;
do_free(p->stack[p->tos+i].sym,
p->asn_stack[p->tos+i]);
}
-
- if (num) {
- p->next.state = p->stack[p->tos].state;
- p->next.starts_indented = p->stack[p->tos].starts_indented;
- p->next.newline_permitted = p->stack[p->tos].newline_permitted;
- if (p->next.indents > p->next.starts_indented)
- p->next.newline_permitted = 0;
- }
- }
-
-### Memory allocation
-
-The `scanner` returns tokens in a local variable - we want them in allocated
-memory so they can live in the `asn_stack`. Similarly the `asn` produced by
-a reduce is in a large buffer. Both of these require some allocation and
-copying, hence `memdup` and `tokcopy`.
-
-###### parser includes
- #include <memory.h>
-
-###### parser functions
-
- void *memdup(void *m, int len)
- {
- void *ret;
- if (len == 0)
- return NULL;
- ret = malloc(len);
- memcpy(ret, m, len);
- return ret;
- }
-
- static struct token *tok_copy(struct token tk)
- {
- struct token *new = malloc(sizeof(*new));
- *new = tk;
- return new;
+ if (start_of_line)
+ *start_of_line = sol;
+ return indents;
}
### The heart of the parser.
-Now we have the parser. If we can shift, we do. If not and we can reduce
-we do. If the production we reduced was production zero, then we have
-accepted the input and can finish.
+Now we have the parser. For each token we might shift it, trigger a
+reduction, or start error handling. 2D tokens (IN, OUT, EOL) also need
+to be handled.
We return whatever `asn` was returned by reducing production zero.
If we can neither shift nor reduce we have an error to handle. We pop
-single entries off the stack until we can shift the `TK_error` symbol, then
-drop input tokens until we find one we can shift into the new error state.
+single entries off the stack until we can shift the `TK_error` symbol,
+then drop input tokens until we find one we can shift into the new error
+state. We need to ensure that something is dropped or shifted after an
+error, or we could get into an infinite loop, only shifting in
+`TK_error`, then reducing. So we track if there has been a shift since
+the last error, and if not the next error always discards one token.
When we find `TK_in` and `TK_out` tokens which report indents we need
to handle them directly as the grammar cannot express what we want to
do with them.
-`TK_in` tokens are easy: we simply update the `next` stack frame to
-record how many indents there are and that the next token started with
-an indent.
-
-`TK_out` tokens must either be counted off against any pending indent,
-or must force reductions until there is a pending indent which isn't
-at the start of a production.
+`TK_in` tokens are easy: we simply update the indent count in the top stack frame to
+record how many indents there are following the previous token.
+
+`TK_out` tokens must be cancelled against an indent count
+within the stack.  If we can reduce some symbols that are all since
+the most recent indent, then we do that first.  If the minimum prefix
+of the current state then extends back before the most recent indent,
+that indent can be cancelled.  If the minimum prefix is shorter, then
+the indent had ended prematurely and we must start error handling, which
+is still a work-in-progress.
+
+`TK_newline` tokens are ignored unless the top stack frame records
+that they are permitted. In that case they will not be considered for
+shifting if it is possible to reduce some symbols that are all since
+the most recent start of line. This is how a newline forcibly
+terminates any line-like structure - we try to reduce down to at most
+one symbol for each line where newlines are allowed.
+A consequence of this is that a rule like
+
+###### Example: newlines - broken
+
+ Newlines ->
+ | NEWLINE Newlines
+ IfStatement -> Newlines if ....
+
+cannot work, as the NEWLINE will never be shifted as the empty string
+will be reduced first.  Optional sets of newlines need to be included
+in the thing that precedes:
+
+###### Example: newlines - works
+
+ If -> if
+ | NEWLINE If
+ IfStatement -> If ....
+
+Here the NEWLINE will be shifted because nothing can be reduced until
+the `if` is seen.
+
+When during error handling we discard tokens read in, we want to keep
+discarding until we see one that is recognised. If we had a full set
+of LR(1) grammar states, this would mean looking in the look-ahead set,
+but we don't keep a full look-ahead set. We only record the subset
+that leads to SHIFT. We can, however, deduce the look-ahead set by
+looking at the SHIFT subsets for all states that we can get to by
+reducing zero or more times. So we need a little function which
+checks if a given token is in any of these look-ahead sets.
###### parser includes
#include "parser.h"
+
###### parser_run
+
+ static int in_lookahead(struct token *tk, const struct state *states, int state)
+ {
+ while (state >= 0) {
+ if (search(&states[state], tk->num) >= 0)
+ return 1;
+ if (states[state].reduce_prod < 0)
+ return 0;
+ state = search(&states[state], states[state].reduce_sym);
+ }
+ return 0;
+ }
+
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns)
+ FILE *trace, const char *non_term[],
+ struct token_config *config)
{
struct parser p = { 0 };
struct token *tk = NULL;
int accepted = 0;
- void *ret;
+ int shift_since_err = 1;
+ void *ret = NULL;
- p.next.newline_permitted = states[0].starts_line;
- while (!accepted) {
+ shift(&p, TK_eof, 0, 1, NULL, states);
+ while (!accepted && p.tos > 0) {
struct token *err_tk;
+ struct frame *tos = &p.stack[p.tos-1];
if (!tk)
tk = tok_copy(token_next(tokens));
- p.next.sym = tk->num;
- if (trace)
- parser_trace(trace, &p, tk, states, non_term, knowns);
-
- if (p.next.sym == TK_in) {
- p.next.starts_indented = 1;
- p.next.indents = 1;
+ parser_trace(trace, &p,
+ tk, states, non_term, config->known_count);
+
+ if (tk->num == TK_in) {
+ tos->indents += 1;
+ tos->since_newline = 0;
+ tos->since_indent = 0;
+ if (!states[tos->state].starts_line)
+ tos->newline_permitted = 0;
free(tk);
tk = NULL;
+ parser_trace_action(trace, "Record");
continue;
}
- if (p.next.sym == TK_out) {
- if (p.stack[p.tos-1].indents > p.stack[p.tos-1].starts_indented ||
- (p.stack[p.tos-1].indents == 1 &&
- states[p.next.state].reduce_size > 1)) {
- p.stack[p.tos-1].indents -= 1;
- if (p.stack[p.tos-1].indents == p.stack[p.tos-1].starts_indented) {
- // no internal indent any more, reassess 'newline_permitted'
- if (states[p.stack[p.tos-1].state].starts_line)
- p.stack[p.tos-1].newline_permitted = 1;
- else if (p.tos > 1)
- p.stack[p.tos-1].newline_permitted = p.stack[p.tos-2].newline_permitted;
+ if (tk->num == TK_out) {
+ if (states[tos->state].reduce_size >= 0 &&
+ states[tos->state].reduce_size <= tos->since_indent)
+ goto force_reduce;
+ if (states[tos->state].min_prefix >= tos->since_indent) {
+ // OK to cancel
+ struct frame *in = tos - tos->since_indent;
+ in->indents -= 1;
+ if (in->indents == 0) {
+ /* Reassess since_indent and newline_permitted */
+ if (in > p.stack) {
+ in->since_indent = in[-1].since_indent + 1;
+ in->newline_permitted = in[-1].newline_permitted;
+ } else {
+ in->since_indent = 0;
+ in->newline_permitted = 0;
+ }
+ if (states[in->state].starts_line)
+ in->newline_permitted = 1;
+ while (in < tos) {
+ in += 1;
+ in->since_indent = in[-1].since_indent + 1;
+ if (states[in->state].starts_line)
+ in->newline_permitted = 1;
+ else
+ in->newline_permitted = in[-1].newline_permitted;
+ }
}
free(tk);
tk = NULL;
+ parser_trace_action(trace, "Cancel");
continue;
}
- // fall through and force a REDUCE (as 'shift'
- // will fail).
+ // fall through to error handling as both SHIFT and REDUCE
+ // will fail.
}
- if (p.next.sym == TK_newline) {
- if (!p.tos || ! p.stack[p.tos-1].newline_permitted) {
+ if (tk->num == TK_newline) {
+ if (!tos->newline_permitted) {
free(tk);
tk = NULL;
+ parser_trace_action(trace, "Discard");
continue;
}
+ if (tos->since_newline > 1 &&
+ states[tos->state].reduce_size >= 0 &&
+ states[tos->state].reduce_size <= tos->since_newline)
+ goto force_reduce;
}
- if (shift(&p, tk, states)) {
+ if (shift(&p, tk->num, 0, tk->num == TK_newline, tk, states)) {
+ shift_since_err = 1;
tk = NULL;
+ parser_trace_action(trace, "Shift");
continue;
}
- if (states[p.next.state].reduce_prod >= 0) {
+ force_reduce:
+ if (states[tos->state].reduce_prod >= 0 &&
+ states[tos->state].newline_only &&
+ !(tk->num == TK_newline ||
+ tk->num == TK_eof ||
+ tk->num == TK_out ||
+ (tos->indents == 0 && tos->since_newline == 0))) {
+ /* Anything other than newline or out or eof
+ * in an error unless we are already at start
+ * of line, as this production must end at EOL.
+ */
+ } else if (states[tos->state].reduce_prod >= 0) {
void **body;
- int prod = states[p.next.state].reduce_prod;
- int size = states[p.next.state].reduce_size;
+ void *res;
+ const struct state *nextstate = &states[tos->state];
+ int prod = nextstate->reduce_prod;
+ int size = nextstate->reduce_size;
int bufsize;
static char buf[16*1024];
- p.next.sym = states[p.next.state].reduce_sym;
+ short indents, start_of_line;
- body = p.asn_stack +
- (p.tos - states[p.next.state].reduce_size);
+ body = p.asn_stack + (p.tos - size);
- bufsize = do_reduce(prod, body, buf);
+ bufsize = do_reduce(prod, body, config, buf);
- pop(&p, size, do_free);
- shift(&p, memdup(buf, bufsize), states);
- if (prod == 0)
+ indents = pop(&p, size, &start_of_line,
+ do_free);
+ res = memdup(buf, bufsize);
+ memset(buf, 0, bufsize);
+ if (!shift(&p, nextstate->reduce_sym,
+ indents, start_of_line,
+ res, states)) {
+ if (prod != 0) abort();
accepted = 1;
- continue;
- }
- if (tk->num == TK_out) {
- // Indent problem - synthesise tokens to get us
- // out of here.
- fprintf(stderr, "Synthesize %d to handle indent problem\n", states[p.next.state].shift_sym);
- p.next.sym = states[p.next.state].shift_sym;
- shift(&p, tok_copy(*tk), states);
- // FIXME need to report this error somehow
+ ret = res;
+ }
+ parser_trace_action(trace, "Reduce");
continue;
}
/* Error. We walk up the stack until we
* Then we discard input tokens until
* we find one that is acceptable.
*/
+ parser_trace_action(trace, "ERROR");
+ short indents = 0, start_of_line;
err_tk = tok_copy(*tk);
- p.next.sym = TK_error;
- while (shift(&p, err_tk, states) == 0
- && p.tos > 0)
+ while (p.tos > 0 &&
+ shift(&p, TK_error, 0, 0,
+ err_tk, states) == 0)
// discard this state
- pop(&p, 1, do_free);
+ indents += pop(&p, 1, &start_of_line, do_free);
if (p.tos == 0) {
free(err_tk);
// no state accepted TK_error
break;
}
- while (search(&states[p.next.state], tk->num) < 0 &&
+ if (!shift_since_err) {
+ /* must discard at least one token to avoid
+ * infinite loop.
+ */
+ if (tk->num == TK_eof)
+ break;
+ free(tk);
+ tk = tok_copy(token_next(tokens));
+ }
+ shift_since_err = 0;
+ tos = &p.stack[p.tos-1];
+ while (!in_lookahead(tk, states, tos->state) &&
tk->num != TK_eof) {
free(tk);
tk = tok_copy(token_next(tokens));
+ shift_since_err = 1;
if (tk->num == TK_in)
- p.next.indents += 1;
+ indents += 1;
if (tk->num == TK_out) {
- if (p.next.indents == 0)
+ if (indents == 0)
break;
- p.next.indents -= 1;
+ indents -= 1;
+ // FIXME update since_indent here
}
}
- if (p.tos == 0 && tk->num == TK_eof)
- break;
+ tos->indents += indents;
}
free(tk);
- if (accepted)
- ret = p.asn_stack[0];
- else
- pop(&p, p.tos, do_free);
+ pop(&p, p.tos, NULL, do_free);
free(p.asn_stack);
free(p.stack);
return ret;
###### exported functions
void *parser_run(struct token_state *tokens,
const struct state states[],
- int (*do_reduce)(int, void**, void*),
+ int (*do_reduce)(int, void**, struct token_config*, void*),
void (*do_free)(short, void*),
- FILE *trace, const char *non_term[], int knowns);
+ FILE *trace, const char *non_term[],
+ struct token_config *config);
### Tracing
static void parser_trace_state(FILE *trace, struct frame *f, const struct state states[])
{
fprintf(trace, "(%d", f->state);
- if (f->indents)
- fprintf(trace, "%c%d", f->starts_indented?':':'.',
- f->indents);
if (states[f->state].starts_line)
fprintf(trace, "s");
if (f->newline_permitted)
- fprintf(trace, "n");
+ fprintf(trace, "n%d", f->since_newline);
fprintf(trace, ") ");
}
const char *non_term[], int knowns)
{
int i;
+ if (!trace)
+ return;
for (i = 0; i < p->tos; i++) {
- int sym = p->stack[i].sym;
- parser_trace_state(trace, &p->stack[i], states);
- if (sym < TK_reserved &&
- reserved_words[sym] != NULL)
- fputs(reserved_words[sym], trace);
- else if (sym < TK_reserved + knowns) {
- struct token *t = p->asn_stack[i];
- text_dump(trace, t->txt, 20);
- } else
- fputs(non_term[sym - TK_reserved - knowns],
- trace);
- fputs(" ", trace);
+ struct frame *f = &p->stack[i];
+ if (i) {
+ int sym = f->sym;
+ if (sym < TK_reserved &&
+ reserved_words[sym] != NULL)
+ fputs(reserved_words[sym], trace);
+ else if (sym < TK_reserved + knowns) {
+ struct token *t = p->asn_stack[i];
+ text_dump(trace, t->txt, 20);
+ } else
+ fputs(non_term[sym - TK_reserved - knowns],
+ trace);
+ if (f->indents)
+ fprintf(trace, ".%d", f->indents);
+ if (f->since_newline == 0)
+ fputs("/", trace);
+ fputs(" ", trace);
+ }
+ parser_trace_state(trace, f, states);
}
- parser_trace_state(trace, &p->next, states);
- fprintf(trace, " [");
+ fprintf(trace, "[");
if (tk->num < TK_reserved &&
reserved_words[tk->num] != NULL)
fputs(reserved_words[tk->num], trace);
else
text_dump(trace, tk->txt, 20);
- fputs("]\n", trace);
+ fprintf(trace, ":%d:%d]", tk->line, tk->col);
+ }
+
+ void parser_trace_action(FILE *trace, char *action)
+ {
+ if (trace)
+ fprintf(trace, " - %s\n", action);
}
# A Worked Example
The obvious example for a parser is a calculator.
-As `scanner` provides number parsing function using `libgmp` is it not much
+As `scanner` provides a number parsing function using `libgmp`, it is not much
work to perform arbitrary rational number calculations.
-This calculator takes one expression, or an equality test per line. The
-results are printed and in any equality test fails, the program exits with
-an error.
-
-Embedding mdcode inside mdcode is rather horrible. I'd like to find a
-better approach, but as the grammar file must have 3 components I need
-something like this.
+This calculator takes one expression, or an equality test, per line.
+The results are printed and if any equality test fails, the program
+exits with an error.
###### File: parsergen.mk
- calc.c : parsergen calc.cgm
- ./parsergen -o calc calc.cgm
+ calc.c calc.h : parsergen parsergen.mdc
+ ./parsergen --tag calc -o calc parsergen.mdc
calc : calc.o libparser.o libscanner.o libmdcode.o libnumber.o
$(CC) $(CFLAGS) -o calc calc.o libparser.o libscanner.o libmdcode.o libnumber.o -licuuc -lgmp
+ calctest : calc
+ ./calc parsergen.mdc
+ demos :: calctest
-###### demo grammar
-
- # header
- ~~~~~
+# calc: header
- #include "number.h"
+ #include "parse_number.h"
// what do we use for a demo-grammar? A calculator of course.
struct number {
mpq_t val;
int err;
};
- ~~~~~
- # code
- ~~~~~
+# calc: code
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <malloc.h>
#include <gmp.h>
+ #include <string.h>
#include "mdcode.h"
#include "scanner.h"
- #include "number.h"
#include "parser.h"
#include "calc.h"
free(n);
}
+ static int text_is(struct text t, char *s)
+ {
+ return (strlen(s) == t.len &&
+ strncmp(s, t.txt, t.len) == 0);
+ }
+
int main(int argc, char *argv[])
{
int fd = open(argv[1], O_RDONLY);
int len = lseek(fd, 0, 2);
char *file = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
- struct section *s = code_extract(file, file+len, NULL);
+ struct section *table = code_extract(file, file+len, NULL);
+ struct section *s;
struct token_config config = {
.ignored = (1 << TK_line_comment)
- | (1 << TK_block_comment)
| (1 << TK_in)
| (1 << TK_out),
.number_chars = ".,_+-",
.word_start = "",
.word_cont = "",
};
- parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
+ for (s = table; s; s = s->next)
+ if (text_is(s->section, "example: input"))
+ parse_calc(s->code, &config, argc > 2 ? stderr : NULL);
+ while (table) {
+ struct section *t = table->next;
+ code_free(table->code);
+ free(table);
+ table = t;
+ }
exit(0);
}
- ~~~~~
- # grammar
- ~~~~~
+# calc: grammar
+
+ $LEFT + -
+ $LEFT * / //
Session -> Session Line
| Line
| ERROR NEWLINE ${ printf("Skipped a bad line\n"); }$
$number
- Expression -> Expression + Term ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
- | Expression - Term ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
- | Term ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
+ Expression -> Expression + Expression ${ mpq_init($0.val); mpq_add($0.val, $1.val, $3.val); }$
+ | Expression - Expression ${ mpq_init($0.val); mpq_sub($0.val, $1.val, $3.val); }$
+ | Expression * Expression ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
+ | Expression / Expression ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
+ | Expression // Expression ${ {
+ mpz_t z0, z1, z2;
+ mpq_init($0.val);
+ mpz_init(z0); mpz_init(z1); mpz_init(z2);
+ mpz_tdiv_q(z1, mpq_numref($1.val), mpq_denref($1.val));
+ mpz_tdiv_q(z2, mpq_numref($3.val), mpq_denref($3.val));
+ mpz_tdiv_q(z0, z1, z2);
+ mpq_set_z($0.val, z0);
+ mpz_clear(z0); mpz_clear(z1); mpz_clear(z2);
+ } }$
+ | NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
+ | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
- Term -> Term * Factor ${ mpq_init($0.val); mpq_mul($0.val, $1.val, $3.val); }$
- | Term / Factor ${ mpq_init($0.val); mpq_div($0.val, $1.val, $3.val); }$
- | Factor ${ mpq_init($0.val); mpq_set($0.val, $1.val); }$
+# example: input
- Factor -> NUMBER ${ if (number_parse($0.val, $0.tail, $1.txt) == 0) mpq_init($0.val); }$
- | ( Expression ) ${ mpq_init($0.val); mpq_set($0.val, $2.val); }$
+ 355/113
+ 3.1415926535 - 355/113
+ 2 + 4 * 5
+ 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9
+ 10 * 9 / 2
+ 1 * 1000 + 2 * 100 + 3 * 10 + 4 * 1
+
+ 355//113
+
+ error