echo "PASSED"; \
done
+ ## other tests
+
@gcov -o coverage scanner.c libscanner.c > /dev/null 2> /dev/null
@mv *.gcov coverage; [ -f .gcov ] && mv .gcov coverage || true
@awk '/NOTEST/ { next } /^ *[1-9]/ {ran+=1} /^ *###/ {skip+=1} \
END {printf "coverage: %6.2f%%\n", ran * 100 / (ran + skip); \
- if (ran < (ran + skip) *0.90) exit(1) }' \
+ if (ran < (ran + skip) *0.92) exit(1) }' \
coverage/scanner.mdc.gcov
@rm -f .tmp*
14:0 string(" \\\\ \\t \\n specia..) \\ \x09 \x0a special ch..
15:0 newline()
15:0 eof()
+
+## Nested tests.
+
+We need to test various aspects of tokenizing code that is stored
+in multiple nodes. For example, comments and multi-line strings mustn't
+cross a node boundary.
+
+For this we tell `scanner` to extract sections directly from this file.
+As the file changes, line numbers might change as well, so we need to factor
+that out when testing. A simple awk script can normalise the first line number
+to one.
+
+###### other tests
+	@for T in $(scanner_section_tests); do \
+	    echo -n "Test $$T ... "; \
+	    i="$$IFS"; IFS=,; set $$T; IFS="$$i"; section="$$1"; shift; \
+	    ./md2c scanner-tests.mdc "output: $$T" | grep -v '^#' > .tmp.want; \
+	    ./coverage_scanner --file scanner-tests.mdc --section "test: $$section" \
+	         $${1+"$$@"} | awk -F: ' BEGIN {OFS=":"} $$1 ~ /^[0-9]/ {if (!first) first = $$1 - 1; \
+	              $$1 = $$1 - first} { print } '> .tmp.have; \
+	    if ! cmp -s .tmp.want .tmp.have; then \
+	       echo "FAILED"; diff -u .tmp.want .tmp.have; exit 1; fi ; \
+	    echo "PASSED"; \
+	done
+
+###### test list
+ scanner_section_tests += section1
+
+###### test: section1
+
+ foreach s in sections:
+ ## section2
+ print done
+
+###### section2
+
+ This is another
+ section
+
+###### output: section1
+ Tokenizing: test: section1
+ 1:8 ident(foreach)
+ 1:16 ident(s)
+ 1:18 ident(in)
+ 1:21 ident(sections)
+ 1:29 mark(:)
+ 7:16 in()
+ 7:16 ident(This)
+ 7:21 ident(is)
+ 7:24 ident(another)
+ 8:8 newline()
+ 8:8 out()
+ 8:8 in()
+ 8:8 ident(section)
+ 3:16 newline()
+ 3:16 ident(print)
+ 3:22 ident(done)
+ 4:0 newline()
+ 4:0 out()
+ 4:0 newline()
+ 4:0 eof()
+
+## Ad-hoc test
+
+These tests cover bugs that were found in practice, and so prevent them recurring.
+
+The "bad_indent" test was written because I was seeing a TK_in before
+"program" instead of a TK_newline.
+
+###### test list
+ scanner_tests += "bad_indent"
+
+###### test: bad_indent
+
+ const:
+ foo : number = 45
+ bar := "string"
+ program:
+ foo := 4
+ print foo, bar
+
+###### output: bad_indent
+ Tokenizing:
+ 2:8 in()
+ 2:8 ident(const)
+ 2:13 mark(:)
+ 3:16 in()
+ 3:16 ident(foo)
+ 3:20 mark(:)
+ 3:22 ident(number)
+ 3:29 mark(=)
+ 3:31 number(45) 45
+ 4:16 newline()
+ 4:16 ident(bar)
+ 4:20 mark(:=)
+ 4:23 string("string") string
+ 5:8 newline()
+ 5:8 out()
+ 5:8 newline()
+ 5:8 ident(program)
+ 5:15 mark(:)
+ 6:16 in()
+ 6:16 ident(foo)
+ 6:20 mark(:=)
+ 6:23 number(4) 4
+ 7:16 newline()
+ 7:16 ident(print)
+ 7:22 ident(foo)
+ 7:25 mark(,)
+ 7:27 ident(bar)
+ 8:0 newline()
+ 8:0 out()
+ 8:0 newline()
+ 8:0 out()
+ 8:0 newline()
+ 8:0 eof()