]> ocean-lang.org Git - ocean/blobdiff - csrc/scanner-tests.mdc
scanner: change the meaning of ignoring comment tokens.
[ocean] / csrc / scanner-tests.mdc
index 14a70572a6646886c485423fc770ffd452049b0e..d527ed0442fc9c68bbbe2f0e586bfa65409ce2c5 100644 (file)
@@ -32,11 +32,13 @@ about each test.
                    echo "PASSED"; \
                done
 
+               ## other tests
+
                @gcov -o coverage scanner.c libscanner.c > /dev/null 2> /dev/null
                @mv *.gcov coverage; [ -f .gcov ] && mv .gcov coverage || true
                @awk '/NOTEST/ { next } /^ *[1-9]/ {ran+=1} /^ *###/ {skip+=1} \
                    END {printf "coverage: %6.2f%%\n", ran * 100 / (ran + skip); \
-                        if (ran < (ran + skip) *0.90) exit(1) }' \
+                        if (ran < (ran + skip) *0.95) exit(1) }' \
                        coverage/scanner.mdc.gcov
                @rm -f .tmp*
 
@@ -51,12 +53,12 @@ Some simple tests... maybe all tests are simple.
 Include a special test for numbers, as they are interesting.
 
 ###### test list
-       scanner_tests += "test1,if,then,+,-"
-       scanner_tests += "test1,if,then,+,-,/"
-       scanner_tests += "test1,--ignore-indent,if,then,+,-,/"
+       scanner_tests += "test1,-r,if,then,+,-"
+       scanner_tests += "test1,-r,if,then,+,-,/"
+       scanner_tests += "test1,-r,--ignore-indent,if,then,+,-,/"
+       scanner_tests += "test1,-r,--ignore-indent,--ignore-newline,if,then,+,-,/"
        scanner_tests += "test1,--ignore-indent,--ignore-newline,if,then,+,-,/"
-       scanner_tests += "test1,-Cc,--ignore-indent,--ignore-newline,if,then,+,-,/"
-       scanner_tests += "test1,-CcSz,--ignore-indent,--ignore-newline,if,then,+,-,/"
+       scanner_tests += "test1,-Sz,--ignore-indent,--ignore-newline,if,then,+,-,/"
 
 ###### test: test1
 
@@ -91,7 +93,7 @@ Include a special test for numbers, as they are interesting.
        lines */
        divident /+ divisor
 
-###### output: test1,if,then,+,-
+###### output: test1,-r,if,then,+,-
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -178,7 +180,7 @@ Include a special test for numbers, as they are interesting.
        32:0 newline()
        32:0 eof()
 
-###### output: test1,if,then,+,-,/
+###### output: test1,-r,if,then,+,-,/
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -266,7 +268,7 @@ Include a special test for numbers, as they are interesting.
        32:0 newline()
        32:0 eof()
 
-###### output: test1,--ignore-indent,if,then,+,-,/
+###### output: test1,-r,--ignore-indent,if,then,+,-,/
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -342,7 +344,7 @@ Include a special test for numbers, as they are interesting.
        31:19 newline()
        32:0 eof()
 
-###### output: test1,--ignore-indent,--ignore-newline,if,then,+,-,/
+###### output: test1,-r,--ignore-indent,--ignore-newline,if,then,+,-,/
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -394,7 +396,7 @@ Include a special test for numbers, as they are interesting.
        31:12 ident(divisor)
        32:0 eof()
 
-###### output: test1,-Cc,--ignore-indent,--ignore-newline,if,then,+,-,/
+###### output: test1,--ignore-indent,--ignore-newline,if,then,+,-,/
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -443,7 +445,7 @@ Include a special test for numbers, as they are interesting.
        31:12 ident(divisor)
        32:0 eof()
 
-###### output: test1,-CcSz,--ignore-indent,--ignore-newline,if,then,+,-,/
+###### output: test1,-Sz,--ignore-indent,--ignore-newline,if,then,+,-,/
        Tokenizing: 
        2:0 ident(A)
        2:2 ident(B)
@@ -605,8 +607,8 @@ Now to test for some errors ... though things I thought would be errors
 sometimes aren't.
 
 ###### test list
-       scanner_tests += "errtest,--ignore-ident,--ignore-mark,-W_,-w_,if,then,+,-"
-       scanner_tests += "errtest,--ignore-ident,--ignore-mark,-N,if,then,+,-"
+       scanner_tests += "errtest,-r,--ignore-ident,--ignore-mark,-W_,-w_,if,then,+,-"
+       scanner_tests += "errtest,-r,--ignore-ident,--ignore-mark,-N,if,then,+,-"
 
 ###### test: errtest
 
@@ -624,7 +626,7 @@ sometimes aren't.
 
        "  \\ \t \n special chars in strings"
 
-###### output: errtest,--ignore-ident,--ignore-mark,-W_,-w_,if,then,+,-
+###### output: errtest,-r,--ignore-ident,--ignore-mark,-W_,-w_,if,then,+,-
 
        Tokenizing: 
        2:0 ERROR(multiple)
@@ -662,7 +664,7 @@ sometimes aren't.
        15:0 newline()
        15:0 eof()
 
-###### output: errtest,--ignore-ident,--ignore-mark,-N,if,then,+,-
+###### output: errtest,-r,--ignore-ident,--ignore-mark,-N,if,then,+,-
        Tokenizing: 
        2:0 ERROR(multiple)
        2:9 ERROR(decimal)
@@ -715,6 +717,121 @@ sometimes aren't.
        15:0 newline()
        15:0 eof()
 
+## Nested tests.
+
+We need to test various aspects of tokenizing code that is stored
+in multiple nodes.  For example, comments and multi-line strings mustn't
+cross a node boundary.
+
+For this we tell `scanner` to extract sections directly from this file.
+As the file changes, line numbers might change as well, so we need to factor
+that out when testing.  A simple awk script can normalise the first line number
+to one.
+
+###### other tests
+       @for T in $(scanner_section_tests); do \
+          echo -n "Test $$T ... "; \
+          i="$$IFS"; IFS=,; set $$T; IFS="$$i"; section="$$1"; shift; \
+           ./md2c scanner-tests.mdc "output: $$T" | grep -v '^#' > .tmp.want; \
+          ./coverage_scanner --file scanner-tests.mdc --section "test: $$section" \
+            $${1+"$$@"} | awk -F: ' BEGIN {OFS=":"} $$1 ~ /^[0-9]/ {if (!first) first = $$1 - 1; \
+                 $$1 = $$1 - first} { print } '> .tmp.have; \
+           if ! cmp -s .tmp.want .tmp.have; then \
+               echo "FAILED"; diff -u .tmp.want .tmp.have; exit 1; fi ; \
+           echo "PASSED"; \
+       done
+
+###### test list
+       scanner_section_tests += section1 section_string section_comment
+
+###### test: section1
+
+       foreach s in sections:
+               ## section2
+               print done
+
+###### section2
+
+               This is another
+       section
+
+###### output: section1
+       Tokenizing: test: section1
+       1:8 ident(foreach)
+       1:16 ident(s)
+       1:18 ident(in)
+       1:21 ident(sections)
+       1:29 mark(:)
+       7:16 in()
+       7:16 ident(This)
+       7:21 ident(is)
+       7:24 ident(another)
+       8:8 newline()
+       8:8 out()
+       8:8 in()
+       8:8 ident(section)
+       3:16 newline()
+       3:16 ident(print)
+       3:22 ident(done)
+       4:0 newline()
+       4:0 out()
+       4:0 newline()
+       4:0 eof()
+
+###### test: section_string
+       a = '''
+         A sting mustn't cross
+         ## string B
+       skip
+
+###### string B
+       to a new node
+       '''
+
+###### output: section_string
+       Tokenizing: test: section_string
+       1:8 ident(a)
+       1:10 mark(=)
+       1:12 ERROR('''\x0a\x09  A sting mus..)
+       7:8 in()
+       7:8 ident(to)
+       7:11 ident(a)
+       7:13 ident(new)
+       7:17 ident(node)
+       8:8 newline()
+       8:8 ERROR(''')
+       4:8 newline()
+       4:8 out()
+       4:8 newline()
+       4:8 ident(skip)
+       5:0 newline()
+       5:0 eof()
+
+###### test: section_comment
+       /* Mult-line comment must stay within
+       ## comment B
+       */
+
+###### comment B
+       a single node, they cannot cross nodes.
+
+###### output: section_comment
+       Tokenizing: test: section_comment
+       1:8 ERROR(/* Mult-line comme..)
+       6:8 ident(a)
+       6:10 ident(single)
+       6:17 ident(node)
+       6:21 mark(,)
+       6:23 ident(they)
+       6:28 ident(cannot)
+       6:35 ident(cross)
+       6:41 ident(nodes)
+       6:46 mark(.)
+       3:8 newline()
+       3:8 mark(*/)
+       4:0 newline()
+       4:0 eof()
+
 ## Ad-hoc test
 
 These tests test bugs that were found in practice, and so prevent them recuring.