Prototyping: Revise CI (#371)

Introduces a test runner with test cases. Also significantly overhauls the GHA configuration.
Lily Brown 2022-02-17 17:15:33 -08:00 committed by GitHub
parent 6aaaafcc8d
commit 1ac64af484
8 changed files with 260 additions and 19 deletions


@@ -34,6 +34,11 @@ jobs:
      with:
        path: ~/.cabal/store
        key: prototyping-${{ runner.os }}-${{ matrix.agda }}
    - uses: actions/cache@v2
      id: luau-ast-cache
      with:
        path: ./build
        key: prototyping-${{ runner.os }}-${{ hashFiles('Ast/**', 'Analysis/**', 'CLI/Ast.cpp', 'CLI/FileUtils.*')}}
    - name: install cabal
      run: sudo apt-get install -y cabal-install
    - name: cabal update
@@ -43,26 +48,35 @@ jobs:
      working-directory: prototyping
      run: |
        cabal install Agda-${{ matrix.agda }}
        cabal install --lib scientific --package-env .
        cabal install --lib vector --package-env .
        cabal install --lib aeson --package-env .
    - name: check examples
        cabal install --lib scientific vector aeson --package-env .
    - name: check targets
      working-directory: prototyping
      run: ~/.cabal/bin/agda Examples.agda
      run: |
        ~/.cabal/bin/agda Examples.agda
        ~/.cabal/bin/agda Properties.agda
    - name: build executables
      working-directory: prototyping
      run: |
        ~/.cabal/bin/agda --compile PrettyPrinter.agda
        ~/.cabal/bin/agda --compile Interpreter.agda
    - name: cmake configure
      run: cmake .
      if: steps.luau-ast-cache.outputs.cache-hit != 'true'
      run: |
        mkdir -p build
        cd build
        cmake build ../
    - name: cmake build luau-ast
      run: cmake --build . --target Luau.Ast.CLI
    - name: run smoketest
      if: steps.luau-ast-cache.outputs.cache-hit != 'true'
      run: |
        cmake --build ./build --target Luau.Ast.CLI
    - name: run tests
      working-directory: prototyping
      run: |
        ../luau-ast Examples/SmokeTest.lua | ./PrettyPrinter > Examples/SmokeTestOutput.lua
        ../luau-ast Examples/SmokeTest.lua | ./Interpreter
    - name: diff smoketest
      working-directory: prototyping
      run: diff Examples/SmokeTest.lua Examples/SmokeTestOutput.lua
        mkdir test-failures
        python tests -l ../build/luau-ast --write-diff-failures --diff-failure-location test-failures/
    - uses: actions/upload-artifact@v2
      if: failure()
      with:
        name: test failures
        path: prototyping/test-failures
        retention-days: 5


@@ -2,5 +2,11 @@
*.agdai
Main
MAlonzo
Examples
PrettyPrinter
Interpreter
Properties
!Tests/Interpreter
!Tests/PrettyPrinter
.ghc.*
test-failures/


@@ -25,3 +25,21 @@ and run!
```
luau-ast Examples/SmokeTest.lua | ./PrettyPrinter
```
## Testing
We have a series of snapshot tests in the `Tests/` directory. You interact with the tests using the `tests` Python script in the `prototyping` directory. To build the test executables and run all of the tests:
```sh
tests --luau-cli ../build/luau-ast --build
```
This will build the test targets and run them. Run `tests --help` for information about all the command-line options.
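You can also restrict a run to particular suites or cases: `--suite` (`-s`) selects a suite by name, and `--case` (`-c`) filters cases by substring. For example, to run only the interpreter suite:
```sh
tests --luau-cli ../build/luau-ast --build --suite interpreter
```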
### Adding a new test
To add a new test, add it to `Tests/{SUITE_NAME}/{CASE_NAME}`. You'll need an `in.lua` file and an `out.txt` file. The `in.lua` file is the input Luau source code, while the `out.txt` file is the expected output after running `luau-ast in.lua | test_executable`.
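For example, a new interpreter test case would be laid out as follows (the case name `my-new-case` is only illustrative):
```
Tests/Interpreter/my-new-case/
  in.lua    -- the input Luau source
  out.txt   -- the expected output of `luau-ast in.lua | ./Interpreter`
```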
### Updating a test
If you make a change to the prototype that results in an expected change in behavior, you might want to update the test cases automatically. To do this, run `tests` with the `--accept-new-output` (`-a` for short) flag. Rather than diffing the output, this will overwrite the `out.txt` files for each test case with the actual result. Commit the resulting changes with your PR.
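For example, assuming `luau-ast` has been built at `../build/luau-ast`, the following rebuilds the test executables and rewrites every `out.txt` in place:
```sh
tests --luau-cli ../build/luau-ast --build --accept-new-output
```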


@@ -0,0 +1,5 @@
local function foo(x)
    return nil
end
return foo(nil)


@@ -0,0 +1 @@
nil

prototyping/tests (new executable file)

@@ -0,0 +1,197 @@
#!/usr/bin/env python3
# Snapshot test runner for the prototype: each case is fed through luau-ast
# and the compiled Agda executable for its suite, then diffed against the
# checked-in expected output.
import argparse
import difflib
import enum
import os
import os.path
import subprocess
import sys
SUITES = ["interpreter", "prettyprinter"]

IN_FILE_NAME = "in.lua"
OUT_FILE_NAME = "out.txt"

SUITE_EXE_NAMES = {
    "interpreter": "Interpreter",
    "prettyprinter": "PrettyPrinter",
}

SUITE_ENTRY_POINTS = {
    "interpreter": "Interpreter.agda",
    "prettyprinter": "PrettyPrinter.agda",
}

SUITE_ROOTS = {
    "interpreter": "Tests/Interpreter",
    "prettyprinter": "Tests/PrettyPrinter",
}
class TestResultStatus(enum.Enum):
    CLI_ERROR = 0
    EXE_ERROR = 1
    DIFF_ERROR = 2
    SUCCESS = 3
    WROTE_NEW = 4


class DiffFailure:
    def __init__(self, expected, actual):
        self.expected = expected
        self.actual = actual

    def diff_text(self):
        # keepends=True preserves line endings so the joined diff stays one line per entry
        diff_generator = difflib.context_diff(self.expected.splitlines(keepends=True), self.actual.splitlines(keepends=True), fromfile="expected", tofile="actual", n=3)
        return "".join(diff_generator)

    def diff_html(self):
        differ = difflib.HtmlDiff(tabsize=4)
        return differ.make_file(self.expected.splitlines(), self.actual.splitlines(), fromdesc="Expected", todesc="Actual", context=True, numlines=5)
class TestCaseResult:
    def __init__(self, suite, case, status, details):
        self.suite = suite
        self.case = case
        self.status = status
        self.details = details

    def did_pass(self):
        return self.status == TestResultStatus.SUCCESS or self.status == TestResultStatus.WROTE_NEW

    def to_string(self):
        prefix = f"[{self.suite}/{self.case}]: "
        if self.status == TestResultStatus.CLI_ERROR:
            return f"{prefix}CLI ERROR: {self.details}"
        elif self.status == TestResultStatus.EXE_ERROR:
            return f"{prefix}EXE ERROR: {self.details}"
        elif self.status == TestResultStatus.DIFF_ERROR:
            text_diff = self.details.diff_text()
            return f"{prefix}FAILED:\n{text_diff}"
        elif self.status == TestResultStatus.SUCCESS:
            return f"{prefix}SUCCEEDED"
        elif self.status == TestResultStatus.WROTE_NEW:
            return f"{prefix}WROTE NEW RESULT"

    def write_artifact(self, artifact_root):
        if self.status != TestResultStatus.DIFF_ERROR:
            return
        filename = f"{self.suite}-{self.case}.out.html"
        path = os.path.join(artifact_root, filename)
        html = self.details.diff_html()
        with open(path, "w") as file:
            file.write(html)
parser = argparse.ArgumentParser(description="Runs prototype test cases")
parser.add_argument("--luau-cli", "-l", dest="cli_location", required=True, help="The location of the luau-ast CLI")
parser.add_argument("--root", "-r", dest="prototype_root", required=False, default=os.getcwd(), help="The root of the prototype")
parser.add_argument("--build", "-b", dest="build", action="store_true", default=False, help="Whether to automatically build required test binaries")
parser.add_argument("--suite", "-s", dest="suites", action="append", default=[], choices=SUITES, help="Which test suites to run")
parser.add_argument("--case", "-c", dest="cases", action="append", default=[], help="Which test cases to run (substring match)")
parser.add_argument("--accept-new-output", "-a", dest="snapshot", action="store_true", default=False, help="Whether to write the new output to files, instead of diffing against it")
parser.add_argument("--write-diff-failures", dest="write_diffs", action="store_true", default=False, help="Whether to write test failure diffs to files")
parser.add_argument("--diff-failure-location", dest="diff_location", default=None, help="Where to write diff failure files to")
def build_suite(root, suite):
    entry_point = SUITE_ENTRY_POINTS.get(suite)
    if entry_point is None:
        return (False, "Invalid suite")
    # Expand ~ ourselves and avoid shell=True so the argument list reaches agda as-is.
    agda = os.path.expanduser("~/.cabal/bin/agda")
    result = subprocess.run([agda, "--compile", entry_point], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if result.returncode == 0:
        return (True, None)
    else:
        return (False, result.stdout.decode("utf-8", errors="replace"))
def run_test(in_path, out_path, cli_path, exe_path, snapshot):
    # Parse the input with luau-ast, then feed the serialized AST to the suite executable.
    cli_result = subprocess.run([cli_path, in_path], capture_output=True)
    if cli_result.returncode != 0:
        return (TestResultStatus.CLI_ERROR, f"CLI error: {cli_result.stderr}")
    exe_result = subprocess.run(exe_path, input=cli_result.stdout, capture_output=True)
    if exe_result.returncode != 0:
        return (TestResultStatus.EXE_ERROR, f"Executable error; stdout:{exe_result.stdout}\n\nstderr: {exe_result.stderr}")
    actual_result = exe_result.stdout.decode("utf-8")
    if snapshot:
        # --accept-new-output: overwrite the expectation instead of diffing against it.
        with open(out_path, "w") as out_file:
            out_file.write(actual_result)
        return (TestResultStatus.WROTE_NEW, None)
    else:
        with open(out_path, "r") as out_file:
            expected_result = out_file.read()
        if expected_result != actual_result:
            return (TestResultStatus.DIFF_ERROR, DiffFailure(expected_result, actual_result))
        return (TestResultStatus.SUCCESS, None)
def should_run_case(case_name, filters):
    if len(filters) == 0:
        return True
    return any([f in case_name for f in filters])


def run_test_suite(args, suite, suite_root, suite_exe):
    results = []
    for entry in os.listdir(suite_root):
        if not should_run_case(entry, args.cases):
            continue
        case_path = os.path.join(suite_root, entry)
        if os.path.isdir(case_path):
            in_path = os.path.join(case_path, IN_FILE_NAME)
            out_path = os.path.join(case_path, OUT_FILE_NAME)
            if not os.path.exists(in_path) or not os.path.exists(out_path):
                continue
            status, details = run_test(in_path, out_path, args.cli_location, suite_exe, args.snapshot)
            result = TestCaseResult(suite, entry, status, details)
            results.append(result)
    return results
def main():
    args = parser.parse_args()
    suites = args.suites if len(args.suites) > 0 else SUITES
    root = os.path.abspath(args.prototype_root)
    if args.build:
        for suite in suites:
            success, reason = build_suite(root, suite)
            if not success:
                print(f"Error building executable for test suite {suite}:\n{reason}")
                sys.exit(1)
            else:
                print(f"Built executable for test suite {suite} successfully.")
    failed = False
    for suite in suites:
        suite_root = os.path.join(root, SUITE_ROOTS.get(suite))
        suite_exe = os.path.join(root, SUITE_EXE_NAMES.get(suite))
        print(f"Running test suite {suite}...")
        results = run_test_suite(args, suite, suite_root, suite_exe)
        passed = 0
        total = len(results)
        for result in results:
            if result.did_pass():
                passed += 1
            else:
                failed = True
        print(f"Suite {suite} [{passed} / {total} passed]:")
        for result in results:
            print(result.to_string())
            if args.write_diffs:
                result.write_artifact(args.diff_location)
    if failed:
        sys.exit(1)


if __name__ == "__main__":
    main()