nim-unittest2/unittest2.nim

# See the file "LICENSE.txt", included in this
# distribution, for details about the copyright.
#
## :Authors: Zahary Karadjov, Ștefan Talpalaru
##
## This module implements boilerplate to make unit testing easy.
##
## The test status and name are printed after any output or traceback.
##
## Tests can be nested; however, failure of a nested test will not mark the
## parent test as failed. Setup and teardown are inherited. Setup can be
## overridden locally.
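##
## For instance, a nested test can fail without failing its parent
## (an illustrative sketch):
##
## .. code:: nim
##
##  test "outer":
##    test "inner":
##      check(1 + 1 == 3)  # fails only "inner"
##    check(1 + 1 == 2)    # "outer" can still pass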
##
## Compiled test files return the number of failed tests as exit code, while
##
## .. code::
##
##  nim c -r testfile.nim
##
## exits with 0 or 1.
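##
## For example (an illustrative shell session):
##
## .. code::
##
##  nim c testfile.nim
##  ./testfile
##  echo $?   # number of failed tests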
##
## Running individual tests
## ========================
##
## Specify the test names as command line arguments.
##
## .. code::
##
##  nim c -r test "my test name" "another test"
##
## Multiple arguments can be used.
##
## Running a single test suite
## ===========================
##
## Specify the suite name delimited by ``"::"``.
##
## .. code::
##
##  nim c -r test "my suite name::"
##
## Selecting tests by pattern
## ==========================
##
## A single ``"*"`` can be used for globbing.
##
## Delimit the end of a suite name with ``"::"``.
##
## Tests matching **any** of the arguments are executed.
##
## .. code::
##
##  nim c -r test fast_suite::mytest1 fast_suite::mytest2
##  nim c -r test "fast_suite::mytest*"
##  nim c -r test "auth*::" "crypto::hashing*"
##  # Run suites starting with 'bug #' and standalone tests starting with '#'
##  nim c -r test 'bug #*::' '::#*'
##
## Running tests in parallel
## =========================
##
## To enable the threadpool-based test parallelisation, "--threads:on" needs to
## be passed to the compiler, along with "-d:nimtestParallel" or the
## NIMTEST_PARALLEL environment variable:
##
## .. code::
##
##  nim c -r --threads:on -d:nimtestParallel testfile.nim
##  # or
##  NIMTEST_PARALLEL=1 nim c -r --threads:on testfile.nim
##
## There are some implicit barriers where we wait for all the spawned jobs to
## complete: before and after each test suite and at the main thread's exit.
##
## The suite-related barriers are there to avoid mixing test output, but they
## also affect which groups of tests can be run in parallel, so keep them in
## mind when deciding how many tests to place in different suites (or between
## suites).
##
## Example
## -------
##
## .. code:: nim
##
##  suite "description for this stuff":
##    echo "suite setup: run once before the tests"
##
##    setup:
##      echo "run before each test"
##
##    teardown:
##      echo "run after each test"
##
##    test "essential truths":
##      # give up and stop if this fails
##      require(true)
##
##    test "slightly less obvious stuff":
##      # print a nasty message and move on, skipping
##      # the remainder of this block
##      check(1 != 1)
##      check("asd"[2] == 'd')
##
##    test "out of bounds error is thrown on bad access":
##      let v = @[1, 2, 3]  # you can do initialization here
##      expect(IndexError):
##        discard v[4]
##
##    echo "suite teardown: run once after the tests"

import locks, macros, sets, strutils, streams, times

when declared(stdout):
  import os

when not defined(ECMAScript):
  import terminal

when declared(stdout):
  const paralleliseTests* = existsEnv("NIMTEST_PARALLEL") or defined(nimtestParallel)
    ## Whether parallel test running was enabled (set at compile time).
    ## This constant might be useful in custom output formatters.
else:
  const paralleliseTests* = false

when paralleliseTests:
  import threadpool, locks

  # repeatedly calling sync() without waiting for results - on procs that don't
  # return any - doesn't work properly (probably due to gSomeReady getting its
  # counter increased back to the pre-call value) so we're stuck with these
  # dummy flowvars
  # (`flowVars` will be initialized in each child thread, when using nested tests, by the compiler)
  # TODO: try getting rid of them when nim-0.20.0 is released
  var flowVars {.threadvar.}: seq[FlowVarBase]

  proc repeatableSync() =
    sync()
    for flowVar in flowVars:
      blockUntil(flowVar)
    flowVars = @[]

  # make sure all the spawned tests are done before exiting
  # (this will be the last sync, so no need for repeatability)
  let mainThreadID = getThreadId()
  proc quitProc() {.noconv.} =
    # "require" can exit from a worker thread and syncing in there would block
    if getThreadId() == mainThreadID:
      sync()
  addQuitProc(quitProc)

  var outputLock: Lock # used by testEnded() to avoid mixed test outputs
  initLock(outputLock)

type
  TestStatus* = enum ## The status of a test when it is done.
    OK,
    FAILED,
    SKIPPED

  OutputLevel* = enum ## The output verbosity of the tests.
    PRINT_ALL,      ## Print as much as possible.
    PRINT_FAILURES, ## Print only the failed tests.
    PRINT_NONE      ## Print nothing.

  TestResult* = object
    suiteName*: string
      ## Name of the test suite that contains this test case.
      ## Can be ``nil`` if the test case is not in a suite.
    testName*: string
      ## Name of the test case
    status*: TestStatus

  OutputFormatter* = ref object of RootObj

  ConsoleOutputFormatter* = ref object of OutputFormatter
    colorOutput: bool
      ## Have test results printed in color.
      ## Default is true for the non-js target,
      ## for which ``stdout`` is a tty.
      ## Setting the environment variable
      ## ``NIMTEST_COLOR`` to ``always`` or
      ## ``never`` changes the default for the
      ## non-js target to true or false respectively.
      ## The deprecated environment variable
      ## ``NIMTEST_NO_COLOR``, when set,
      ## changes the default to false, if
      ## ``NIMTEST_COLOR`` is undefined.
    outputLevel: OutputLevel
      ## Set the verbosity of test results.
      ## Default is ``PRINT_ALL``, unless
      ## the ``NIMTEST_OUTPUT_LVL`` environment
      ## variable is set for the non-js target.
    isInSuite: bool
    isInTest: bool

  JUnitOutputFormatter* = ref object of OutputFormatter
    stream: Stream
    testErrors: seq[string]
    testStartTime: float
    testStackTrace: string

var
  abortOnError* {.threadvar.}: bool
    ## Set to true in order to quit immediately on fail. Default is false,
    ## unless the ``NIMTEST_ABORT_ON_ERROR`` environment variable is set for
    ## the non-js target.

  checkpoints {.threadvar.}: seq[string]

  formattersLock: Lock
  formatters {.guard: formattersLock.}: seq[OutputFormatter]

  testFiltersLock: Lock
  testsFilters {.guard: testFiltersLock.}: HashSet[string]

  disabledParamFiltering: bool

when declared(stdout):
  abortOnError = existsEnv("NIMTEST_ABORT_ON_ERROR")

initLock(formattersLock)
initLock(testFiltersLock)

method suiteStarted*(formatter: OutputFormatter, suiteName: string) {.base, gcsafe.} =
  discard

method testStarted*(formatter: OutputFormatter, testName: string) {.base, gcsafe.} =
  discard

method failureOccurred*(formatter: OutputFormatter, checkpoints: seq[string], stackTrace: string) {.base, gcsafe.} =
  ## ``stackTrace`` is provided only if the failure occurred due to an exception.
  ## ``checkpoints`` is never ``nil``.
  discard

method testEnded*(formatter: OutputFormatter, testResult: TestResult) {.base, gcsafe.} =
  discard

method suiteEnded*(formatter: OutputFormatter) {.base, gcsafe.} =
  discard

proc clearOutputFormatters*() =
  ## Remove all registered output formatters.
  withLock formattersLock:
    {.gcsafe.}:
      formatters = @[]

proc addOutputFormatter*(formatter: OutputFormatter) =
  ## Register an additional output formatter that will be notified of test events.
  withLock formattersLock:
    {.gcsafe.}:
      formatters.add(formatter)
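
# A minimal custom formatter, as an illustrative sketch only (`CountingFormatter`
# is a made-up name): derive from OutputFormatter, override the hooks you need
# and register an instance with addOutputFormatter().
#
#   type
#     CountingFormatter = ref object of OutputFormatter
#       failed: int
#
#   method testEnded(formatter: CountingFormatter, testResult: TestResult) =
#     if testResult.status == FAILED:
#       inc formatter.failed
#
#   method suiteEnded(formatter: CountingFormatter) =
#     echo "failed so far: ", formatter.failed
#
#   addOutputFormatter(CountingFormatter())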

proc newConsoleOutputFormatter*(outputLevel: OutputLevel = PRINT_ALL,
                                colorOutput = true): ConsoleOutputFormatter =
  ConsoleOutputFormatter(
    outputLevel: outputLevel,
    colorOutput: colorOutput
  )

proc defaultConsoleFormatter*(): ConsoleOutputFormatter =
  when declared(stdout):
    # Reading settings
    # On a terminal this branch is executed
    var envOutLvl = os.getEnv("NIMTEST_OUTPUT_LVL").string
    var colorOutput = isatty(stdout)
    if existsEnv("NIMTEST_COLOR"):
      let colorEnv = getenv("NIMTEST_COLOR")
      if colorEnv == "never":
        colorOutput = false
      elif colorEnv == "always":
        colorOutput = true
    elif existsEnv("NIMTEST_NO_COLOR"):
      colorOutput = false

    var outputLevel = PRINT_ALL
    if envOutLvl.len > 0:
      for opt in countup(low(OutputLevel), high(OutputLevel)):
        if $opt == envOutLvl:
          outputLevel = opt
          break

    result = newConsoleOutputFormatter(outputLevel, colorOutput)
  else:
    result = newConsoleOutputFormatter()

method suiteStarted*(formatter: ConsoleOutputFormatter, suiteName: string) =
  template rawPrint() = echo("\n[Suite] ", suiteName)
  when not defined(ECMAScript):
    if formatter.colorOutput:
      styledEcho styleBright, fgBlue, "\n[Suite] ", resetStyle, suiteName
    else: rawPrint()
  else: rawPrint()
  formatter.isInSuite = true

method testStarted*(formatter: ConsoleOutputFormatter, testName: string) =
  formatter.isInTest = true

method failureOccurred*(formatter: ConsoleOutputFormatter, checkpoints: seq[string], stackTrace: string) =
  if stackTrace.len > 0:
    echo stackTrace
  let prefix = if formatter.isInSuite: " " else: ""
  for msg in items(checkpoints):
    echo prefix, msg

method testEnded*(formatter: ConsoleOutputFormatter, testResult: TestResult) =
  formatter.isInTest = false

  if formatter.outputLevel != PRINT_NONE and
      (formatter.outputLevel == PRINT_ALL or testResult.status == FAILED):
    let prefix = if testResult.suiteName.len > 0: " " else: ""
    template rawPrint() = echo(prefix, "[", $testResult.status, "] ", testResult.testName)
    when not defined(ECMAScript):
      if formatter.colorOutput and not defined(ECMAScript):
        var color = case testResult.status
          of OK: fgGreen
          of FAILED: fgRed
          of SKIPPED: fgYellow
        styledEcho styleBright, color, prefix, "[", $testResult.status, "] ", resetStyle, testResult.testName
      else:
        rawPrint()
    else:
      rawPrint()

method suiteEnded*(formatter: ConsoleOutputFormatter) =
  formatter.isInSuite = false

proc xmlEscape(s: string): string =
  result = newStringOfCap(s.len)
  for c in items(s):
    case c:
    of '<': result.add("&lt;")
    of '>': result.add("&gt;")
    of '&': result.add("&amp;")
    of '"': result.add("&quot;")
    of '\'': result.add("&apos;")
    else:
      if ord(c) < 32:
        result.add("&#" & $ord(c) & ';')
      else:
        result.add(c)

proc newJUnitOutputFormatter*(stream: Stream): JUnitOutputFormatter =
  ## Creates a formatter that writes the report to the specified stream in
  ## JUnit format.
  ## The ``stream`` is NOT closed automatically when the tests are finished,
  ## because the formatter has no way to know when all tests are finished.
  ## You should invoke formatter.close() to finalize the report.
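  ##
  ## A typical use (an illustrative sketch; the file name is arbitrary) writes
  ## the report to a file and registers the formatter:
  ##
  ## .. code-block:: nim
  ##
  ##  import streams
  ##
  ##  let formatter = newJUnitOutputFormatter(newFileStream("test_results.xml", fmWrite))
  ##  addOutputFormatter(formatter)
  ##  # ... declare suites and tests ...
  ##  formatter.close()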
  result = JUnitOutputFormatter(
    stream: stream,
    testErrors: @[],
    testStackTrace: "",
    testStartTime: 0.0
  )
  stream.writeLine("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
  stream.writeLine("<testsuites>")

proc close*(formatter: JUnitOutputFormatter) =
  ## Completes the report and closes the underlying stream.
  formatter.stream.writeLine("</testsuites>")
  formatter.stream.close()

method suiteStarted*(formatter: JUnitOutputFormatter, suiteName: string) =
  formatter.stream.writeLine("\t<testsuite name=\"$1\">" % xmlEscape(suiteName))

method testStarted*(formatter: JUnitOutputFormatter, testName: string) =
  formatter.testErrors.setLen(0)
  formatter.testStackTrace.setLen(0)
  formatter.testStartTime = epochTime()

method failureOccurred*(formatter: JUnitOutputFormatter, checkpoints: seq[string], stackTrace: string) =
  ## ``stackTrace`` is provided only if the failure occurred due to an exception.
  ## ``checkpoints`` is never ``nil``.
  formatter.testErrors.add(checkpoints)
  if stackTrace.len > 0:
    formatter.testStackTrace = stackTrace

method testEnded*(formatter: JUnitOutputFormatter, testResult: TestResult) =
  let time = epochTime() - formatter.testStartTime
  let timeStr = time.formatFloat(ffDecimal, precision = 8)
  formatter.stream.writeLine("\t\t<testcase name=\"$#\" time=\"$#\">" % [xmlEscape(testResult.testName), timeStr])
  case testResult.status:
  of OK:
    discard
  of SKIPPED:
    formatter.stream.writeLine("<skipped />")
  of FAILED:
    let failureMsg = if formatter.testStackTrace.len > 0 and
                        formatter.testErrors.len > 0:
                       xmlEscape(formatter.testErrors[^1])
                     elif formatter.testErrors.len > 0:
                       xmlEscape(formatter.testErrors[0])
                     else: "The test failed without outputting an error"

    var errs = ""
    if formatter.testErrors.len > 1:
      var startIdx = if formatter.testStackTrace.len > 0: 0 else: 1
      var endIdx = if formatter.testStackTrace.len > 0: formatter.testErrors.len - 2
                   else: formatter.testErrors.len - 1

      for errIdx in startIdx..endIdx:
        if errs.len > 0:
          errs.add("\n")
        errs.add(xmlEscape(formatter.testErrors[errIdx]))

    if formatter.testStackTrace.len > 0:
      formatter.stream.writeLine("\t\t\t<error message=\"$#\">$#</error>" % [failureMsg, xmlEscape(formatter.testStackTrace)])
      if errs.len > 0:
        formatter.stream.writeLine("\t\t\t<system-err>$#</system-err>" % errs)
    else:
      formatter.stream.writeLine("\t\t\t<failure message=\"$#\">$#</failure>" % [failureMsg, errs])

  formatter.stream.writeLine("\t\t</testcase>")

method suiteEnded*(formatter: JUnitOutputFormatter) =
  formatter.stream.writeLine("\t</testsuite>")

proc glob(matcher, filter: string): bool =
  ## Globbing using a single `*`. Empty `filter` matches everything.
  if filter.len == 0:
    return true

  if not filter.contains('*'):
    return matcher == filter

  let beforeAndAfter = filter.split('*', maxsplit=1)
  if beforeAndAfter.len == 1:
    # "foo*"
    return matcher.startsWith(beforeAndAfter[0])

  if matcher.len < filter.len - 1:
    return false # "12345" should not match "123*345"

  return matcher.startsWith(beforeAndAfter[0]) and matcher.endsWith(beforeAndAfter[1])

proc matchFilter(suiteName, testName, filter: string): bool =
  if filter == "":
    return true
  if testName == filter:
    # corner case for tests containing "::" in their name
    return true
  let suiteAndTestFilters = filter.split("::", maxsplit=1)
  if suiteAndTestFilters.len == 1:
    # no suite specified
    let test_f = suiteAndTestFilters[0]
    return glob(testName, test_f)
  return glob(suiteName, suiteAndTestFilters[0]) and glob(testName, suiteAndTestFilters[1])

when defined(testing): export matchFilter
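
# A few illustrative cases of the filter syntax handled above:
#   matchFilter("fast_suite", "mytest1", "fast_suite::mytest*")  # --> true
#   matchFilter("fast_suite", "mytest1", "slow_suite::")         # --> false
#   matchFilter("any suite",  "mytest1", "mytest1")              # --> true (bare test name)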

proc shouldRun(currentSuiteName, testName: string): bool =
  ## Check if a test should be run by matching suiteName and testName against
  ## test filters.
  withLock testFiltersLock:
    {.gcsafe.}:
      if testsFilters.len == 0:
        return true

      for f in testsFilters:
        if matchFilter(currentSuiteName, testName, f):
          return true

  return false

proc ensureInitialized() =
  withLock formattersLock:
    {.gcsafe.}:
      if formatters.len == 0:
        formatters = @[OutputFormatter(defaultConsoleFormatter())]

  withLock testFiltersLock:
    {.gcsafe.}:
      if not disabledParamFiltering and not testsFilters.isValid:
        testsFilters.init()
        when declared(paramCount):
          # Read tests to run from the command line.
          for i in 1 .. paramCount():
            testsFilters.incl(paramStr(i))

proc suiteStarted(name: string) =
  when paralleliseTests:
    repeatableSync() # wait for any independent tests from the threadpool before starting the suite
  withLock formattersLock:
    {.gcsafe.}:
      for formatter in formatters:
        formatter.suiteStarted(name)

proc suiteEnded() =
  when paralleliseTests:
    repeatableSync() # wait for a suite's tests from the threadpool before moving on to the next suite
  withLock formattersLock:
    {.gcsafe.}:
      for formatter in formatters:
        formatter.suiteEnded()

proc testStarted(name: string) =
  withLock formattersLock:
    {.gcsafe.}:
      for formatter in formatters:
        formatter.testStarted(name)

proc testEnded(testResult: TestResult) =
  withLock formattersLock:
    {.gcsafe.}:
      for formatter in formatters:
        when paralleliseTests:
          withLock outputLock:
            formatter.testEnded(testResult)
        else:
          formatter.testEnded(testResult)

template suite*(name, body) {.dirty.} =
  ## Declare a test suite identified by `name` with optional ``setup``
  ## and/or ``teardown`` section.
  ##
  ## A test suite is a series of one or more related tests sharing a
  ## common fixture (``setup``, ``teardown``). The fixture is executed
  ## for EACH test.
  ##
  ## .. code-block:: nim
  ##
  ##  suite "test suite for addition":
  ##    setup:
  ##      let result = 4
  ##
  ##    test "2 + 2 = 4":
  ##      check(2+2 == result)
  ##
  ##    test "(2 + -2) != 4":
  ##      check(2 + -2 != result)
  ##
  ##    # No teardown needed
  ##
  ## The suite will run the individual test cases in the order in which
  ## they were listed. With default global settings the above code prints:
  ##
  ## .. code-block::
  ##
  ##  [Suite] test suite for addition
  ##    [OK] 2 + 2 = 4
  ##    [OK] (2 + -2) != 4
  bind formatters, ensureInitialized, suiteStarted, suiteEnded

  block:
    template setup(setupBody: untyped) {.dirty, used.} =
      var testSetupIMPLFlag {.used.} = true
      template testSetupIMPL: untyped {.dirty.} = setupBody

    template teardown(teardownBody: untyped) {.dirty, used.} =
      var testTeardownIMPLFlag {.used.} = true
      template testTeardownIMPL: untyped {.dirty.} = teardownBody

    let testSuiteName {.used.} = name

    ensureInitialized()
    try:
      suiteStarted(name)
      body
    finally:
      suiteEnded()

template exceptionTypeName(e: typed): string = $e.name

template test*(name, body) =
  ## Define a single test case identified by `name`.
  ##
  ## .. code-block:: nim
  ##
  ##  test "roses are red":
  ##    let roses = "red"
  ##    check(roses == "red")
  ##
  ## The above code outputs:
  ##
  ## .. code-block::
  ##
  ##  [OK] roses are red
  bind shouldRun, checkpoints, formatters, ensureInitialized, testStarted, testEnded, exceptionTypeName

  # `gensym` can't be in here because it's not a first-class pragma
  when paralleliseTests:
    {.pragma: testrunner, gcsafe.}
  else:
    {.pragma: testrunner.}

  proc runTest(testSuiteName: string): int {.gensym, testrunner.} =
    when declared(testSetupIMPLFlag):
      testSetupIMPL()
    ensureInitialized()

    if shouldRun(testSuiteName, name):
      checkpoints = @[]
      var testStatusIMPL {.inject.} = OK

      testStarted(name)

      try:
        body
      except:
        when not defined(js):
          let e = getCurrentException()
          let eTypeDesc = "[" & exceptionTypeName(e) & "]"
          checkpoint("Unhandled exception: " & getCurrentExceptionMsg() & " " & eTypeDesc)
          var stackTrace {.inject.} = e.getStackTrace()
        fail()
      finally:
        if testStatusIMPL == FAILED:
          programResult += 1
        let testResult = TestResult(
          suiteName: testSuiteName,
          testName: name,
          status: testStatusIMPL
        )
        testEnded(testResult)
        checkpoints = @[]

    # when running tests in parallel, "formatters" manipulation may occur in
    # teardown(), so it needs to be after testEnded()
    when declared(testTeardownIMPLFlag):
      testTeardownIMPL()

  let optionalTestSuiteName = when declared(testSuiteName): testSuiteName else: ""

  when paralleliseTests:
    flowVars.add(spawn runTest(optionalTestSuiteName))
  else:
    discard runTest(optionalTestSuiteName)

proc checkpoint*(msg: string) =
  ## Set a checkpoint identified by `msg`. Upon test failure all
  ## checkpoints encountered so far are printed out. Example:
  ##
  ## .. code-block:: nim
  ##
  ##  checkpoint("Checkpoint A")
  ##  check((42, "the Answer to life and everything") == (1, "a"))
  ##  checkpoint("Checkpoint B")
  ##
  ## outputs "Checkpoint A" once it fails.
  checkpoints.add(msg)
  # TODO: add support for something like SCOPED_TRACE from Google Test

template fail* =
  ## Print out the checkpoints encountered so far and quit if ``abortOnError``
  ## is true. Otherwise, erase the checkpoints and indicate the test has
  ## failed (change exit code and test status). This template is useful
  ## for debugging, but is otherwise mostly used internally. Example:
  ##
  ## .. code-block:: nim
  ##
  ##  checkpoint("Checkpoint A")
  ##  complicatedProcInThread()
  ##  fail()
  ##
  ## outputs "Checkpoint A" before quitting.
  bind ensureInitialized

  when declared(testStatusIMPL):
    testStatusIMPL = FAILED
  else:
    programResult += 1

  ensureInitialized()

  withLock formattersLock:
    {.gcsafe.}:
      for formatter in formatters:
        when declared(stackTrace):
          formatter.failureOccurred(checkpoints, stackTrace)
        else:
          formatter.failureOccurred(checkpoints, "")

  when not defined(ECMAScript):
    if abortOnError:
      when declared(testStatusIMPL):
        # this wasn't incremented yet, because it's normally incremented in the
        # "test" template, but we're exiting earlier here
        programResult += 1
      quit(programResult)

  checkpoints = @[]

template skip* =
  ## Mark the test as skipped. Should be used when it is not possible to
  ## perform the test for reasons that depend on the outer environment,
  ## or on certain application-logic conditions or configurations.
  ## The test code is still executed.
  ##
  ## .. code-block:: nim
  ##
  ##  if not isGLContextCreated():
  ##    skip()
  bind checkpoints

  testStatusIMPL = SKIPPED
  checkpoints = @[]

macro check*(conditions: untyped): untyped =
  ## Verify if a statement or a list of statements is true.
  ## A helpful error message and set checkpoints are printed out on
  ## failure (if ``outputLevel`` is not ``PRINT_NONE``).
  ## Example:
  ##
  ## .. code-block:: nim
  ##
  ##  import strutils
  ##
  ##  check("AKB48".toLowerAscii() == "akb48")
  ##
  ##  let teams = {'A', 'K', 'B', '4', '8'}
  ##
  ##  check:
  ##    "AKB48".toLowerAscii() == "akb48"
  ##    'C' in teams
  let checked = callsite()[1]

  template asgn(a: untyped, value: typed) =
    var a = value # XXX: we need "var: var" here in order to
                  # preserve the semantics of var params

  template print(name: untyped, value: typed) =
    when compiles(string($value)):
      checkpoint(name & " was " & $value)

  proc inspectArgs(exp: NimNode): tuple[assigns, check, printOuts: NimNode] =
    result.check = copyNimTree(exp)
    result.assigns = newNimNode(nnkStmtList)
    result.printOuts = newNimNode(nnkStmtList)

    var counter = 0

    if exp[0].kind == nnkIdent and
        $exp[0] in ["not", "in", "notin", "==", "<=",
                    ">=", "<", ">", "!=", "is", "isnot"]:
      # For these operators, capture each non-literal argument in a temporary
      # so its value can be printed when the check fails.
      for i in 1 ..< exp.len:
        if exp[i].kind notin nnkLiterals:
          inc counter
          let argStr = exp[i].toStrLit
          let paramAst = exp[i]
          if exp[i].kind == nnkIdent:
            result.printOuts.add getAst(print(argStr, paramAst))
          if exp[i].kind in nnkCallKinds + { nnkDotExpr, nnkBracketExpr }:
            let callVar = newIdentNode(":c" & $counter)
            result.assigns.add getAst(asgn(callVar, paramAst))
            result.check[i] = callVar
            result.printOuts.add getAst(print(argStr, callVar))
          if exp[i].kind == nnkExprEqExpr:
            # ExprEqExpr
            #   Ident !"v"
            #   IntLit 2
            result.check[i] = exp[i][1]
          if exp[i].typekind notin {ntyTypeDesc}:
            let arg = newIdentNode(":p" & $counter)
            result.assigns.add getAst(asgn(arg, paramAst))
            result.printOuts.add getAst(print(argStr, arg))
            if exp[i].kind != nnkExprEqExpr:
              result.check[i] = arg
            else:
              result.check[i][1] = arg

  case checked.kind
  of nnkCallKinds:
    let (assigns, check, printOuts) = inspectArgs(checked)
    let lineinfo = newStrLitNode(checked.lineinfo)
    let callLit = checked.toStrLit
    result = quote do:
      block:
        `assigns`
        if not `check`:
          checkpoint(`lineinfo` & ": Check failed: " & `callLit`)
          `printOuts`
          fail()

  of nnkStmtList:
    result = newNimNode(nnkStmtList)
    for node in checked:
      if node.kind != nnkCommentStmt:
        result.add(newCall(!"check", node))

  else:
    let lineinfo = newStrLitNode(checked.lineinfo)
    let callLit = checked.toStrLit
    result = quote do:
      if not `checked`:
        checkpoint(`lineinfo` & ": Check failed: " & `callLit`)
        fail()

template require*(conditions: untyped) =
  ## Same as `check` except any failed test causes the program to quit
  ## immediately. Any teardown statements are not executed and the failed
  ## test output is not generated.
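  ##
  ## For example (an illustrative sketch; the environment variable is arbitrary):
  ##
  ## .. code-block:: nim
  ##
  ##  import os, strutils
  ##
  ##  test "environment is configured":
  ##    require(getEnv("DATABASE_URL").len > 0)   # abort the whole run if unset
  ##    check(getEnv("DATABASE_URL").startsWith("postgres"))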
  let savedAbortOnError = abortOnError
  block:
    abortOnError = true
    check conditions
  abortOnError = savedAbortOnError

macro expect*(exceptions: varargs[typed], body: untyped): untyped =
  ## Test if `body` raises an exception found in the passed `exceptions`.
  ## The test passes if the raised exception is part of the acceptable
  ## exceptions. Otherwise, it fails.
  ## Example:
  ##
  ## .. code-block:: nim
  ##
  ##  import math, random
  ##  proc defectiveRobot() =
  ##    randomize()
  ##    case random(1..4)
  ##    of 1: raise newException(OSError, "CANNOT COMPUTE!")
  ##    of 2: discard parseInt("Hello World!")
  ##    of 3: raise newException(IOError, "I can't do that Dave.")
  ##    else: assert 2 + 2 == 5
  ##
  ##  expect IOError, OSError, ValueError, AssertionError:
  ##    defectiveRobot()
  let exp = callsite()

  template expectBody(errorTypes, lineInfoLit, body): NimNode {.dirty.} =
    try:
      body
      checkpoint(lineInfoLit & ": Expect Failed, no exception was thrown.")
      fail()
    except errorTypes:
      discard
    except:
      checkpoint(lineInfoLit & ": Expect Failed, unexpected exception was thrown.")
      fail()

  var body = exp[exp.len - 1]

  var errorTypes = newNimNode(nnkBracket)
  for i in countup(1, exp.len - 2):
    errorTypes.add(exp[i])

  result = getAst(expectBody(errorTypes, exp.lineinfo, body))

proc disableParamFiltering* =
  ## Disables filtering tests with the command line params.
  disabledParamFiltering = true