sourcecred/config/test.js

// @flow
const tmp = require("tmp");
const execDependencyGraph = require("../src/tools/execDependencyGraph");
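
// Run the SourceCred test suite as a graph of interdependent tasks.
// By default only the basic checks run; passing --full adds the
// end-to-end tests as well.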
main();
function main() {
  const mode = process.argv.includes("--full") ? "FULL" : "BASIC";
  execDependencyGraph(makeTasks(mode)).then(({success}) => {
    process.exitCode = success ? 0 : 1;
  });
}
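
// Build the task list for the given mode. The backend is compiled into a
// temporary directory so that downstream tasks can invoke it.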
function makeTasks(mode /*: "BASIC" | "FULL" */) {
  const backendOutput = tmp.dirSync({
    unsafeCleanup: true,
    prefix: "sourcecred-test-",
  }).name;
  console.log("tmpdir for backend output: " + backendOutput);
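  // Wrap a command so that it runs with SOURCECRED_BIN pointing at the
  // compiled backend output.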
  function withSourcecredBinEnv(
    invocation /*: $ReadOnlyArray<string> */
  ) /*: string[] */ {
    return ["env", "SOURCECRED_BIN=" + backendOutput, ...invocation];
  }
  const basicTasks = [
    {
      id: "ensure-flow-typing",
      cmd: ["./scripts/ensure-flow.sh"],
      deps: [],
    },
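    // The string is split so that this file's own contents do not trigger
    // the check that the script performs.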
    {
      // eslint-disable-next-line no-useless-concat
      id: "check-stop" + "ships",
      // eslint-disable-next-line no-useless-concat
      cmd: ["./scripts/check-stop" + "ships.sh"],
      deps: [],
    },
    {
      id: "check-pretty",
      cmd: ["npm", "run", "--silent", "check-pretty"],
      deps: [],
    },
    {
      id: "lint",
      cmd: ["npm", "run", "--silent", "lint"],
      deps: [],
    },
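    // Type-check with Flow; warnings are treated as failures.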
    {
      id: "flow",
      cmd: [
        "npm",
        "run",
        "--silent",
        "flow",
        "--",
        "--quiet",
        "--max-warnings=0",
      ],
      deps: [],
    },
    {
      id: "unit",
cmd: ["npm", "run", "--silent", "unit", "--", "--ci", "--maxWorkers=4"],
      deps: [],
    },
    {
      id: "backend",
      cmd: [
        "npm",
        "run",
        "--silent",
        "backend",
        "--",
        "--output-path",
        backendOutput,
      ],
      deps: [],
    },
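    // The sharness shell tests exercise the compiled backend, so they run
    // with SOURCECRED_BIN set and depend on the "backend" task.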
    {
      id: {BASIC: "sharness", FULL: "sharness-full"}[mode],
      cmd: withSourcecredBinEnv([
        "npm",
        "run",
        "--silent",
        {BASIC: "sharness", FULL: "sharness-full"}[mode],
      ]),
      deps: ["backend"],
    },
  ];
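  // Additional end-to-end tests that only run in FULL mode (--full).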
  const extraTasks = [
    {
      id: "fetchGithubRepoTest",
      cmd: withSourcecredBinEnv([
        "./src/plugins/github/fetchGithubRepoTest.sh",
        "--no-build",
      ]),
      deps: ["backend"],
    },
    {
      id: "loadRepositoryTest",
      cmd: withSourcecredBinEnv([
        "./src/plugins/git/loadRepositoryTest.sh",
        "--no-build",
      ]),
      deps: ["backend"],
    },
  ];
  switch (mode) {
    case "BASIC":
      return basicTasks;
    case "FULL":
      return [].concat(basicTasks, extraTasks);
    default:
      /*:: (mode: empty); */ throw new Error(mode);
  }
}