initial tests

- currently reuses one shared pair of nodes across all two-node tests
- next: spin up a fresh pair of nodes for each individual test
This commit is contained in:
E M 2026-03-23 12:33:21 +11:00
parent 3bc6aa8b8d
commit 8a68a81f54
No known key found for this signature in database
4 changed files with 663 additions and 0 deletions

361
tests/js/harness.js Normal file
View File

@ -0,0 +1,361 @@
/**
* harness.js koffi FFI wrapper for libstorage
*
* Wraps the libstorage C API into a promise-based JS interface.
* Each async libstorage call (which dispatches work to a thread and calls back
* with the result) is wrapped into a Promise that resolves on RET_OK or rejects
* on RET_ERR.
*
* NOTE: The StorageCallback msg parameter is declared as 'str' (null-terminated
* C string). This is correct for text content. For binary chunk downloads,
* a typed-buffer approach using the len parameter would be needed instead.
*/
import koffi from 'koffi';
import path from 'path';
import { fileURLToPath } from 'url';
// Absolute path of this module's directory (ESM replacement for __dirname).
export const __dirname = path.dirname(fileURLToPath(import.meta.url));

// Platform-specific shared-library filename for libstorage; anything other
// than macOS/Windows falls back to the Linux .so naming.
const LIB_NAMES = { darwin: 'libstorage.dylib', win32: 'libstorage.dll' };
const libName = LIB_NAMES[process.platform] ?? 'libstorage.so';

// The library is expected in <repo>/build, two levels up from tests/js/.
const libPath = path.resolve(__dirname, '../../build', libName);
const lib = koffi.load(libPath);
// Return codes matching libstorage.h
export const RET_OK = 0;        // success; callback msg carries the result
export const RET_ERR = 1;       // failure; callback msg carries the error text
// NOTE(review): code 2 is skipped here — presumably defined in libstorage.h
// for something this wrapper does not use; confirm against the header.
export const RET_PROGRESS = 3;  // intermediate update; callback fires again later
// typedef void (*StorageCallback)(int callerRet, const char *msg, size_t len, void *userData)
// msg is declared 'str' (null-terminated C string) — see the header comment
// above about binary chunk data needing a typed-buffer approach instead.
const StorageCallback = koffi.proto(
'StorageCallback', 'void', ['int', 'str', 'size_t', 'void *']
);
// All libstorage function bindings
// All libstorage function bindings.
// Each signature string is parsed by koffi. Async entry points take a
// StorageCallback + userData pair, return an int dispatch status
// synchronously, and deliver their real result via the callback.
const _lib = {
  // --- node lifecycle ------------------------------------------------------
  storage_new: lib.func(
    'void *storage_new(str configJson, StorageCallback *callback, void *userData)'
  ),
  storage_version: lib.func('str storage_version(void *ctx)'),
  storage_revision: lib.func('str storage_revision(void *ctx)'),
  storage_start: lib.func(
    'int storage_start(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_stop: lib.func(
    'int storage_stop(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_close: lib.func(
    'int storage_close(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_destroy: lib.func('int storage_destroy(void *ctx)'),
  // --- node introspection --------------------------------------------------
  storage_repo: lib.func(
    'int storage_repo(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_spr: lib.func(
    'int storage_spr(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_debug: lib.func(
    'int storage_debug(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_peer_id: lib.func(
    'int storage_peer_id(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_log_level: lib.func(
    'int storage_log_level(void *ctx, str logLevel, StorageCallback *callback, void *userData)'
  ),
  // --- chunked upload session ----------------------------------------------
  storage_upload_init: lib.func(
    'int storage_upload_init(void *ctx, str filepath, size_t chunkSize, StorageCallback *callback, void *userData)'
  ),
  storage_upload_chunk: lib.func(
    'int storage_upload_chunk(void *ctx, str sessionId, void *chunk, size_t len, StorageCallback *callback, void *userData)'
  ),
  storage_upload_finalize: lib.func(
    'int storage_upload_finalize(void *ctx, str sessionId, StorageCallback *callback, void *userData)'
  ),
  storage_upload_cancel: lib.func(
    'int storage_upload_cancel(void *ctx, str sessionId, StorageCallback *callback, void *userData)'
  ),
  storage_upload_file: lib.func(
    'int storage_upload_file(void *ctx, str sessionId, StorageCallback *callback, void *userData)'
  ),
  // --- download -------------------------------------------------------------
  storage_download_init: lib.func(
    'int storage_download_init(void *ctx, str cid, size_t chunkSize, bool local, StorageCallback *callback, void *userData)'
  ),
  storage_download_stream: lib.func(
    'int storage_download_stream(void *ctx, str cid, size_t chunkSize, bool local, str filepath, StorageCallback *callback, void *userData)'
  ),
  storage_download_chunk: lib.func(
    'int storage_download_chunk(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
  storage_download_cancel: lib.func(
    'int storage_download_cancel(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
  storage_download_manifest: lib.func(
    'int storage_download_manifest(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
  // --- local store queries & maintenance ------------------------------------
  storage_list: lib.func(
    'int storage_list(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_space: lib.func(
    'int storage_space(void *ctx, StorageCallback *callback, void *userData)'
  ),
  storage_delete: lib.func(
    'int storage_delete(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
  storage_fetch: lib.func(
    'int storage_fetch(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
  storage_exists: lib.func(
    'int storage_exists(void *ctx, str cid, StorageCallback *callback, void *userData)'
  ),
};
/**
* Wraps a single async libstorage call into a Promise.
*
* libstorage dispatches work to an internal Nim thread, then calls back
* from that thread when done. koffi queues thread callbacks onto the Node.js
* event loop, but the event loop may drain before the callback arrives unless
* there is an active handle keeping it alive. A setInterval serves this role
* and is cleared once the callback fires.
*
* @param {Function} thunk - (cb) => libFn(ctx, ...args, cb, null)
* The thunk receives the koffi callback and must call the libstorage function,
* returning its synchronous int result.
* @param {Object} opts
* @param {Function} opts.onProgress - called for each RET_PROGRESS invocation
* @returns {Promise<string>} resolves with the msg string on RET_OK
*/
/**
 * Wraps a single async libstorage call into a Promise.
 *
 * libstorage dispatches work to an internal Nim thread, then calls back
 * from that thread when done. koffi queues thread callbacks onto the Node.js
 * event loop, but the event loop may drain before the callback arrives unless
 * there is an active handle keeping it alive. A setInterval serves this role
 * and is cleared once the callback fires.
 *
 * @param {Function} thunk - (cb) => libFn(ctx, ...args, cb, null)
 *   The thunk receives the koffi callback and must call the libstorage
 *   function, returning its synchronous int result.
 * @param {Object} opts
 * @param {Function} opts.onProgress - called for each RET_PROGRESS invocation
 * @returns {Promise<string>} resolves with the msg string on RET_OK
 */
function callAsync(thunk, { onProgress } = {}) {
  return new Promise((resolve, reject) => {
    const keepAlive = setInterval(() => {}, 200);
    let settled = false;
    let cb;
    const cleanup = () => {
      clearInterval(keepAlive);
      koffi.unregister(cb);
    };
    cb = koffi.register((ret, msg, _len, _userData) => {
      if (ret === RET_PROGRESS) {
        if (onProgress) onProgress(msg);
        return; // callback will fire again; keepAlive remains active
      }
      if (settled) return; // ignore a duplicate terminal callback
      settled = true;
      cleanup();
      if (ret === RET_OK) resolve(msg ?? '');
      else reject(new Error(msg ?? `libstorage error (ret=${ret})`));
    }, koffi.pointer(StorageCallback));
    let syncRet;
    try {
      syncRet = thunk(cb);
    } catch (err) {
      // Fix: a throwing thunk previously leaked both the keep-alive timer
      // (keeping the process alive forever) and the registered callback.
      if (!settled) {
        settled = true;
        cleanup();
        reject(err);
      }
      return;
    }
    // Fix: guard on `settled` so a callback that fired synchronously during
    // the thunk cannot lead to a double unregister / double settle here.
    if (typeof syncRet === 'number' && syncRet !== RET_OK && !settled) {
      settled = true;
      cleanup();
      reject(new Error(`libstorage sync dispatch failed (ret=${syncRet})`));
    }
  });
}
/**
 * Promise-based wrapper around a single libstorage node instance.
 *
 * Construct with `new StorageNode()`, then `await node.create(configJson)`
 * before calling anything else. `version()`/`revision()`/`destroy()` are
 * synchronous; every other method dispatches through callAsync() and
 * returns a Promise.
 */
export class StorageNode {
  // Opaque native context pointer from storage_new; null after destroy().
  #ctx = null;

  /**
   * Create and initialise a storage node from a JSON config string.
   * Resolves when the node is ready (storage_new callback fires RET_OK).
   * @param {string} configJson - node configuration as a JSON string
   * @returns {Promise<StorageNode>} this, for chaining
   * @throws {Error} if storage_new returns a null context
   */
  async create(configJson) {
    let cb;
    let keepAlive;
    const promise = new Promise((resolve, reject) => {
      // Active handle so the event loop survives until the callback arrives.
      keepAlive = setInterval(() => {}, 200);
      cb = koffi.register((ret, msg, _len, _userData) => {
        clearInterval(keepAlive);
        koffi.unregister(cb);
        if (ret === RET_OK) resolve();
        else reject(new Error(msg ?? 'storage_new callback failed'));
      }, koffi.pointer(StorageCallback));
    });
    this.#ctx = _lib.storage_new(configJson, cb, null);
    if (!this.#ctx) {
      // Fix: also clear the keep-alive timer on this path — previously only
      // the callback was unregistered, so a failed storage_new left the
      // interval running and the process never exited.
      clearInterval(keepAlive);
      koffi.unregister(cb);
      throw new Error('storage_new returned null — check configJson');
    }
    await promise;
    return this;
  }

  /** Returns the version string synchronously (e.g. "v0.3.2") */
  version() {
    return _lib.storage_version(this.#ctx);
  }

  /** Returns the contracts revision string synchronously */
  revision() {
    return _lib.storage_revision(this.#ctx);
  }

  /** Start the node's network services. */
  start() {
    return callAsync(cb => _lib.storage_start(this.#ctx, cb, null));
  }

  /** Stop the node's network services. */
  stop() {
    return callAsync(cb => _lib.storage_stop(this.#ctx, cb, null));
  }

  /** Close the node (release its resources; call after stop()). */
  close() {
    return callAsync(cb => _lib.storage_close(this.#ctx, cb, null));
  }

  /**
   * Synchronously destroy the native context and null it out.
   * @returns {number} the storage_destroy return code
   */
  destroy() {
    const ret = _lib.storage_destroy(this.#ctx);
    this.#ctx = null;
    return ret;
  }

  /** Returns the data-dir path string */
  repo() {
    return callAsync(cb => _lib.storage_repo(this.#ctx, cb, null));
  }

  /** Returns the raw SPR URI string (e.g. "spr1qm...") */
  spr() {
    return callAsync(cb => _lib.storage_spr(this.#ctx, cb, null));
  }

  /** Returns the debug info as a JSON string */
  debug() {
    return callAsync(cb => _lib.storage_debug(this.#ctx, cb, null));
  }

  /** Returns the peer ID string */
  peerId() {
    return callAsync(cb => _lib.storage_peer_id(this.#ctx, cb, null));
  }

  /**
   * Set the runtime log level.
   * @param {string} level - e.g. "TRACE", "DEBUG", "INFO", "WARN", "ERROR"
   */
  logLevel(level) {
    return callAsync(cb => _lib.storage_log_level(this.#ctx, level, cb, null));
  }

  /**
   * Upload arbitrary text content as a single chunk
   * (init → one chunk → finalize).
   * @param {string} content - UTF-8 text to store
   * @param {string} name - filename recorded in the manifest
   * @param {number} chunkSize - chunk size passed to upload_init
   * @returns {Promise<string>} the CID of the uploaded content
   */
  async uploadContent(content, name = 'test.txt', chunkSize = 64 * 1024) {
    const sessionId = await callAsync(
      cb => _lib.storage_upload_init(this.#ctx, name, chunkSize, cb, null)
    );
    const buf = Buffer.from(content, 'utf8');
    await callAsync(
      cb => _lib.storage_upload_chunk(this.#ctx, sessionId, buf, buf.byteLength, cb, null)
    );
    const cid = await callAsync(
      cb => _lib.storage_upload_finalize(this.#ctx, sessionId, cb, null)
    );
    return cid;
  }

  /**
   * Cancel an in-progress upload session.
   * @param {string} sessionId
   */
  uploadCancel(sessionId) {
    return callAsync(cb => _lib.storage_upload_cancel(this.#ctx, sessionId, cb, null));
  }

  /**
   * Download content by CID. Collects all RET_PROGRESS chunk messages and
   * joins them as the full content string.
   *
   * NOTE(review): chunks arrive as null-terminated strings (see the
   * StorageCallback declaration), so this is only safe for text content.
   *
   * @param {string} cid
   * @param {boolean} local - true = local store only, false = fetch from network
   * @param {number} chunkSize
   * @returns {Promise<string>} the downloaded content as a UTF-8 string
   */
  async downloadContent(cid, local = false, chunkSize = 64 * 1024) {
    await callAsync(
      cb => _lib.storage_download_init(this.#ctx, cid, chunkSize, local, cb, null)
    );
    const chunks = [];
    await callAsync(
      cb => _lib.storage_download_stream(this.#ctx, cid, chunkSize, local, null, cb, null),
      { onProgress: (msg) => { if (msg) chunks.push(msg); } }
    );
    return chunks.join('');
  }

  /**
   * Cancel an in-progress download for a CID.
   * @param {string} cid
   */
  downloadCancel(cid) {
    return callAsync(cb => _lib.storage_download_cancel(this.#ctx, cid, cb, null));
  }

  /**
   * Retrieve the manifest JSON for a CID.
   * @param {string} cid
   * @returns {Promise<string>} manifest JSON string
   */
  downloadManifest(cid) {
    return callAsync(cb => _lib.storage_download_manifest(this.#ctx, cid, cb, null));
  }

  /**
   * Retrieve the list of stored manifests as a JSON string.
   * @returns {Promise<string>}
   */
  list() {
    return callAsync(cb => _lib.storage_list(this.#ctx, cb, null));
  }

  /**
   * Retrieve storage space info as a JSON string.
   * @returns {Promise<string>}
   */
  space() {
    return callAsync(cb => _lib.storage_space(this.#ctx, cb, null));
  }

  /**
   * Check whether a CID exists in local store.
   * @param {string} cid
   * @returns {Promise<boolean>}
   */
  async exists(cid) {
    const msg = await callAsync(cb => _lib.storage_exists(this.#ctx, cid, cb, null));
    return msg === 'true';
  }

  /**
   * Delete content by CID from local store.
   * @param {string} cid
   */
  delete(cid) {
    return callAsync(cb => _lib.storage_delete(this.#ctx, cid, cb, null));
  }

  /**
   * Fetch content from the network into local store (background fetch).
   * Does not return progress updates; resolves when fetch is complete.
   * @param {string} cid
   */
  fetch(cid) {
    return callAsync(cb => _lib.storage_fetch(this.#ctx, cid, cb, null));
  }

  /**
   * Graceful shutdown: stop → close → destroy.
   * NOTE(review): if stop() or close() rejects, destroy() is not reached —
   * confirm whether destroy is safe to call on an unstopped node.
   */
  async shutdown() {
    await this.stop();
    await this.close();
    this.destroy();
  }
}

25
tests/js/package-lock.json generated Normal file
View File

@ -0,0 +1,25 @@
{
"name": "logos-storage-js-tests",
"version": "0.1.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "logos-storage-js-tests",
"version": "0.1.0",
"dependencies": {
"koffi": "^2.9.0"
}
},
"node_modules/koffi": {
"version": "2.15.2",
"resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.2.tgz",
"integrity": "sha512-r9tjJLVRSOhCRWdVyQlF3/Ugzeg13jlzS4czS82MAgLff4W+BcYOW7g8Y62t9O5JYjYOLAjAovAZDNlDfZNu+g==",
"hasInstallScript": true,
"license": "MIT",
"funding": {
"url": "https://liberapay.com/Koromix"
}
}
}
}

11
tests/js/package.json Normal file
View File

@ -0,0 +1,11 @@
{
"name": "logos-storage-js-tests",
"version": "0.1.0",
"type": "module",
"scripts": {
"test": "node two-node-test.js"
},
"dependencies": {
"koffi": "^2.9.0"
}
}

266
tests/js/two-node-test.js Normal file
View File

@ -0,0 +1,266 @@
/**
* two-node-test.js Layer 1.5: full pipeline two-node upload/download test
*
* Spins up two in-process libstorage nodes, connects them via bootstrap SPR,
* uploads content on node A, and verifies it can be downloaded from node B
* over the p2p network.
*
* This is the test that fills the TODO in tests/cbindings/storage.c:
* "// TODO: implement check_fetch — requires two nodes connected together"
*
* Usage:
* cd tests/js && npm install && npm test
*/
import { StorageNode } from './harness.js';
import { rmSync, mkdirSync } from 'fs';
import { tmpdir } from 'os';
import path from 'path';
// ---------------------------------------------------------------------------
// Config helpers
// ---------------------------------------------------------------------------
const TMP = tmpdir();

/**
 * Build the JSON config string for a test node, defaulting "data-dir" and
 * "log-level" and (re)creating a clean data directory.
 *
 * @param {string} label - short node label used in the default data-dir name
 * @param {Object} config - base config (e.g. "listen-port", "disc-port",
 *   "nat", "bootstrap-node"); passed through to libstorage as-is
 * @returns {string} the final config serialised as JSON
 */
function nodeConfig(label, config) {
  // Fix: work on a shallow copy instead of mutating the caller's object.
  const cfg = { ...config };
  if (!cfg['data-dir']) {
    cfg['data-dir'] = path.join(TMP, `libstorage-test-${label}`);
  }
  // Clean up any leftover state from a previous run
  rmSync(cfg['data-dir'], { recursive: true, force: true });
  mkdirSync(cfg['data-dir'], { recursive: true });
  if (!cfg['log-level']) {
    cfg['log-level'] = 'ERROR';
  }
  console.debug(`Node '${label}' config:`, cfg);
  return JSON.stringify(cfg);
}
// ---------------------------------------------------------------------------
// Minimal test runner
// ---------------------------------------------------------------------------
let passed = 0;
let failed = 0;

/**
 * Minimal async test runner: prints the test label, awaits the body, and
 * records the outcome in the module-level pass/fail counters.
 * @param {string} name - human-readable test label
 * @param {Function} fn - sync or async test body; a throw means failure
 */
async function test(name, fn) {
  process.stdout.write(` ${name} ... `);
  try {
    await fn();
  } catch (err) {
    console.log(`FAIL\n ${err.message}`);
    failed += 1;
    return;
  }
  console.log('PASS');
  passed += 1;
}
/**
 * Throw if `condition` is falsy.
 * @param {*} condition - value treated as a boolean
 * @param {string} [message] - error text; defaults to 'assertion failed'
 */
function assert(condition, message) {
  if (!condition) throw new Error(message ?? 'assertion failed');
}
/**
 * Strict-equality assertion with a descriptive failure message.
 * @param {*} actual - value produced by the code under test
 * @param {*} expected - value it should strictly equal
 * @param {string} [label] - optional prefix for the error message
 */
function assertEqual(actual, expected, label = '') {
  if (actual === expected) return;
  const prefix = label ? label + ': ' : '';
  throw new Error(
    `${prefix}expected ${JSON.stringify(expected)}, got ${JSON.stringify(actual)}`
  );
}
// ---------------------------------------------------------------------------
// Test suite
// ---------------------------------------------------------------------------
/**
 * Full test suite: single-node sanity checks on node A, then two-node
 * p2p upload/download, fetch, and routing-table tests across A and B.
 * Later tests depend on state (CIDs) created by earlier ones, so the
 * sequence is order-sensitive.
 */
async function main() {
  console.log('\nlogos-storage two-node p2p test\n');
  const nodeA = new StorageNode();
  const nodeB = new StorageNode();
  // -------------------------------------------------------------------------
  // Setup: start node A, get its SPR, start node B bootstrapped to node A
  // -------------------------------------------------------------------------
  console.log('Setting up nodes...');
  // Ports are arbitrary but must differ between the two nodes; extip pins
  // the advertised address to loopback so discovery works locally.
  await nodeA.create(nodeConfig('a', {
    "listen-port": 8792,
    "disc-port": 8794,
    "nat": "extip:127.0.0.1",
    // "log-level": 'TRACE'
  }));
  await nodeA.start();
  console.log(` node A version: ${nodeA.version()}`);
  const sprA = await nodeA.spr();
  console.log(` node A SPR: ${sprA.slice(0, 40)}...`);
  // Node B bootstraps directly to node A via A's SPR.
  await nodeB.create(nodeConfig('b', {
    "listen-port": 8793,
    "disc-port": 8795,
    "nat": "extip:127.0.0.1",
    // "log-level": 'TRACE',
    "bootstrap-node": [sprA]
  }));
  await nodeB.start();
  console.log(` node B version: ${nodeB.version()}`);
  // Allow time for the bootstrap connection to establish
  // NOTE(review): this wait is currently disabled; the two-node tests below
  // rely on the connection forming while the single-node tests run.
  // console.log('\nWaiting for p2p connection...');
  // await new Promise(r => setTimeout(r, 3000));
  // -------------------------------------------------------------------------
  // Single-node tests (mirrors tests/cbindings/storage.c single-node suite)
  // -------------------------------------------------------------------------
  console.log('\nSingle-node tests (node A):');
  await test('version is non-empty', () => {
    assert(nodeA.version().length > 0, 'version should be non-empty');
  });
  await test('peer ID is a non-empty string', async () => {
    const pid = await nodeA.peerId();
    assert(pid && pid.length > 0, 'peer ID should be non-empty');
  });
  await test('SPR contains "spr"', async () => {
    const spr = await nodeA.spr();
    assert(spr.includes('spr'), `SPR should contain "spr", got: ${spr}`);
  });
  await test('debug info has id and addrs', async () => {
    const debugJson = await nodeA.debug();
    const info = JSON.parse(debugJson);
    assert(info.id && info.id.length > 0, 'debug.id should be present');
    assert(Array.isArray(info.addrs), 'debug.addrs should be an array');
  });
  const CONTENT = 'Hello from node A — p2p test content!';
  // `cid` is set by the upload test and consumed by every later test;
  // if the upload fails, the dependent tests will fail too.
  let cid;
  await test('upload content on node A', async () => {
    cid = await nodeA.uploadContent(CONTENT);
    assert(cid && cid.length > 0, 'CID should be a non-empty string');
    console.log(`\n CID: ${cid}`);
  });
  await test('manifest has expected fields', async () => {
    const manifestJson = await nodeA.downloadManifest(cid);
    const m = JSON.parse(manifestJson);
    assert(m.treeCid && m.treeCid.length > 0, 'manifest.treeCid should be present');
    assert(typeof m.datasetSize === 'number', 'manifest.datasetSize should be a number');
    assert(m.filename === 'test.txt', `manifest.filename should be "test.txt", got "${m.filename}"`);
  });
  await test('list includes uploaded CID manifest', async () => {
    const listJson = await nodeA.list();
    assert(listJson.includes(cid), `list should include CID ${cid}`);
  });
  await test('space info has totalBlocks', async () => {
    const spaceJson = await nodeA.space();
    const s = JSON.parse(spaceJson);
    assert(typeof s.totalBlocks === 'number', 'space.totalBlocks should be a number');
  });
  await test('content exists locally on node A', async () => {
    const exists = await nodeA.exists(cid);
    assert(exists === true, 'exists should return true for uploaded CID');
  });
  await test('content is downloadable locally on node A', async () => {
    const downloaded = await nodeA.downloadContent(cid, true /* local only */);
    assertEqual(downloaded, CONTENT, 'local download on node A');
  });
  // Upload a second piece of content for delete/fetch tests to leave CONTENT available
  let cid2;
  await test('upload second content for delete test', async () => {
    cid2 = await nodeA.uploadContent('ephemeral content for delete test', 'ephemeral.txt');
    assert(cid2 && cid2.length > 0, 'CID2 should be non-empty');
  });
  await test('delete removes content from local store', async () => {
    await nodeA.delete(cid2);
    const exists = await nodeA.exists(cid2);
    assert(exists === false, 'exists should return false after delete');
  });
  await test('cancel download does not crash', async () => {
    // Init a download then immediately cancel it — verifies the cancel path is wired correctly
    await nodeA.downloadContent(cid, true); // ensure blocks are cached
    await nodeA.downloadCancel(cid);
    // If we get here without exception, the cancel path is working
  });
  // -------------------------------------------------------------------------
  // Two-node tests
  // -------------------------------------------------------------------------
  console.log('\nTwo-node tests:');
  await test('node B can download content from node A over p2p', async () => {
    const downloaded = await nodeB.downloadContent(cid, false /* remote */);
    assertEqual(downloaded, CONTENT, 'remote download on node B');
  });
  await test('storage_fetch: node B fetches into local store, then exists locally', async () => {
    // Upload fresh content on A so B definitely doesn't have it yet
    const fetchContent = 'Content for storage_fetch test';
    const fetchCid = await nodeA.uploadContent(fetchContent, 'fetch-test.txt');
    // Verify B doesn't have it locally yet
    const beforeFetch = await nodeB.exists(fetchCid);
    assert(beforeFetch === false, 'B should not have the CID locally before fetch');
    // Fetch into B's local store
    await nodeB.fetch(fetchCid);
    // Now it should exist locally on B
    const afterFetch = await nodeB.exists(fetchCid);
    assert(afterFetch === true, 'B should have the CID locally after fetch');
    // And be downloadable locally
    const downloaded = await nodeB.downloadContent(fetchCid, true /* local only */);
    assertEqual(downloaded, fetchContent, 'fetched content should match');
  });
  await test('node B peer ID is non-empty', async () => {
    const pid = await nodeB.peerId();
    assert(pid && pid.length > 0, 'node B peer ID should be non-empty');
  });
  await test('node A debug info shows node B in routing table', async () => {
    const debugJson = await nodeA.debug();
    const info = JSON.parse(debugJson);
    // NOTE(review): the explicit 3s wait above is commented out, so this
    // relies on the connection having formed during earlier tests — confirm
    // this is stable in CI.
    assert(Array.isArray(info.table?.nodes), 'debug.table.nodes should be an array');
    assert(info.table.nodes.length > 0, 'routing table should contain at least one peer (node B)');
  });
  // -------------------------------------------------------------------------
  // Teardown
  // -------------------------------------------------------------------------
  console.log('\nShutting down nodes...');
  await nodeA.shutdown();
  await nodeB.shutdown();
  // -------------------------------------------------------------------------
  // Summary
  // -------------------------------------------------------------------------
  console.log(`\n${passed + failed} tests: ${passed} passed, ${failed} failed\n`);
  if (failed > 0) process.exit(1);
}
// Entry point: run the suite; any error that escapes main() is fatal.
try {
  await main();
} catch (err) {
  console.error('\nFatal error:', err);
  process.exit(1);
}