mirror of https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-03 14:33:12 +00:00
Cleaning up
This commit is contained in:
parent 7385de7bcc
commit 4b9b648b90
@@ -33,6 +9,9 @@ initLock(testLock)

suite "Onchain group manager":
setup:
# Acquire lock to ensure tests run sequentially
acquire(testLock)

let runAnvil {.used.} = runAnvil()

var manager {.threadvar.}: OnchainGroupManager
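
An aside on the setup above: acquire(testLock) is what forces this suite's tests to run one at a time against the shared Anvil instance. A minimal sketch of that serialization pattern with std/locks, assuming the matching release happens in a teardown block that this hunk does not show:

import std/locks

var testLock: Lock
initLock(testLock)

proc runExclusive(body: proc ()) =
  # Only one test body at a time; release even if the body raises.
  acquire(testLock)
  try:
    body()
  finally:
    release(testLock)

runExclusive(proc () = echo "test body runs while holding testLock")
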
@@ -82,7 +85,6 @@ suite "Onchain group manager":
let accounts = waitFor web3.provider.eth_accounts()
web3.defaultAccount = accounts[2]
let (privateKey, acc) = createEthAccount(web3)

let tokenAddress = (waitFor deployTestToken(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy test token contract: " & $error
return
@@ -91,7 +93,6 @@ suite "Onchain group manager":
).valueOr:
assert false, "Failed to deploy RLN contract: " & $error
return

# simulating a change in the contractAddress
let manager2 = OnchainGroupManager(
ethClientUrls: @[EthClient],
@@ -314,87 +315,87 @@ suite "Onchain group manager":
check:
validated

# test "validateRoot: should reject bad root":
# let idCredentials = generateCredentials(manager.rlnInstance)
# let idCommitment = idCredentials.idCommitment
test "validateRoot: should reject bad root":
let idCredentials = generateCredentials(manager.rlnInstance)
let idCommitment = idCredentials.idCommitment

# (waitFor manager.init()).isOkOr:
# raiseAssert $error
(waitFor manager.init()).isOkOr:
raiseAssert $error

# manager.userMessageLimit = some(UserMessageLimit(20))
# manager.membershipIndex = some(MembershipIndex(0))
# manager.idCredentials = some(idCredentials)
manager.userMessageLimit = some(UserMessageLimit(20))
manager.membershipIndex = some(MembershipIndex(0))
manager.idCredentials = some(idCredentials)

# manager.merkleProofCache = newSeq[byte](640)
# for i in 0 ..< 640:
# manager.merkleProofCache[i] = byte(rand(255))
manager.merkleProofCache = newSeq[byte](640)
for i in 0 ..< 640:
manager.merkleProofCache[i] = byte(rand(255))

# let messageBytes = "Hello".toBytes()
let messageBytes = "Hello".toBytes()

# let epoch = default(Epoch)
# debug "epoch in bytes", epochHex = epoch.inHex()
let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()

# let validProofRes = manager.generateProof(
# data = messageBytes, epoch = epoch, messageId = MessageId(1)
# )
let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1)
)

# check:
# validProofRes.isOk()
# let validProof = validProofRes.get()
check:
validProofRes.isOk()
let validProof = validProofRes.get()

# let validated = manager.validateRoot(validProof.merkleRoot)
let validated = manager.validateRoot(validProof.merkleRoot)

# check:
# validated == false
check:
validated == false
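
A note on the 640-byte buffer the restored test fills with random bytes: it stands in for the serialized Merkle proof elements, and the size is consistent with 20 tree levels of 32-byte field elements. The arithmetic below is only a plausibility check; the tree depth is an assumption, not something this hunk states:

import std/random

const
  assumedTreeDepth = 20    # assumption: depth of the RLN membership tree
  fieldElementSize = 32    # bytes per serialized field element

doAssert assumedTreeDepth * fieldElementSize == 640

# Fill a stand-in proof buffer with random bytes, as the test does.
var fakeProof = newSeq[byte](assumedTreeDepth * fieldElementSize)
for i in 0 ..< fakeProof.len:
  fakeProof[i] = byte(rand(255))
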
# test "verifyProof: should verify valid proof":
|
||||
# let credentials = generateCredentials(manager.rlnInstance)
|
||||
# (waitFor manager.init()).isOkOr:
|
||||
# raiseAssert $error
|
||||
test "verifyProof: should verify valid proof":
|
||||
let credentials = generateCredentials(manager.rlnInstance)
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
# let fut = newFuture[void]()
|
||||
let fut = newFuture[void]()
|
||||
|
||||
# proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
# if registrations.len == 1 and
|
||||
# registrations[0].rateCommitment ==
|
||||
# getRateCommitment(credentials, UserMessageLimit(20)).get() and
|
||||
# registrations[0].index == 0:
|
||||
# manager.idCredentials = some(credentials)
|
||||
# fut.complete()
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials, UserMessageLimit(20)).get() and
|
||||
registrations[0].index == 0:
|
||||
manager.idCredentials = some(credentials)
|
||||
fut.complete()
|
||||
|
||||
# manager.onRegister(callback)
|
||||
manager.onRegister(callback)
|
||||
|
||||
# try:
|
||||
# waitFor manager.register(credentials, UserMessageLimit(20))
|
||||
# except Exception, CatchableError:
|
||||
# assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
# waitFor fut
|
||||
try:
|
||||
waitFor manager.register(credentials, UserMessageLimit(20))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
waitFor fut
|
||||
|
||||
# let rootUpdated = waitFor manager.updateRoots()
|
||||
let rootUpdated = waitFor manager.updateRoots()
|
||||
|
||||
# if rootUpdated:
|
||||
# let proofResult = waitFor manager.fetchMerkleProofElements()
|
||||
# if proofResult.isErr():
|
||||
# error "Failed to fetch Merkle proof", error = proofResult.error
|
||||
# manager.merkleProofCache = proofResult.get()
|
||||
if rootUpdated:
|
||||
let proofResult = waitFor manager.fetchMerkleProofElements()
|
||||
if proofResult.isErr():
|
||||
error "Failed to fetch Merkle proof", error = proofResult.error
|
||||
manager.merkleProofCache = proofResult.get()
|
||||
|
||||
# let messageBytes = "Hello".toBytes()
|
||||
let messageBytes = "Hello".toBytes()
|
||||
|
||||
# # prepare the epoch
|
||||
# let epoch = default(Epoch)
|
||||
# debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
# prepare the epoch
|
||||
let epoch = default(Epoch)
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# # generate proof
|
||||
# let validProof = manager.generateProof(
|
||||
# data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
# ).valueOr:
|
||||
# raiseAssert $error
|
||||
# generate proof
|
||||
let validProof = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
).valueOr:
|
||||
raiseAssert $error
|
||||
|
||||
# let verified = manager.verifyProof(messageBytes, validProof).valueOr:
|
||||
# raiseAssert $error
|
||||
let verified = manager.verifyProof(messageBytes, validProof).valueOr:
|
||||
raiseAssert $error
|
||||
|
||||
# check:
|
||||
# verified
|
||||
check:
|
||||
verified
|
||||
|
||||
test "verifyProof: should reject invalid proof":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
@ -402,102 +403,102 @@ suite "Onchain group manager":
|
||||
|
||||
let idCredential = generateCredentials(manager.rlnInstance)
|
||||
|
||||
# try:
|
||||
# waitFor manager.register(idCredential, UserMessageLimit(20))
|
||||
# except Exception, CatchableError:
|
||||
# assert false,
|
||||
# "exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
|
||||
try:
|
||||
waitFor manager.register(idCredential, UserMessageLimit(20))
|
||||
except Exception, CatchableError:
|
||||
assert false,
|
||||
"exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
|
||||
|
||||
# let messageBytes = "Hello".toBytes()
|
||||
let messageBytes = "Hello".toBytes()
|
||||
|
||||
# let rootUpdated = waitFor manager.updateRoots()
|
||||
let rootUpdated = waitFor manager.updateRoots()
|
||||
|
||||
# manager.merkleProofCache = newSeq[byte](640)
|
||||
# for i in 0 ..< 640:
|
||||
# manager.merkleProofCache[i] = byte(rand(255))
|
||||
manager.merkleProofCache = newSeq[byte](640)
|
||||
for i in 0 ..< 640:
|
||||
manager.merkleProofCache[i] = byte(rand(255))
|
||||
|
||||
# let epoch = default(Epoch)
|
||||
# debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
let epoch = default(Epoch)
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
|
||||
# # generate proof
|
||||
# let invalidProofRes = manager.generateProof(
|
||||
# data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
# )
|
||||
# generate proof
|
||||
let invalidProofRes = manager.generateProof(
|
||||
data = messageBytes, epoch = epoch, messageId = MessageId(0)
|
||||
)
|
||||
|
||||
# check:
|
||||
# invalidProofRes.isOk()
|
||||
# let invalidProof = invalidProofRes.get()
|
||||
check:
|
||||
invalidProofRes.isOk()
|
||||
let invalidProof = invalidProofRes.get()
|
||||
|
||||
# # verify the proof (should be false)
|
||||
# let verified = manager.verifyProof(messageBytes, invalidProof).valueOr:
|
||||
# raiseAssert $error
|
||||
# verify the proof (should be false)
|
||||
let verified = manager.verifyProof(messageBytes, invalidProof).valueOr:
|
||||
raiseAssert $error
|
||||
|
||||
# check:
|
||||
# verified == false
|
||||
check:
|
||||
verified == false
|
||||
|
||||
# test "root queue should be updated correctly":
|
||||
# const credentialCount = 12
|
||||
# let credentials = generateCredentials(manager.rlnInstance, credentialCount)
|
||||
# (waitFor manager.init()).isOkOr:
|
||||
# raiseAssert $error
|
||||
test "root queue should be updated correctly":
|
||||
const credentialCount = 12
|
||||
let credentials = generateCredentials(manager.rlnInstance, credentialCount)
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
# type TestBackfillFuts = array[0 .. credentialCount - 1, Future[void]]
|
||||
# var futures: TestBackfillFuts
|
||||
# for i in 0 ..< futures.len():
|
||||
# futures[i] = newFuture[void]()
|
||||
type TestBackfillFuts = array[0 .. credentialCount - 1, Future[void]]
|
||||
var futures: TestBackfillFuts
|
||||
for i in 0 ..< futures.len():
|
||||
futures[i] = newFuture[void]()
|
||||
|
||||
# proc generateCallback(
|
||||
# futs: TestBackfillFuts, credentials: seq[IdentityCredential]
|
||||
# ): OnRegisterCallback =
|
||||
# var futureIndex = 0
|
||||
# proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
# if registrations.len == 1 and
|
||||
# registrations[0].rateCommitment ==
|
||||
# getRateCommitment(credentials[futureIndex], UserMessageLimit(20)).get() and
|
||||
# registrations[0].index == MembershipIndex(futureIndex):
|
||||
# futs[futureIndex].complete()
|
||||
# futureIndex += 1
|
||||
proc generateCallback(
|
||||
futs: TestBackfillFuts, credentials: seq[IdentityCredential]
|
||||
): OnRegisterCallback =
|
||||
var futureIndex = 0
|
||||
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
|
||||
if registrations.len == 1 and
|
||||
registrations[0].rateCommitment ==
|
||||
getRateCommitment(credentials[futureIndex], UserMessageLimit(20)).get() and
|
||||
registrations[0].index == MembershipIndex(futureIndex):
|
||||
futs[futureIndex].complete()
|
||||
futureIndex += 1
|
||||
|
||||
# return callback
|
||||
return callback
|
||||
|
||||
# try:
|
||||
# manager.onRegister(generateCallback(futures, credentials))
|
||||
try:
|
||||
manager.onRegister(generateCallback(futures, credentials))
|
||||
|
||||
# for i in 0 ..< credentials.len():
|
||||
# waitFor manager.register(credentials[i], UserMessageLimit(20))
|
||||
# discard waitFor manager.updateRoots()
|
||||
# except Exception, CatchableError:
|
||||
# assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
for i in 0 ..< credentials.len():
|
||||
waitFor manager.register(credentials[i], UserMessageLimit(20))
|
||||
discard waitFor manager.updateRoots()
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
# waitFor allFutures(futures)
|
||||
waitFor allFutures(futures)
|
||||
|
||||
# check:
|
||||
# manager.validRoots.len() == credentialCount
|
||||
check:
|
||||
manager.validRoots.len() == credentialCount
|
||||
|
||||
# test "isReady should return false if ethRpc is none":
|
||||
# (waitFor manager.init()).isOkOr:
|
||||
# raiseAssert $error
|
||||
test "isReady should return false if ethRpc is none":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
# manager.ethRpc = none(Web3)
|
||||
manager.ethRpc = none(Web3)
|
||||
|
||||
# var isReady = true
|
||||
# try:
|
||||
# isReady = waitFor manager.isReady()
|
||||
# except Exception, CatchableError:
|
||||
# assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
var isReady = true
|
||||
try:
|
||||
isReady = waitFor manager.isReady()
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
# check:
|
||||
# isReady == false
|
||||
check:
|
||||
isReady == false
|
||||
|
||||
# test "isReady should return true if ethRpc is ready":
|
||||
# (waitFor manager.init()).isOkOr:
|
||||
# raiseAssert $error
|
||||
test "isReady should return true if ethRpc is ready":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
# var isReady = false
|
||||
# try:
|
||||
# isReady = waitFor manager.isReady()
|
||||
# except Exception, CatchableError:
|
||||
# assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
var isReady = false
|
||||
try:
|
||||
isReady = waitFor manager.isReady()
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
# check:
|
||||
# isReady == true
|
||||
check:
|
||||
isReady == true
|
||||
|
||||
@@ -16,7 +16,8 @@ import
json_rpc/rpcclient,
json,
libp2p/crypto/crypto,
eth/keys
eth/keys,
results

import
waku/[
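
The results module added to this import list is what backs the Result-based error handling used by the new code paths (err, valueOr, isOkOr). A small self-contained sketch of the idiom, with made-up procedures for illustration:

import results

proc findTool(name: string): Result[string, string] =
  # Hypothetical lookup: either a path or an error message.
  if name == "forge":
    return ok("/usr/local/bin/forge")
  return err("tool not found: " & name)

proc checkTool(name: string): Result[void, string] =
  if name.len == 0:
    return err("empty tool name")
  return ok()

# valueOr unwraps the value or runs the block with `error` in scope.
let forgePath = findTool("forge").valueOr:
  raiseAssert "lookup failed: " & error

# isOkOr runs the block only on failure.
checkTool("pnpm").isOkOr:
  echo "skipping pnpm step: ", error
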
@@ -80,72 +81,6 @@ proc getForgePath*(): string =
forgePath = joinPath(forgePath, ".foundry/bin/forge")
return $forgePath

proc getPnpmPath*(): string =
# Try multiple common pnpm installation paths in order of preference
let homeDir = getEnv("HOME", "")
let xdgDataHome = getEnv("XDG_DATA_HOME", joinPath(homeDir, ".local", "share"))

let possiblePaths = [
# Check if pnpm is in PATH first (most reliable)
"pnpm",
# Self-installer locations (most common for CI/automated installs)
joinPath(xdgDataHome, "pnpm", "pnpm"),
joinPath(xdgDataHome, "pnpm", "bin", "pnpm"),
joinPath(homeDir, ".local", "share", "pnpm", "pnpm"),
joinPath(homeDir, ".local", "share", "pnpm", "bin", "pnpm"),
# Global npm installation
joinPath(homeDir, ".npm-global", "bin", "pnpm"),
# Local user installation via npm
joinPath(homeDir, ".local", "bin", "pnpm"),
# Homebrew on macOS
"/opt/homebrew/bin/pnpm",
"/usr/local/bin/pnpm",
# System-wide installations
"/usr/bin/pnpm",
"/bin/pnpm",
]

for path in possiblePaths:
if path == "pnpm":
# For bare "pnpm", check if it's available in PATH using which/where
try:
when defined(windows):
let (output, exitCode) = execCmdEx("where pnpm 2>nul")
else:
let (output, exitCode) = execCmdEx("which pnpm 2>/dev/null")

if exitCode == 0 and output.strip() != "":
return "pnpm" # Let the shell find it in PATH
except OSError, IOError:
# If execCmdEx fails, continue to next path
discard
else:
# For absolute paths, check if file exists
if fileExists(path):
return path

# If no pnpm found, try to refresh PATH and check again
debug "pnpm not found in any known location, waiting briefly and retrying"
sleep(1000) # Wait 1 second for any installation to complete

# Retry the PATH check
try:
when defined(windows):
let (output, exitCode) = execCmdEx("where pnpm 2>nul")
else:
let (output, exitCode) = execCmdEx("which pnpm 2>/dev/null")

if exitCode == 0 and output.strip() != "":
debug "Found pnpm in PATH after retry", path = output.strip()
return "pnpm"
except OSError, IOError:
discard

# If still no pnpm found, return "pnpm" as fallback and let the error be more descriptive
error "pnpm not found in any location after installation. Checked paths:",
paths = possiblePaths
return "pnpm"

contract(ERC20Token):
proc allowance(owner: Address, spender: Address): UInt256 {.view.}
proc balanceOf(account: Address): UInt256 {.view.}
@@ -245,9 +180,7 @@ proc deployTestToken*(
debug "Submodule path verified", submodulePath = submodulePath

let forgePath = getForgePath()
var pnpmPath = getPnpmPath()
debug "Forge path", forgePath
debug "Pnpm path", pnpmPath

# Verify forge executable exists
if not fileExists(forgePath):
@@ -267,40 +200,11 @@ proc deployTestToken*(
if forgeInstallExitCode != 0:
return error("forge install command failed")

# Verify pnpm is actually executable before using it
debug "Verifying pnpm path before use", pnpmPath = pnpmPath
if pnpmPath != "pnpm":
if not fileExists(pnpmPath):
return err(fmt"pnpm executable not found at path: {pnpmPath}")
else:
# For bare "pnpm", try to run the install script first to ensure pnpm is available
debug "Running pnpm install script to ensure pnpm is available"
let installScriptPath = "./scripts/install_pnpm.sh"
if fileExists(installScriptPath):
let (installOutput, installExitCode) = execCmdEx(fmt"bash {installScriptPath}")
debug "pnpm install script output",
output = installOutput, exitCode = installExitCode

# After installation, try to find the actual pnpm path
if installExitCode == 0:
let homeDir = getEnv("HOME", "")
let commonPnpmPaths = [
joinPath(homeDir, ".local", "share", "pnpm", "pnpm"),
joinPath(homeDir, ".local", "share", "pnpm", "bin", "pnpm"),
]

for possiblePath in commonPnpmPaths:
if fileExists(possiblePath):
debug "Found pnpm after installation", actualPath = possiblePath
pnpmPath = possiblePath
break

let (pnpmInstallOutput, pnpmInstallExitCode) =
execCmdEx(fmt"""cd {submodulePath} && {pnpmPath} install""")
execCmdEx(fmt"""cd {submodulePath} && pnpm install""")
trace "Executed pnpm install command", output = pnpmInstallOutput
if pnpmInstallExitCode != 0:
return
err(fmt"pnpm install command failed using path '{pnpmPath}': {pnpmInstallOutput}")
return err("pnpm install command failed" & pnpmInstallOutput)

let (forgeBuildOutput, forgeBuildExitCode) =
execCmdEx(fmt"""cd {submodulePath} && {forgePath} build""")
@@ -435,41 +339,10 @@ proc executeForgeContractDeployScripts*(

trace "contract deployer account details", account = acc, privateKey = privateKey

# Build the Foundry project with timeout monitoring
let forgeCleanProcess = startProcess(
"sh",
args = ["-c", fmt"""cd {submodulePath} && {forgePath} clean"""],
options = {poUsePath, poStdErrToStdOut},
)

let startTime = Moment.now()
let timeoutDuration = 30.seconds # 30 second timeout for clean command
var forgeCleanOutput = ""
var line = ""

while forgeCleanProcess.running and (Moment.now() - startTime) < timeoutDuration:
try:
if forgeCleanProcess.outputStream.readLine(line):
forgeCleanOutput.add(line & "\n")
trace "Forge clean output line", line = line
else:
sleep(100)
except:
break

let forgeCleanExitCode =
if (Moment.now() - startTime) >= timeoutDuration:
kill(forgeCleanProcess)
close(forgeCleanProcess)
error "Forge clean command timed out after 30 seconds"
-1
else:
let exitCode = waitForExit(forgeCleanProcess)
close(forgeCleanProcess)
exitCode

trace "Executed forge clean command",
output = forgeCleanOutput, exitCode = forgeCleanExitCode
# Build the Foundry project
let (forgeCleanOutput, forgeCleanExitCode) =
execCmdEx(fmt"""cd {submodulePath} && {forgePath} clean""")
trace "Executed forge clean command", output = forgeCleanOutput
if forgeCleanExitCode != 0:
return error("forge clean failed")

@@ -479,29 +352,11 @@ proc executeForgeContractDeployScripts*(
if forgeInstallExitCode != 0:
return error("forge install failed")

var pnpmPath = getPnpmPath()
debug "Pnpm path", pnpmPath

# If we got bare "pnpm" and it might not be in PATH, try to find the actual installed path
if pnpmPath == "pnpm":
let homeDir = getEnv("HOME", "")
let commonPnpmPaths = [
joinPath(homeDir, ".local", "share", "pnpm", "pnpm"),
joinPath(homeDir, ".local", "share", "pnpm", "bin", "pnpm"),
]

for possiblePath in commonPnpmPaths:
if fileExists(possiblePath):
debug "Found pnpm at actual path", actualPath = possiblePath
pnpmPath = possiblePath
break

let (pnpmInstallOutput, pnpmInstallExitCode) =
execCmdEx(fmt"""cd {submodulePath} && {pnpmPath} install""")
execCmdEx(fmt"""cd {submodulePath} && pnpm install""")
trace "Executed pnpm install command", output = pnpmInstallOutput
if pnpmInstallExitCode != 0:
return
err(fmt"pnpm install command failed using path '{pnpmPath}': {pnpmInstallOutput}")
return err("pnpm install command failed" & pnpmInstallOutput)

let (forgeBuildOutput, forgeBuildExitCode) =
execCmdEx(fmt"""cd {submodulePath} && {forgePath} build""")
@@ -741,71 +596,6 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
except: # TODO: Fix "BareExcept" warning
error "Anvil daemon run failed", err = getCurrentExceptionMsg()

# # Runs Anvil daemon
# proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# # Passed options are
# # --port Port to listen on.
# # --gas-limit Sets the block gas limit in WEI.
# # --balance The default account balance, specified in ether.
# # --chain-id Chain ID of the network.
# # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
# try:
# # Check for existing Anvil instances before starting a new one
# let runningInstances = checkRunningAnvilInstances()
# debug "Checking for running Anvil instances before starting",
# runningInstances = runningInstances

# let anvilPath = getAnvilPath()
# debug "Anvil path", anvilPath
# let runAnvil = startProcess(
# anvilPath,
# args = [
# "--port",
# $port,
# "--gas-limit",
# "300000000000000",
# "--balance",
# "1000000000",
# "--chain-id",
# $chainId,
# ],
# options = {poUsePath, poStdErrToStdOut},
# )
# let anvilPID = runAnvil.processID

# # Add timeout mechanism
# let startTime = Moment.now()
# let timeoutDuration = 120.seconds # 60 second timeout

# # We read stdout from Anvil to see when daemon is ready
# var anvilStartLog: string
# var cmdline: string
# while (Moment.now() - startTime) < timeoutDuration:
# if not runAnvil.running:
# error "Anvil process died unexpectedly"
# raise newException(IOError, "Anvil process failed to start")

# try:
# if runAnvil.outputstream.readLine(cmdline):
# anvilStartLog.add(cmdline)
# if cmdline.contains("Listening on 127.0.0.1:" & $port):
# break
# else:
# sleep(100)
# except Exception, CatchableError:
# break

# # Check if we timed out
# if (Moment.now() - startTime) >= timeoutDuration:
# kill(runAnvil)
# error "Anvil startup timed out after 60 seconds"
# raise newException(IOError, "Anvil startup timed out")

# debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog
# return runAnvil
# except: # TODO: Fix "BareExcept" warning
# error "Anvil daemon run failed", err = getCurrentExceptionMsg()

# Stops Anvil daemon
proc stopAnvil*(runAnvil: Process) {.used.} =
if runAnvil.isNil:
@@ -861,11 +651,6 @@ proc setupOnchainGroupManager*(
discard await sendMintCall(
web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256)
)
# let contractAddressRes =
# await executeForgeContractDeployScripts(privateKey, acc, web3)
# if contractAddressRes.isErr():
# error "Failed to deploy RLN contract", error = contractAddressRes.error
# raise newException(CatchableError, "Failed to deploy RLN contract")

let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy RLN contract: " & $error

@@ -100,7 +100,7 @@ proc sendEthCallWithChainId*(
toAddress: Address,
chainId: UInt256,
): Future[Result[UInt256, string]] {.async.} =
## Generic proc to make contract calls with no arguments and with explicit chainId (workaround for automatic chainId=null with web3 call() proc)
## Generic proc to make contract calls with no arguments and with explicit chainId (workaround for automatic chainId=null with web3 call())
##
## Args:
## ethRpc: Web3 instance for making RPC calls
@@ -301,7 +301,7 @@ method register*(
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
let idCommitmentHex = identityCredential.idCommitment.inHex()
debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
debug "identityCredential idCommitmentHex", idCommitmentNoConvert = idCommitmentHex
let idCommitment = identityCredential.idCommitment.toUInt256()
let idCommitmentsToErase: seq[UInt256] = @[]
debug "registering the member",
@@ -641,7 +641,6 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
try:
# let membershipExists =
# await wakuRlnContract.isInMembershipSet(idCommitment).call()
# The above code is not working with the latest web3 version due to chainId being null (specifically on linea-sepolia), below is the workaround
# Function signature with parameter type
let functionSignature = "isInMembershipSet(uint256)"
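
The workaround in the final hunk starts from a plain function-signature string because the call data for the explicit-chainId eth_call is assembled by hand. A rough sketch of the first step, deriving the 4-byte ABI selector from that signature; it assumes nimcrypto's keccak256 is available, which this diff does not show:

import nimcrypto

proc functionSelector(signature: string): array[4, byte] =
  # keccak-256 of the canonical signature; the first 4 bytes form the
  # selector that prefixes the ABI-encoded call data.
  let digest = keccak256.digest(signature)
  for i in 0 ..< 4:
    result[i] = digest.data[i]

let selector = functionSelector("isInMembershipSet(uint256)")
# The selector would then be followed by the 32-byte big-endian
# encoding of the uint256 idCommitment argument.
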