Support for distributed keystores with multiple remotes based on threshold signatures (#3616)

Other fixes:

* Fix bit rot in the `make prater-dev-deposit` target.
* Correct content-type in the responses of the Nimbus signing node
* Invalid JSON payload was being sent in the web3signer requests
This commit is contained in:
zah 2022-05-10 03:32:12 +03:00 committed by GitHub
parent 011e0ca02f
commit 6d11ad6ce1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 831 additions and 213 deletions

View File

@ -234,6 +234,14 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Mocked start private key OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Key spliting
```diff
+ k < n OK
+ k == n OK
+ k == n == 100 OK
+ single share OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## KeyStorage testing suite
```diff
+ Pbkdf2 errors OK
@ -313,6 +321,13 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ deletePeer() test OK
```
OK: 12/12 Fail: 0/12 Skip: 0/12
## Remove keystore testing suite
```diff
+ vesion 1 OK
+ vesion 2 many remotes OK
+ vesion 2 single remote OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Slashing Interchange tests [Preset: mainnet]
```diff
+ Slashing test: duplicate_pubkey_not_slashable.json OK
@ -529,4 +544,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 294/299 Fail: 0/299 Skip: 5/299
OK: 301/306 Fail: 0/306 Skip: 5/306

View File

@ -359,9 +359,10 @@ define MAKE_DEPOSIT
build/deposit_contract sendDeposits \
$(2) \
--deposit-contract=$$(cat vendor/eth2-network/shared/$(1)/deposit_contract.txt) \
--deposit-contract=$$(cat vendor/eth2-networks/shared/$(1)/deposit_contract.txt) \
--deposits-file=nbc-$(1)-deposits.json \
--min-delay=$(DEPOSITS_DELAY) \
--max-delay=$(DEPOSITS_DELAY) \
--ask-for-key
endef

View File

@ -391,7 +391,9 @@ proc doDeposits*(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
walletPath.wallet.nextAccount,
config.totalDeposits,
config.outValidatorsDir,
config.outSecretsDir)
config.outSecretsDir,
@[], 0, 0,
KeystoreMode.Fast)
if deposits.isErr:
fatal "Failed to generate deposits", err = deposits.error

View File

@ -27,7 +27,6 @@ type
StartUpCommand {.pure.} = enum
deploy
drain
sendEth
generateSimulationDeposits
sendDeposits
@ -56,11 +55,6 @@ type
of deploy:
discard
of drain:
drainedContractAddress* {.
desc: "Address of the contract to drain"
name: "deposit-contract" }: Eth1Address
of sendEth:
toAddress {.name: "to".}: Eth1Address
valueEth {.name: "eth".}: string
@ -82,6 +76,20 @@ type
desc: "A LaunchPad deposits file to write"
name: "out-deposits-file" }: OutFile
threshold {.
defaultValue: 1
desc: "Used to generate distributed keys"
name: "threshold" }: uint32
remoteValidatorsCount {.
defaultValue: 0
desc: "The number of distributed validators validator"
name: "remote-validators-count" }: uint32
remoteSignersUrls {.
desc: "URLs of the remote signers"
name: "remote-signer" }: seq[string]
of sendDeposits:
depositsFile {.
desc: "A LaunchPad deposits file"
@ -101,13 +109,16 @@ type
desc: "Maximum possible delay between making two deposits (in seconds)"
name: "max-delay" }: float
contract(DepositContract):
proc deposit(pubkey: Bytes48,
withdrawalCredentials: Bytes32,
signature: Bytes96,
deposit_data_root: FixedBytes[32])
type
PubKeyBytes = DynamicBytes[48, 48]
WithdrawalCredentialsBytes = DynamicBytes[32, 32]
SignatureBytes = DynamicBytes[96, 96]
proc drain()
contract(DepositContract):
proc deposit(pubkey: PubKeyBytes,
withdrawalCredentials: WithdrawalCredentialsBytes,
signature: SignatureBytes,
deposit_data_root: FixedBytes[32])
proc deployContract*(web3: Web3, code: string): Future[ReceiptObject] {.async.} =
var code = code
@ -157,22 +168,23 @@ proc sendDeposits*(deposits: seq[LaunchPadDeposit],
depositContract = depositContractAddress
var web3 = await initWeb3(web3Url, privateKey)
let gasPrice = int(await web3.provider.eth_gasPrice()) * 2
let depositContract = web3.contractSender(DepositContract,
Address depositContractAddress)
for i, launchPadDeposit in deposits:
let dp = launchPadDeposit as DepositData
for i in 4200 ..< deposits.len:
let dp = deposits[i] as DepositData
while true:
try:
let tx = depositContract.deposit(
Bytes48(dp.pubkey.toRaw()),
Bytes32(dp.withdrawal_credentials.data),
Bytes96(dp.signature.toRaw()),
PubKeyBytes(@(dp.pubkey.toRaw())),
WithdrawalCredentialsBytes(@(dp.withdrawal_credentials.data)),
SignatureBytes(@(dp.signature.toRaw())),
FixedBytes[32](hash_tree_root(dp).data))
let status = await tx.send(value = 32.u256.ethToWei, gasPrice = 1)
let status = await tx.send(value = 32.u256.ethToWei, gasPrice = gasPrice)
info "Deposit sent", status = $status
info "Deposit sent", tx = $status
if delayGenerator != nil:
await sleepAsync(delayGenerator())
@ -212,7 +224,11 @@ proc main() {.async.} =
seed,
0, conf.simulationDepositsCount,
string conf.outValidatorsDir,
string conf.outSecretsDir)
string conf.outSecretsDir,
conf.remoteSignersUrls,
conf.threshold,
conf.remoteValidatorsCount,
KeystoreMode.Fast)
if deposits.isErr:
fatal "Failed to generate deposits", err = deposits.error
@ -257,11 +273,6 @@ proc main() {.async.} =
let receipt = await web3.deployContract(contractCode)
echo receipt.contractAddress.get, ";", receipt.blockHash
of StartUpCommand.drain:
let sender = web3.contractSender(DepositContract,
conf.drainedContractAddress)
discard await sender.drain().send(gasPrice = 1)
of StartUpCommand.sendEth:
echo await sendEth(web3, conf.toAddress, conf.valueEth.parseInt)

View File

@ -181,7 +181,7 @@ template errorResponse(code: HttpCode, message: string): RestApiResponse =
RestApiResponse.response("{\"error\": \"" & message & "\"}", code)
template signatureResponse(code: HttpCode, signature: string): RestApiResponse =
RestApiResponse.response("{\"signature\": \"0x" & signature & "\"}", code)
RestApiResponse.response("{\"signature\": \"0x" & signature & "\"}", code, "application/json")
proc installApiHandlers*(node: SigningNode) =
var router = node.router()

View File

@ -81,6 +81,9 @@ proc initValidators(vc: ValidatorClientRef): Future[bool] {.async.} =
if pubkey in duplicates:
error "Duplicate validator's key found", validator_pubkey = pubkey
return false
elif keystore.kind == KeystoreKind.Remote:
info "Remote validator client was skipped", validator_pubkey = pubkey
continue
else:
duplicates.add(pubkey)
vc.attachedValidators.addLocalValidator(keystore)

View File

@ -16,8 +16,8 @@ import ".."/spec/eth2_apis/rest_keymanager_types
export rest_utils, results
proc listLocalValidators*(node: BeaconNode): seq[KeystoreInfo] {.
raises: [Defect].} =
proc listLocalValidators*(node: BeaconNode): seq[KeystoreInfo]
{.raises: [Defect].} =
var validators: seq[KeystoreInfo]
for item in node.attachedValidators[].items():
if item.kind == ValidatorKind.Local:
@ -28,14 +28,25 @@ proc listLocalValidators*(node: BeaconNode): seq[KeystoreInfo] {.
)
validators
proc listRemoteValidators*(node: BeaconNode): seq[RemoteKeystoreInfo] {.
raises: [Defect].} =
proc listRemoteValidators*(node: BeaconNode): seq[RemoteKeystoreInfo]
{.raises: [Defect].} =
var validators: seq[RemoteKeystoreInfo]
for item in node.attachedValidators[].items():
if item.kind == ValidatorKind.Remote:
if item.kind == ValidatorKind.Remote and item.data.remotes.len == 1:
validators.add RemoteKeystoreInfo(
pubkey: item.pubkey,
url: HttpHostUri(item.data.remoteUrl)
url: HttpHostUri(item.data.remotes[0].url)
)
validators
proc listRemoteDistributedValidators*(node: BeaconNode): seq[DistributedKeystoreInfo]
{.raises: [Defect].} =
var validators: seq[DistributedKeystoreInfo]
for item in node.attachedValidators[].items():
if item.kind == ValidatorKind.Remote and item.data.remotes.len > 1:
validators.add DistributedKeystoreInfo(
pubkey: item.pubkey,
remotes: item.data.remotes
)
validators
@ -62,6 +73,34 @@ proc validateUri*(url: string): Result[Uri, cstring] =
return err("Empty URL hostname")
ok(surl)
proc removeValidator(node: BeaconNode,
                     key: ValidatorPubKey): RemoteKeystoreStatus =
  ## Removes the remote keystore identified by `key` from the node's attached
  ## validator pool and translates the pool-level result into the REST
  ## `RemoteKeystoreStatus` used by the keymanager API responses.
  let res = removeValidator(node.attachedValidators[], node.config,
                            key, KeystoreKind.Remote)
  if res.isOk:
    case res.value()
    of RemoveValidatorStatus.deleted:
      return RemoteKeystoreStatus(status: KeystoreStatus.deleted)
    of RemoveValidatorStatus.notFound:
      return RemoteKeystoreStatus(status: KeystoreStatus.notFound)
  else:
    # Pool-level failure (e.g. file-system error) — surface the message.
    return RemoteKeystoreStatus(status: KeystoreStatus.error,
                                message: some($res.error()))
proc addRemoteValidator(node: BeaconNode,
                        keystore: RemoteKeystore): RequestItemStatus =
  ## Imports `keystore` into the validator pool and, on success, attaches the
  ## resulting validator to the running node. Returns the per-item status
  ## object used in keymanager API import responses.
  let res = importKeystore(node.attachedValidators[], node.config, keystore)
  if res.isErr():
    case res.error().status
    of AddValidatorStatus.failed:
      return RequestItemStatus(status: $KeystoreStatus.error,
                               message: $res.error().message)
    of AddValidatorStatus.existingArtifacts:
      # Keystore files for this pubkey already exist on disk.
      return RequestItemStatus(status: $KeystoreStatus.duplicate)
  else:
    node.addRemoteValidators([res.get()])
    return RequestItemStatus(status: $KeystoreStatus.imported)
proc installKeymanagerHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/keymanager-APIs/#/Keymanager/ListKeys
router.api(MethodGet, "/api/eth/v1/keystores") do () -> RestApiResponse:
@ -216,25 +255,17 @@ proc installKeymanagerHandlers*(router: var RestRouter, node: BeaconNode) =
var response: PostKeystoresResponse
for index, key in keys.pairs():
let keystore = RemoteKeystore(
version: 1'u64, remoteType: RemoteSignerType.Web3Signer,
pubkey: key.pubkey, remote: key.url
)
let res = importKeystore(node.attachedValidators[], node.config,
keystore)
if res.isErr():
case res.error().status
of AddValidatorStatus.failed:
response.data.add(
RequestItemStatus(status: $KeystoreStatus.error,
message: $res.error().message))
of AddValidatorStatus.existingArtifacts:
response.data.add(
RequestItemStatus(status: $KeystoreStatus.duplicate))
else:
node.addRemoteValidators([res.get()])
response.data.add(
RequestItemStatus(status: $KeystoreStatus.imported))
let
remoteInfo = RemoteSignerInfo(
url: key.url,
pubkey: key.pubkey,
id: 0)
keystore = RemoteKeystore(
version: 1'u64, remoteType: RemoteSignerType.Web3Signer,
pubkey: key.pubkey, remotes: @[remoteInfo])
status = node.addRemoteValidator(keystore)
response.data.add(status)
return RestApiResponse.jsonResponsePlain(response)
@ -255,25 +286,75 @@ proc installKeymanagerHandlers*(router: var RestRouter, node: BeaconNode) =
$dres.error())
dres.get().pubkeys
let response =
var response: DeleteRemoteKeystoresResponse
for index, key in keys.pairs():
let status = node.removeValidator(key)
response.data.add(status)
return RestApiResponse.jsonResponsePlain(response)
# TODO: These URLs will be changed once we submit a proposal for
# /api/eth/v2/remotekeys that supports distributed keys.
router.api(MethodGet, "/api/eth/v1/remotekeys/distributed") do () -> RestApiResponse:
let authStatus = checkAuthorization(request, node)
if authStatus.isErr():
return RestApiResponse.jsonError(Http401, InvalidAuthorization,
$authStatus.error())
let response = GetDistributedKeystoresResponse(data: listRemoteDistributedValidators(node))
return RestApiResponse.jsonResponsePlain(response)
# TODO: These URLs will be changed once we submit a proposal for
# /api/eth/v2/remotekeys that supports distributed keys.
router.api(MethodPost, "/api/eth/v1/remotekeys/distributed") do (
contentBody: Option[ContentBody]) -> RestApiResponse:
let authStatus = checkAuthorization(request, node)
if authStatus.isErr():
return RestApiResponse.jsonError(Http401, InvalidAuthorization,
$authStatus.error())
let keys =
block:
var resp: DeleteRemoteKeystoresResponse
for index, key in keys.pairs():
let res = removeValidator(node.attachedValidators[], node.config, key,
KeystoreKind.Remote)
if res.isOk:
case res.value()
of RemoveValidatorStatus.deleted:
resp.data.add(
RemoteKeystoreStatus(status: KeystoreStatus.deleted))
of RemoveValidatorStatus.notFound:
resp.data.add(
RemoteKeystoreStatus(status: KeystoreStatus.notFound))
else:
resp.data.add(
RemoteKeystoreStatus(status: KeystoreStatus.error,
message: some($res.error())))
resp
if contentBody.isNone():
return RestApiResponse.jsonError(Http404, EmptyRequestBodyError)
let dres = decodeBody(ImportDistributedKeystoresBody, contentBody.get())
if dres.isErr():
return RestApiResponse.jsonError(Http400, InvalidKeystoreObjects,
$dres.error())
dres.get.remote_keys
var response: PostKeystoresResponse
for index, key in keys.pairs():
let keystore = RemoteKeystore(
version: 2'u64,
remoteType: RemoteSignerType.Web3Signer,
pubkey: key.pubkey,
remotes: key.remotes,
threshold: uint32 key.threshold
)
let status = node.addRemoteValidator(keystore)
response.data.add(status)
return RestApiResponse.jsonResponsePlain(response)
router.api(MethodDelete, "/api/eth/v1/remotekeys/distributed") do (
contentBody: Option[ContentBody]) -> RestApiResponse:
let authStatus = checkAuthorization(request, node)
if authStatus.isErr():
return RestApiResponse.jsonError(Http401, InvalidAuthorization,
$authStatus.error())
let keys =
block:
if contentBody.isNone():
return RestApiResponse.jsonError(Http404, EmptyRequestBodyError)
let dres = decodeBody(DeleteKeystoresBody, contentBody.get())
if dres.isErr():
return RestApiResponse.jsonError(Http400, InvalidValidatorPublicKey,
$dres.error())
dres.get.pubkeys
var response: DeleteRemoteKeystoresResponse
for index, key in keys.pairs():
let status = node.removeValidator(key)
response.data.add(status)
return RestApiResponse.jsonResponsePlain(response)
@ -306,3 +387,18 @@ proc installKeymanagerHandlers*(router: var RestRouter, node: BeaconNode) =
MethodDelete,
"/eth/v1/remotekeys",
"/api/eth/v1/remotekeys")
router.redirect(
MethodGet,
"/eth/v1/remotekeys/distributed",
"/api/eth/v1/remotekeys/distributed")
router.redirect(
MethodPost,
"/eth/v1/remotekeys/distributed",
"/api/eth/v1/remotekeys/distributed")
router.redirect(
MethodDelete,
"/eth/v1/remotekeys/distributed",
"/api/eth/v1/remotekeys/distributed")

View File

@ -30,6 +30,7 @@ import
stew/[endians2, objects, results, byteutils],
blscurve,
chronicles,
bearssl,
json_serialization
from nimcrypto/utils import burnMem
@ -79,6 +80,14 @@ type
## ValidatorSig and are used to avoid expensive reloading as well as error
## checking
SignatureShare* = object
sign*: blscurve.Signature
id*: uint32
SecretShare* = object
key*: ValidatorPrivKey
id*: uint32
export
AggregateSignature
@ -478,3 +487,59 @@ func infinity*(T: type ValidatorSig): T =
func burnMem*(key: var ValidatorPrivKey) =
burnMem(addr key, sizeof(ValidatorPrivKey))
proc keyGen(rng: var BrHmacDrbgContext): BlsResult[blscurve.SecretKey] =
  ## Derives a fresh BLS secret key from 32 random bytes drawn from `rng`.
  ## Returns an error if blscurve's `keyGen` rejects the seed material.
  var
    bytes: array[32, byte]
    pubkey: blscurve.PublicKey
  brHmacDrbgGenerate(rng, bytes)
  # Pre-populate `result` with a default key so `result.value` can be
  # written in place by the blscurve `keyGen` call below.
  result.ok default(blscurve.SecretKey)
  if not keyGen(bytes, pubkey, result.value):
    return err "key generation failed"
proc secretShareId(x: uint32): blscurve.ID =
  ## Maps a numeric share id onto the BLS `ID` domain expected by blscurve.
  var words: array[8, uint32]  # zero-initialized; only the lowest limb is set
  words[0] = x
  blscurve.ID.fromUint32(words)
func generateSecretShares*(sk: ValidatorPrivKey,
                           rng: var BrHmacDrbgContext,
                           k: uint32, n: uint32): BlsResult[seq[SecretShare]] =
  ## Splits `sk` into `n` secret shares such that any `k` of them suffice to
  ## reconstruct the key (k-of-n threshold scheme). Fails if the underlying
  ## key generation for the random polynomial coefficients fails.
  doAssert k > 0 and k <= n
  # The secret is the constant term; k-1 random coefficients complete the
  # degree-(k-1) sharing polynomial.
  var coefficients = @[blscurve.SecretKey(sk)]
  for _ in 1 ..< k:
    coefficients.add(? keyGen(rng))
  var produced: seq[SecretShare]
  for idx in uint32(0) ..< n:
    let shareId = idx + 1 # zero is not a valid share id
    produced.add SecretShare(
      key: ValidatorPrivKey(genSecretShare(coefficients, secretShareId(shareId))),
      id: shareId)
  ok produced
func toSignatureShare*(sig: CookedSig, id: uint32): SignatureShare =
  ## Pairs a cooked signature with the id of the share that produced it.
  SignatureShare(sign: blscurve.Signature(sig), id: id)
func recoverSignature*(sings: seq[SignatureShare]): CookedSig =
  ## Recombines a threshold-sized set of signature shares into the group
  ## signature via blscurve's Lagrange recovery.
  ## NOTE(review): the parameter name `sings` looks like a typo for `signs`;
  ## kept unchanged because renaming could break named-argument callers.
  var parts: seq[blscurve.Signature]
  var ids: seq[blscurve.ID]
  for s in sings:
    parts.add s.sign
    ids.add secretShareId(s.id)
  CookedSig blscurve.recover(parts, ids).expect(
    "valid shares (validated when loading the keystore)")
proc confirmShares*(pubKey: ValidatorPubKey,
                    shares: seq[SecretShare],
                    rng: var BrHmacDrbgContext): bool =
  ## Sanity-checks a sharing: signs a random message with every share,
  ## recovers the combined signature and verifies it against the group
  ## public key. Returns true when the shares are consistent with `pubKey`.
  var message: array[32, byte]
  brHmacDrbgGenerate(rng, message)
  var collected: seq[SignatureShare]
  for share in items(shares):
    collected.add share.key.blsSign(message).toSignatureShare(share.id)
  pubKey.blsVerify(message, collected.recoverSignature())

View File

@ -58,7 +58,8 @@ type
Web3SignerRequest |
KeystoresAndSlashingProtection |
DeleteKeystoresBody |
ImportRemoteKeystoresBody
ImportRemoteKeystoresBody |
ImportDistributedKeystoresBody
EncodeArrays* =
seq[ValidatorIndex] |
@ -77,6 +78,7 @@ type
GetBlockV2Response |
GetKeystoresResponse |
GetRemoteKeystoresResponse |
GetDistributedKeystoresResponse |
GetStateV2Response |
GetStateForkResponse |
ProduceBlockResponseV2 |
@ -1214,6 +1216,7 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: ForkedHashedBeaconStat
proc writeValue*(writer: var JsonWriter[RestJson],
value: Web3SignerRequest) {.
raises: [IOError, Defect].} =
writer.beginRecord()
case value.kind
of Web3SignerRequestKind.AggregationSlot:
doAssert(value.forkInfo.isSome(),
@ -1302,6 +1305,7 @@ proc writeValue*(writer: var JsonWriter[RestJson],
writer.writeField("signingRoot", value.signingRoot)
writer.writeField("contribution_and_proof",
value.syncCommitteeContributionAndProof)
writer.endRecord()
proc readValue*(reader: var JsonReader[RestJson],
value: var Web3SignerRequest) {.
@ -1359,7 +1363,7 @@ proc readValue*(reader: var JsonReader[RestJson],
signingRoot = some(reader.readValue(Eth2Digest))
of "aggregation_slot", "aggregate_and_proof", "block", "beacon_block",
"randao_reveal", "voluntary_exit", "sync_committee_message",
"sync_aggregator_selection_data", "contribution_and_proof":
"sync_aggregator_selection_data", "contribution_and_proof", "attestation":
if data.isSome():
reader.raiseUnexpectedField("Multiple data fields found",
"Web3SignerRequest")

View File

@ -69,6 +69,20 @@ proc deleteRemoteKeysPlain*(body: DeleteKeystoresBody): RestPlainResponse {.
meth: MethodDelete.}
## https://ethereum.github.io/keymanager-APIs/#/Remote%20Key%20Manager/DeleteRemoteKeys
# Client-side presto bindings for the (Nimbus-specific) distributed remote
# keys endpoints. The `Plain` variants return the raw HTTP response so the
# caller can decode and error-check the body itself.
proc listRemoteDistributedKeysPlain*(): RestPlainResponse {.
     rest, endpoint: "/eth/v1/remotekeys/distributed",
     meth: MethodGet.}

proc importRemoteDistributedKeysPlain*(body: ImportDistributedKeystoresBody
                                      ): RestPlainResponse {.
     rest, endpoint: "/eth/v1/remotekeys/distributed",
     meth: MethodPost.}

proc deleteRemoteDistributedKeysPlain*(body: DeleteKeystoresBody): RestPlainResponse {.
     rest, endpoint: "/eth/v1/remotekeys/distributed",
     meth: MethodDelete.}
proc listRemoteKeys*(client: RestClientRef,
token: string): Future[GetRemoteKeystoresResponse] {.
async.} =

View File

@ -13,6 +13,11 @@ type
pubkey*: ValidatorPubKey
url*: HttpHostUri
DistributedKeystoreInfo* = object
threshold*: int
pubkey*: ValidatorPubKey
remotes*: seq[RemoteSignerInfo]
RequestItemStatus* = object
status*: string
message*: string
@ -31,9 +36,15 @@ type
GetRemoteKeystoresResponse* = object
data*: seq[RemoteKeystoreInfo]
GetDistributedKeystoresResponse* = object
data*: seq[DistributedKeystoreInfo]
ImportRemoteKeystoresBody* = object
remote_keys*: seq[RemoteKeystoreInfo]
ImportDistributedKeystoresBody* = object
remote_keys*: seq[DistributedKeystoreInfo]
PostKeystoresResponse* = object
data*: seq[RequestItemStatus]

View File

@ -132,6 +132,11 @@ type
HttpHostUri* = distinct Uri
RemoteSignerInfo* = object
url*: HttpHostUri
id*: uint32
pubkey*: ValidatorPubKey
KeystoreData* = object
version*: uint64
pubkey*: ValidatorPubKey
@ -142,8 +147,9 @@ type
path*: KeyPath
uuid*: string
of KeystoreKind.Remote:
remoteUrl*: HttpHostUri
flags*: set[RemoteKeystoreFlag]
remotes*: seq[RemoteSignerInfo]
threshold*: uint32
NetKeystore* = object
crypto*: Crypto
@ -160,8 +166,9 @@ type
description*: Option[string]
remoteType*: RemoteSignerType
pubkey*: ValidatorPubKey
remote*: HttpHostUri
flags*: set[RemoteKeystoreFlag]
remotes*: seq[RemoteSignerInfo]
threshold*: uint32
KsResult*[T] = Result[T, string]
@ -560,12 +567,13 @@ proc writeValue*(writer: var JsonWriter, value: HttpHostUri) {.
writer.writeValue($distinctBase(value))
# RemoteKeystore
proc writeValue*(writer: var JsonWriter, value: RemoteKeystore) {.
raises: [IOError, Defect].} =
proc writeValue*(writer: var JsonWriter, value: RemoteKeystore)
{.raises: [IOError, Defect].} =
writer.beginRecord()
writer.writeField("version", value.version)
writer.writeField("pubkey", "0x" & value.pubkey.toHex())
writer.writeField("remote", $distinctBase(value.remote))
writer.writeField("remotes", value.remotes)
writer.writeField("threshold", value.threshold)
case value.remoteType
of RemoteSignerType.Web3Signer:
writer.writeField("type", "web3signer")
@ -585,10 +593,16 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
version: Option[uint64]
description: Option[string]
remote: Option[HttpHostUri]
remotes: Option[seq[RemoteSignerInfo]]
remoteType: Option[string]
ignoreSslVerification: Option[bool]
pubkey: Option[ValidatorPubKey]
threshold: Option[uint32]
implicitVersion1 = false
# TODO: implementing deserializers for versioned objects
# manually is extremely error-prone. This should use
# the auto-generated deserializer from nim-json-serialization
for fieldName in readObjectFields(reader):
case fieldName:
of "pubkey":
@ -597,15 +611,33 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
"RemoteKeystore")
pubkey = some(reader.readValue(ValidatorPubKey))
of "remote":
if version.isSome and version.get > 1:
reader.raiseUnexpectedField(
"The `remote` field is valid only in version 1 of the remote keystore format",
"RemoteKeystore")
if remote.isSome():
reader.raiseUnexpectedField("Multiple `remote` fields found",
"RemoteKeystore")
remote = some(reader.readValue(HttpHostUri))
implicitVersion1 = true
of "remotes":
if remotes.isSome():
reader.raiseUnexpectedField("Multiple `remote` fields found",
"RemoteKeystore")
remotes = some(reader.readValue(seq[RemoteSignerInfo]))
of "version":
if version.isSome():
reader.raiseUnexpectedField("Multiple `version` fields found",
"RemoteKeystore")
version = some(reader.readValue(uint64))
if implicitVersion1 and version.get > 1'u64:
reader.raiseUnexpectedValue(
"Remote keystore format doesn't match the specified version number")
if version.get > 2'u64:
reader.raiseUnexpectedValue(
"Remote keystore version " & $version.get &
" requires a more recent version of Nimbus")
of "description":
let res = reader.readValue(string)
if description.isSome():
@ -620,16 +652,28 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
of "type":
if remoteType.isSome():
reader.raiseUnexpectedField("Multiple `type` fields found",
"RemoteKeystore")
"RemoteKeystore")
remoteType = some(reader.readValue(string))
of "threshold":
if threshold.isSome():
reader.raiseUnexpectedField("Multiple `threshold` fields found",
"RemoteKeystore")
threshold = some(reader.readValue(uint32))
else:
# Ignore unknown field names.
discard
if version.isNone():
reader.raiseUnexpectedValue("Field `version` is missing")
if remote.isNone():
reader.raiseUnexpectedValue("Field `remote` is missing")
if remotes.isNone():
if remote.isSome and pubkey.isSome:
remotes = some @[RemoteSignerInfo(
pubkey: pubkey.get,
id: 0,
url: remote.get
)]
else:
reader.raiseUnexpectedValue("Field `remotes` is missing")
if pubkey.isNone():
reader.raiseUnexpectedValue("Field `pubkey` is missing")
@ -652,12 +696,12 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
res
value = RemoteKeystore(
version: version.get(),
remote: remote.get(),
pubkey: pubkey.get(),
version: 2'u64,
pubkey: pubkey.get,
description: description,
remoteType: keystoreType,
flags: keystoreFlags
remotes: remotes.get,
threshold: threshold.get(1),
)
template writeValue*(w: var JsonWriter,
@ -900,13 +944,18 @@ proc createRemoteKeystore*(pubKey: ValidatorPubKey, remoteUri: HttpHostUri,
version = 1'u64, description = "",
remoteType = RemoteSignerType.Web3Signer,
flags: set[RemoteKeystoreFlag] = {}): RemoteKeystore =
let signerInfo = RemoteSignerInfo(
url: remoteUri,
pubkey: pubKey,
id: 0
)
RemoteKeystore(
version: version,
description: if len(description) > 0: some(description)
else: none[string](),
remoteType: remoteType,
pubkey: pubKey,
remote: remoteUri,
remotes: @[signerInfo],
flags: flags
)

View File

@ -105,16 +105,18 @@ func init*(T: type KeystoreData,
pubkey: cookedKey.toPubKey,
description: keystore.description,
version: keystore.version,
remoteUrl: keystore.remote
remotes: keystore.remotes,
threshold: keystore.threshold
)
func init*(T: type KeystoreData, cookedKey: CookedPubKey,
remoteUrl: HttpHostUri): T =
remotes: seq[RemoteSignerInfo], threshold: uint32): T =
KeystoreData(
kind: KeystoreKind.Remote,
pubkey: cookedKey.toPubKey(),
version: 1'u64,
remoteUrl: remoteUrl
version: 2'u64,
remotes: remotes,
threshold: threshold
)
func init(T: type AddValidatorFailure, status: AddValidatorStatus,
@ -518,15 +520,14 @@ proc removeValidatorFiles*(validatorsDir, secretsDir, keyName: string,
ok(RemoveValidatorStatus.deleted)
proc removeValidatorFiles*(conf: AnyConf, keyName: string,
kind: KeystoreKind
): KmResult[RemoveValidatorStatus] {.
raises: [Defect].} =
kind: KeystoreKind): KmResult[RemoveValidatorStatus]
{.raises: [Defect].} =
removeValidatorFiles(conf.validatorsDir(), conf.secretsDir(), keyName, kind)
proc removeValidator*(pool: var ValidatorPool, conf: AnyConf,
publicKey: ValidatorPubKey,
kind: KeystoreKind): KmResult[RemoveValidatorStatus] {.
raises: [Defect].} =
kind: KeystoreKind): KmResult[RemoveValidatorStatus]
{.raises: [Defect].} =
let validator = pool.getValidator(publicKey)
if isNil(validator):
return ok(RemoveValidatorStatus.notFound)
@ -822,21 +823,26 @@ proc saveKeystore*(rng: var BrHmacDrbgContext,
ok()
proc saveKeystore*(validatorsDir: string,
publicKey: ValidatorPubKey, url: HttpHostUri,
version = 1'u64,
publicKey: ValidatorPubKey,
urls: seq[RemoteSignerInfo],
threshold: uint32,
flags: set[RemoteKeystoreFlag] = {},
remoteType = RemoteSignerType.Web3Signer,
desc = ""): Result[void, KeystoreGenerationError] {.
raises: [Defect].} =
desc = ""): Result[void, KeystoreGenerationError]
{.raises: [Defect].} =
let
keyName = "0x" & publicKey.toHex()
keystoreDir = validatorsDir / keyName
keystoreFile = keystoreDir / RemoteKeystoreFileName
keystoreDesc = if len(desc) == 0: none[string]() else: some(desc)
keyStore = RemoteKeystore(
version: version, description: keystoreDesc, remoteType: remoteType,
pubkey: publicKey, remote: url, flags: flags
)
version: 2'u64,
description: keystoreDesc,
remoteType: remoteType,
pubkey: publicKey,
threshold: threshold,
remotes: urls,
flags: flags)
if dirExists(keystoreDir):
return err(KeystoreGenerationError(kind: DuplicateKeystoreDir,
@ -857,18 +863,28 @@ proc saveKeystore*(validatorsDir: string,
encodedStorage)
ok()
proc saveKeystore*(conf: AnyConf, publicKey: ValidatorPubKey, url: HttpHostUri,
version = 1'u64,
proc saveKeystore*(validatorsDir: string,
                   publicKey: ValidatorPubKey,
                   url: HttpHostUri): Result[void, KeystoreGenerationError]
                  {.raises: [Defect].} =
  ## Convenience overload for the plain (non-distributed) remote signer case:
  ## wraps `url` into a single RemoteSignerInfo and stores it with threshold 1.
  let remoteInfo = RemoteSignerInfo(url: url, id: 0)
  saveKeystore(validatorsDir, publicKey, @[remoteInfo], 1)
proc saveKeystore*(conf: AnyConf,
publicKey: ValidatorPubKey,
remotes: seq[RemoteSignerInfo],
threshold: uint32,
flags: set[RemoteKeystoreFlag] = {},
remoteType = RemoteSignerType.Web3Signer,
desc = ""): Result[void, KeystoreGenerationError] {.
raises: [Defect].} =
saveKeystore(conf.validatorsDir(), publicKey, url, version, flags,
remoteType, desc)
desc = ""): Result[void, KeystoreGenerationError]
{.raises: [Defect].} =
saveKeystore(
conf.validatorsDir(),
publicKey, remotes, threshold, flags, remoteType, desc)
proc importKeystore*(pool: var ValidatorPool, conf: AnyConf,
keystore: RemoteKeystore): ImportResult[KeystoreData] {.
raises: [Defect].} =
keystore: RemoteKeystore): ImportResult[KeystoreData]
{.raises: [Defect].} =
let
publicKey = keystore.pubkey
keyName = "0x" & publicKey.toHex()
@ -894,11 +910,11 @@ proc importKeystore*(pool: var ValidatorPool, conf: AnyConf,
if existsKeystore(keystoreDir, {KeystoreKind.Local, KeystoreKind.Remote}):
return err(AddValidatorFailure.init(AddValidatorStatus.existingArtifacts))
let res = saveKeystore(conf, publicKey, keystore.remote)
let res = saveKeystore(conf, publicKey, keystore.remotes, keystore.threshold)
if res.isErr():
return err(AddValidatorFailure.init(AddValidatorStatus.failed,
$res.error()))
ok(KeystoreData.init(cookedKey, keystore.remote))
ok(KeystoreData.init(cookedKey, keystore.remotes, keystore.threshold))
proc importKeystore*(pool: var ValidatorPool,
rng: var BrHmacDrbgContext,
@ -940,12 +956,45 @@ proc importKeystore*(pool: var ValidatorPool,
ok(KeystoreData.init(privateKey, keystore))
# NOTE(review): "Distirbuted" is a typo for "Distributed" in the proc name;
# kept as-is because callers reference it by this spelling.
proc generateDistirbutedStore*(rng: var BrHmacDrbgContext,
                               shares: seq[SecretShare],
                               pubKey: ValidatorPubKey,
                               validatorIdx: Natural,
                               shareSecretsDir: string,
                               shareValidatorDir: string,
                               remoteValidatorDir: string,
                               remoteSignersUrls: seq[string],
                               threshold: uint32): Result[void, KeystoreGenerationError] =
  ## Persists one distributed validator: each secret share is written as a
  ## local keystore (one per remote signer, under per-index subdirectories),
  ## and the validator itself is stored as a remote keystore pointing at the
  ## collected signer infos with the given signing `threshold`.
  ## Assumes `remoteSignersUrls.len == shares.len` — TODO confirm at call
  ## sites; a shorter URL list would cause an index error below.
  var signers: seq[RemoteSignerInfo]
  for (idx, share) in pairs(shares):
    # Fresh random password per share; scrubbed via `defer: burnMem` when
    # the surrounding scope exits.
    var password = KeystorePass.init ncrutils.toHex(getRandomBytes(rng, 32))
    # remote signer shares
    defer: burnMem(password)
    ? saveKeystore(rng,
                   shareValidatorDir & "/" & $idx,
                   shareSecretsDir & "/" & $idx,
                   share.key, share.key.toPubKey,
                   makeKeyPath(validatorIdx, signingKeyKind),
                   password.str,
                   KeystoreMode.Secure)
    signers.add RemoteSignerInfo(
      url: HttpHostUri(parseUri(remoteSignersUrls[idx])),
      id: share.id,
      pubkey: share.key.toPubKey.toPubKey)
  # actual validator
  saveKeystore(remoteValidatorDir, pubKey, signers, threshold)
proc generateDeposits*(cfg: RuntimeConfig,
rng: var BrHmacDrbgContext,
seed: KeySeed,
firstValidatorIdx, totalNewValidators: int,
validatorsDir: string,
secretsDir: string,
remoteSignersUrls: seq[string] = @[],
threshold: uint32 = 1,
remoteValidatorsCount: uint32 = 0,
mode = Secure): Result[seq[DepositData],
KeystoreGenerationError] =
var deposits: seq[DepositData]
@ -958,7 +1007,8 @@ proc generateDeposits*(cfg: RuntimeConfig,
defer: burnMem(baseKey)
baseKey = deriveChildKey(baseKey, baseKeyPath)
for i in 0 ..< totalNewValidators:
let localValidatorsCount = totalNewValidators - int(remoteValidatorsCount)
for i in 0 ..< localValidatorsCount:
let validatorIdx = firstValidatorIdx + i
# We'll reuse a single variable here to make the secret
@ -970,6 +1020,7 @@ proc generateDeposits*(cfg: RuntimeConfig,
let withdrawalPubKey = derivedKey.toPubKey
derivedKey = deriveChildKey(derivedKey, 0) # This is the signing key
let signingPubKey = derivedKey.toPubKey
var password = KeystorePass.init ncrutils.toHex(getRandomBytes(rng, 32))
defer: burnMem(password)
? saveKeystore(rng, validatorsDir, secretsDir,
@ -980,6 +1031,39 @@ proc generateDeposits*(cfg: RuntimeConfig,
deposits.add prepareDeposit(
cfg, withdrawalPubKey, derivedKey, signingPubKey)
for i in 0 ..< remoteValidatorsCount:
let validatorIdx = int(firstValidatorIdx) + localValidatorsCount + int(i)
# We'll reuse a single variable here to make the secret
# scrubbing (burnMem) easier to handle:
var derivedKey = baseKey
defer: burnMem(derivedKey)
derivedKey = deriveChildKey(derivedKey, validatorIdx)
derivedKey = deriveChildKey(derivedKey, 0) # This is witdrawal key
let withdrawalPubKey = derivedKey.toPubKey
derivedKey = deriveChildKey(derivedKey, 0) # This is the signing key
let signingPubKey = derivedKey.toPubKey
let sharesCount = uint32 len(remoteSignersUrls)
let shares = generateSecretShares(derivedKey, rng, threshold, sharesCount)
if shares.isErr():
error "Failed to generate distributed key: ", threshold, sharesCount
continue
? generateDistirbutedStore(rng,
shares.get,
signingPubKey.toPubKey,
validatorIdx,
secretsDir & "_shares",
validatorsDir & "_shares",
validatorsDir,
remoteSignersUrls,
threshold)
deposits.add prepareDeposit(
cfg, withdrawalPubKey, derivedKey, signingPubKey)
ok deposits
proc saveWallet*(wallet: Wallet, outWalletPath: string): Result[void, string] =

View File

@ -101,6 +101,7 @@ proc addLocalValidator(node: BeaconNode, validators: auto,
proc addRemoteValidator(pool: var ValidatorPool, validators: auto,
item: KeystoreData) =
var clients: seq[(RestClientRef, RemoteSignerInfo)]
let httpFlags =
block:
var res: set[HttpClientFlag]
@ -109,13 +110,14 @@ proc addRemoteValidator(pool: var ValidatorPool, validators: auto,
HttpClientFlag.NoVerifyServerName})
res
let prestoFlags = {RestClientFlag.CommaSeparatedArray}
let client = RestClientRef.new($item.remoteUrl, prestoFlags, httpFlags)
if client.isErr():
warn "Unable to resolve remote signer address",
remote_url = $item.remoteUrl, validator = item.pubkey
return
for remote in item.remotes:
let client = RestClientRef.new($remote.url, prestoFlags, httpFlags)
if client.isErr():
warn "Unable to resolve distributed signer address",
remote_url = $remote.url, validator = $remote.pubkey
clients.add((client.get(), remote))
let index = findValidator(validators, item.pubkey)
pool.addRemoteValidator(item, client.get(), index)
pool.addRemoteValidator(item, clients, index)
proc addLocalValidators*(node: BeaconNode,
validators: openArray[KeystoreData]) =
@ -133,7 +135,7 @@ proc addRemoteValidators*(node: BeaconNode,
proc addValidators*(node: BeaconNode) =
let (localValidators, remoteValidators) =
block:
var local, remote: seq[KeystoreData]
var local, remote, distributed: seq[KeystoreData]
for keystore in listLoadableKeystores(node.config):
case keystore.kind
of KeystoreKind.Local:

View File

@ -8,7 +8,7 @@
{.push raises: [Defect].}
import
std/[options, tables, json, streams, uri],
std/[options, tables, json, streams, sequtils, uri],
chronos, chronicles, metrics,
json_serialization/std/net,
presto, presto/client,
@ -40,7 +40,8 @@ type
of ValidatorKind.Local:
discard
of ValidatorKind.Remote:
client*: RestClientRef
clients*: seq[(RestClientRef, RemoteSignerInfo)]
threshold*: uint32
# The index at which this validator has been observed in the chain -
# it does not change as long as there are no reorgs on eth1 - however, the
@ -67,7 +68,7 @@ func shortLog*(v: AttachedValidator): string =
of ValidatorKind.Local:
shortLog(v.pubkey)
of ValidatorKind.Remote:
shortLog(v.pubkey) & "@" & $v.client.address.getUri()
shortLog(v.pubkey)
func init*(T: type ValidatorPool,
slashingProtectionDB: SlashingProtectionDB): T =
@ -82,6 +83,7 @@ template count*(pool: ValidatorPool): int =
proc addLocalValidator*(pool: var ValidatorPool, item: KeystoreData,
index: Option[ValidatorIndex]) =
doAssert item.kind == KeystoreKind.Local
let pubkey = item.pubkey
let v = AttachedValidator(kind: ValidatorKind.Local, pubkey: pubkey,
index: index, data: item)
@ -93,13 +95,14 @@ proc addLocalValidator*(pool: var ValidatorPool, item: KeystoreData) =
addLocalValidator(pool, item, none[ValidatorIndex]())
proc addRemoteValidator*(pool: var ValidatorPool, item: KeystoreData,
client: RestClientRef, index: Option[ValidatorIndex]) =
clients: seq[(RestClientRef, RemoteSignerInfo)], index: Option[ValidatorIndex]) =
doAssert item.kind == KeystoreKind.Remote
let pubkey = item.pubkey
let v = AttachedValidator(kind: ValidatorKind.Remote, pubkey: pubkey,
index: index, data: item, client: client)
index: index, data: item, clients: clients)
pool.validators[pubkey] = v
notice "Remote validator attached", pubkey, validator = shortLog(v),
remote_signer = $item.remoteUrl
remote_signer = $item.remotes
validators.set(pool.count().int64)
proc getValidator*(pool: ValidatorPool,
@ -147,92 +150,142 @@ iterator items*(pool: ValidatorPool): AttachedValidator =
for item in pool.validators.values():
yield item
proc signWithDistributedKey(v: AttachedValidator,
request: Web3SignerRequest): Future[SignatureResult]
{.async.} =
## Sends `request` to every remote signer holding a share of this
## validator's key, then recovers the aggregate signature from the
## collected signature shares once `v.data.threshold` of them succeeded.
## Returns an error result when fewer than `threshold` signers responded
## with a valid signature.
# Sanity: a k-of-n scheme cannot have a threshold above the share count.
doAssert v.data.threshold <= uint32(v.clients.len)
# Fire all signing requests concurrently and wait for every future to
# settle (allFutures does not raise on individual failures).
let signatureReqs = mapIt(v.clients, it[0].signData(it[1].pubkey, request))
await allFutures(signatureReqs)
var shares: seq[SignatureShare]
var neededShares = v.data.threshold
for i, req in signatureReqs:
# Convenience alias for the RemoteSignerInfo paired with client `i`.
template shareInfo: untyped = v.clients[i][1]
if req.done and req.read.isOk:
# Tag the signature with the share id so recovery can interpolate it.
shares.add req.read.get.toSignatureShare(shareInfo.id)
neededShares = neededShares - 1
else:
warn "Failed to obtain signature from remote signer",
pubkey = shareInfo.pubkey,
signerUrl = $(v.clients[i][0].address)
# NOTE(review): this rendering has lost indentation; presumably this
# early-exit check sits INSIDE the loop so recovery happens as soon as
# `threshold` shares are in (also avoiding uint32 underflow of
# `neededShares` when more than `threshold` signers succeed) — confirm
# against the repository.
if neededShares == 0:
let recovered = shares.recoverSignature()
return SignatureResult.ok recovered.toValidatorSig
return SignatureResult.err "Not enough shares to recover the signature"
proc signWithSingleKey(v: AttachedValidator,
                       request: Web3SignerRequest): Future[SignatureResult]
                      {.async.} =
  ## Forwards `request` to the validator's single remote signer and maps
  ## the response into a `SignatureResult` (plain, non-threshold setup).
  doAssert v.clients.len == 1
  let (signerClient, signerInfo) = v.clients[0]
  let response = await signerClient.signData(signerInfo.pubkey, request)
  return
    if response.isOk:
      SignatureResult.ok response.get.toValidatorSig
    else:
      SignatureResult.err response.error
proc signData(v: AttachedValidator,
              request: Web3SignerRequest): Future[SignatureResult]
             {.async.} =
  ## Dispatches a Web3Signer request for a remote validator: single-signer
  ## setups go through `signWithSingleKey`, threshold setups through
  ## `signWithDistributedKey`. Local validators are rejected — they sign
  ## with their in-process private key, not through this path.
  case v.kind
  of ValidatorKind.Local:
    return SignatureResult.err "Invalid validator kind"
  of ValidatorKind.Remote:
    if v.clients.len == 1:
      return await v.signWithSingleKey(request)
    else:
      return await v.signWithDistributedKey(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
blck: ForkedBeaconBlock): Future[SignResponse] {.
async.} =
blck: ForkedBeaconBlock): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, blck)
debug "Signing block proposal using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
adata: AttestationData): Future[SignResponse] {.
async.} =
adata: AttestationData): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, adata)
debug "Signing block proposal using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
epoch: Epoch): Future[SignResponse] {.
async.} =
epoch: Epoch): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, epoch)
debug "Generating randao reveal signature using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
proof: AggregateAndProof): Future[SignResponse] {.
async.} =
proof: AggregateAndProof): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, proof)
debug "Signing aggregate and proof using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
slot: Slot): Future[SignResponse] {.
async.} =
slot: Slot): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, slot)
debug "Signing aggregate slot using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
slot: Slot,
blockRoot: Eth2Digest): Future[SignResponse] {.
async.} =
blockRoot: Eth2Digest): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(fork, genesis_validators_root, blockRoot,
slot)
debug "Signing sync committee message using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
slot: Slot,
subIndex: uint64): Future[SignResponse] {.
async.} =
subIndex: uint64): Future[SignatureResult]
{.async.} =
let request = Web3SignerRequest.init(
fork, genesis_validators_root,
SyncAggregatorSelectionData(slot: slot, subcommittee_index: subIndex),
)
debug "Signing sync aggregator selection data using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
proc signWithRemoteValidator*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
contribution: ContributionAndProof
): Future[SignResponse] {.
async.} =
): Future[SignatureResult] {.async.} =
let request = Web3SignerRequest.init(
fork, genesis_validators_root, contribution
)
debug "Signing sync contribution and proof message using remote signer",
validator = shortLog(v)
return await v.client.signData(v.pubkey, request)
return await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#signature
proc signBlockProposal*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot,
blockRoot: Eth2Digest, blck: ForkedBeaconBlock
): Future[SignatureResult] {.async.} =
): Future[SignatureResult] {.async.} =
return
case v.kind
of ValidatorKind.Local:
@ -241,17 +294,13 @@ proc signBlockProposal*(v: AttachedValidator, fork: Fork,
v.data.privateKey).toValidatorSig()
)
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
blck)
if res.isErr():
SignatureResult.err(res.error())
else:
SignatureResult.ok(res.get().toValidatorSig())
proc signAttestation*(v: AttachedValidator,
data: AttestationData,
fork: Fork, genesis_validators_root: Eth2Digest):
Future[SignatureResult] {.async.} =
fork: Fork, genesis_validators_root: Eth2Digest
): Future[SignatureResult] {.async.} =
return
case v.kind
of ValidatorKind.Local:
@ -260,12 +309,7 @@ proc signAttestation*(v: AttachedValidator,
v.data.privateKey).toValidatorSig()
)
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
data)
if res.isErr():
SignatureResult.err(res.error())
else:
SignatureResult.ok(res.get().toValidatorSig())
await signWithRemoteValidator(v, fork, genesis_validators_root, data)
proc produceAndSignAttestation*(validator: AttachedValidator,
attestationData: AttestationData,
@ -302,12 +346,8 @@ proc signAggregateAndProof*(v: AttachedValidator,
v.data.privateKey).toValidatorSig()
)
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
aggregate_and_proof)
if res.isErr():
SignatureResult.err(res.error())
else:
SignatureResult.ok(res.get().toValidatorSig())
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#prepare-sync-committee-message
proc signSyncCommitteeMessage*(v: AttachedValidator,
@ -319,15 +359,15 @@ proc signSyncCommitteeMessage*(v: AttachedValidator,
let signature =
case v.kind
of ValidatorKind.Local:
get_sync_committee_message_signature(
SignatureResult.ok(get_sync_committee_message_signature(
fork, genesis_validators_root, slot, beacon_block_root,
v.data.privateKey).toValidatorSig()
v.data.privateKey).toValidatorSig())
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
slot, beacon_block_root)
if res.isErr():
return SyncCommitteeMessageResult.err(res.error())
res.get().toValidatorSig()
if signature.isErr:
return SyncCommitteeMessageResult.err("Failed to obtain signature")
return
SyncCommitteeMessageResult.ok(
@ -335,7 +375,7 @@ proc signSyncCommitteeMessage*(v: AttachedValidator,
slot: slot,
beacon_block_root: beacon_block_root,
validator_index: uint64(v.index.get()),
signature: signature
signature: signature.get()
)
)
@ -353,29 +393,26 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator,
fork, genesis_validators_root, slot, subcommittee_index,
v.data.privateKey).toValidatorSig())
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
slot, subcommittee_index)
if res.isErr():
SignatureResult.err(res.error())
else:
SignatureResult.ok(res.get().toValidatorSig())
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#signature
proc sign*(v: AttachedValidator, msg: ref SignedContributionAndProof,
fork: Fork, genesis_validators_root: Eth2Digest
): Future[SignatureResult] {.async.} =
msg.signature =
let signature =
case v.kind
of ValidatorKind.Local:
get_contribution_and_proof_signature(
fork, genesis_validators_root, msg.message, v.data.privateKey).toValidatorSig()
SignatureResult.ok(get_contribution_and_proof_signature(
fork, genesis_validators_root, msg.message, v.data.privateKey).toValidatorSig())
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
msg.message)
if res.isErr():
return SignatureResult.err(res.error())
res.get().toValidatorSig()
return SignatureResult.ok(msg.signature)
if signature.isOk:
msg.signature = signature.get()
return signature
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#randao-reveal
func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork,
@ -393,30 +430,25 @@ proc genRandaoReveal*(v: AttachedValidator, fork: Fork,
genesis_validators_root,
slot).toValidatorSig())
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
await signWithRemoteValidator(v, fork, genesis_validators_root,
slot.epoch())
if res.isErr():
SignatureResult.err(res.error())
else:
SignatureResult.ok(res.get().toValidatorSig())
proc getSlotSig*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot
): Future[SignatureResult] {.async.} =
if v.slotSignature.isSome() and v.slotSignature.get().slot == slot:
return SignatureResult.ok(v.slotSignature.get().signature)
if v.slotSignature.isSome and v.slotSignature.get.slot == slot:
return SignatureResult.ok(v.slotSignature.get.signature)
let signature =
case v.kind
of ValidatorKind.Local:
get_slot_signature(fork, genesis_validators_root, slot,
v.data.privateKey).toValidatorSig()
SignatureResult.ok(get_slot_signature(fork, genesis_validators_root, slot,
v.data.privateKey).toValidatorSig())
of ValidatorKind.Remote:
let res = await signWithRemoteValidator(v, fork, genesis_validators_root,
slot)
if res.isErr():
return SignatureResult.err(res.error())
res.get().toValidatorSig()
await signWithRemoteValidator(v, fork, genesis_validators_root, slot)
v.slotSignature = some((slot, signature))
return SignatureResult.ok(signature)
if signature.isErr:
return signature
v.slotSignature = some((slot, signature.get))
return signature

View File

@ -43,7 +43,7 @@ if [[ ${PIPESTATUS[0]} != 4 ]]; then
fi
OPTS="ht:n:d:g"
LONGOPTS="help,preset:,nodes:,data-dir:,with-ganache,stop-at-epoch:,disable-htop,disable-vc,enable-logtrace,log-level:,base-port:,base-rest-port:,base-metrics-port:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:"
LONGOPTS="help,preset:,nodes:,data-dir:,remote-validators-count:,threshold:,remote-signers:,with-ganache,stop-at-epoch:,disable-htop,disable-vc,enable-logtrace,log-level:,base-port:,base-rest-port:,base-metrics-port:,reuse-existing-data-dir,reuse-binaries,timeout:,kill-old-processes,eth2-docker-image:,lighthouse-vc-nodes:"
# default values
NUM_NODES="10"
@ -54,6 +54,7 @@ LIGHTHOUSE_VC_NODES="0"
USE_GANACHE="0"
LOG_LEVEL="DEBUG; TRACE:networking"
BASE_PORT="9000"
BASE_REMOTE_SIGNER_PORT="6000"
BASE_METRICS_PORT="8008"
BASE_REST_PORT="7500"
REUSE_EXISTING_DATA_DIR="0"
@ -64,6 +65,9 @@ TIMEOUT_DURATION="0"
CONST_PRESET="mainnet"
KILL_OLD_PROCESSES="0"
ETH2_DOCKER_IMAGE=""
REMOTE_SIGNER_NODES=0
REMOTE_SIGNER_THRESHOLD=1
REMOTE_VALIDATORS_COUNT=0
print_help() {
cat <<EOF
@ -94,6 +98,10 @@ CI run: $(basename "$0") --disable-htop -- --verify-finalization
--timeout timeout in seconds (default: ${TIMEOUT_DURATION} - no timeout)
--kill-old-processes if any process is found listening on a port we use, kill it (default: disabled)
--eth2-docker-image use docker image instead of compiling the beacon node
--remote-validators-count number of remote validators which will be generated
--threshold used by a threshold secret sharing mechanism and determine how many shares are need to
restore signature of the original secret key
--remote-signers number of remote signing nodes
EOF
}
@ -115,6 +123,18 @@ while true; do
NUM_NODES="$2"
shift 2
;;
--remote-signers)
REMOTE_SIGNER_NODES=$2
shift 2
;;
--remote-validators-count)
REMOTE_VALIDATORS_COUNT=$2
shift 2
;;
--threshold)
REMOTE_SIGNER_THRESHOLD=$2
shift 2
;;
-d|--data-dir)
DATA_DIR="$2"
shift 2
@ -292,6 +312,10 @@ fi
# Build the binaries
BINARIES="deposit_contract"
if [ "$REMOTE_SIGNER_NODES" -ge "0" ]; then
BINARIES="${BINARIES} nimbus_signing_node"
fi
if [[ "${USE_VC}" == "1" ]]; then
BINARIES="${BINARIES} nimbus_validator_client"
fi
@ -330,10 +354,12 @@ fi
cleanup() {
pkill -f -P $$ nimbus_beacon_node &>/dev/null || true
pkill -f -P $$ nimbus_validator_client &>/dev/null || true
pkill -f -P $$ nimbus_signing_node &>/dev/null || true
pkill -f -P $$ ${LH_BINARY} &>/dev/null || true
sleep 2
pkill -f -9 -P $$ nimbus_beacon_node &>/dev/null || true
pkill -f -9 -P $$ nimbus_validator_client &>/dev/null || true
pkill -f -9 -P $$ nimbus_signing_node &>/dev/null || true
pkill -f -9 -P $$ ${LH_BINARY} &>/dev/null || true
# Delete all binaries we just built, because these are unusable outside this
@ -362,6 +388,13 @@ if [[ "${TIMEOUT_DURATION}" != "0" ]]; then
( sleep ${TIMEOUT_DURATION} && kill -ALRM ${PARENT_PID} ) 2>/dev/null & WATCHER_PID=$!
fi
REMOTE_URLS=""
for NUM_REMOTE in $(seq 0 $(( REMOTE_SIGNER_NODES - 1 ))); do
REMOTE_PORT=$(( BASE_REMOTE_SIGNER_PORT + NUM_REMOTE ))
REMOTE_URLS="${REMOTE_URLS} --remote-signer=http://127.0.0.1:${REMOTE_PORT}"
done
# deposit and testnet creation
PIDS=""
WEB3_ARG=""
@ -379,7 +412,10 @@ if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then
--count=${TOTAL_VALIDATORS} \
--out-validators-dir="${VALIDATORS_DIR}" \
--out-secrets-dir="${SECRETS_DIR}" \
--out-deposits-file="${DEPOSITS_FILE}"
--out-deposits-file="${DEPOSITS_FILE}" \
--threshold=${REMOTE_SIGNER_THRESHOLD} \
--remote-validators-count=${REMOTE_VALIDATORS_COUNT} \
${REMOTE_URLS}
fi
if [[ $USE_GANACHE == "0" ]]; then
@ -497,6 +533,11 @@ if [[ "${USE_VC}" == "1" ]]; then
VALIDATORS_PER_NODE=$((VALIDATORS_PER_NODE / 2 ))
NUM_JOBS=$((NUM_JOBS * 2 ))
fi
if [ "$REMOTE_SIGNER_NODES" -ge "0" ]; then
NUM_JOBS=$((NUM_JOBS + REMOTE_SIGNER_NODES ))
fi
VALIDATORS_PER_VALIDATOR=$(( (SYSTEM_VALIDATORS / NODES_WITH_VALIDATORS) / 2 ))
VALIDATOR_OFFSET=$((SYSTEM_VALIDATORS / 2))
@ -523,8 +564,13 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
scripts/makedir.sh "${VALIDATOR_DATA_DIR}/validators" 2>&1
scripts/makedir.sh "${VALIDATOR_DATA_DIR}/secrets" 2>&1
for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( $USER_VALIDATORS + ($VALIDATORS_PER_VALIDATOR * $NUM_NODE) + 1 + $VALIDATOR_OFFSET )) | head -n $VALIDATORS_PER_VALIDATOR); do
cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/validators/" 2>&1
cp -a "${SECRETS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/secrets/" 2>&1
if [[ -f "${VALIDATORS_DIR}/${VALIDATOR}/keystore.json" ]]; then
cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/validators/" 2>&1
cp -a "${SECRETS_DIR}/${VALIDATOR}" "${VALIDATOR_DATA_DIR}/secrets/" 2>&1
else
# TODO: validators support remote signers
cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/validators/" 2>&1
fi
done
if [[ "${OS}" == "Windows_NT" ]]; then
find "${VALIDATOR_DATA_DIR}" -type f \( -iname "*.json" -o ! -iname "*.*" \) -exec icacls "{}" /inheritance:r /grant:r ${USERDOMAIN}\\${USERNAME}:\(F\) \;
@ -532,7 +578,10 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
fi
for VALIDATOR in $(ls "${VALIDATORS_DIR}" | tail -n +$(( $USER_VALIDATORS + ($VALIDATORS_PER_NODE * $NUM_NODE) + 1 )) | head -n $VALIDATORS_PER_NODE); do
cp -a "${VALIDATORS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/validators/" 2>&1
cp -a "${SECRETS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/secrets/" 2>&1
if [[ -f "${VALIDATORS_DIR}/${VALIDATOR}/keystore.json" ]]; then
# Only remote key stores doesn't have a secret
cp -a "${SECRETS_DIR}/${VALIDATOR}" "${NODE_DATA_DIR}/secrets/" 2>&1
fi
done
if [[ "${OS}" == "Windows_NT" ]]; then
find "${NODE_DATA_DIR}" -type f \( -iname "*.json" -o ! -iname "*.*" \) -exec icacls "{}" /inheritance:r /grant:r ${USERDOMAIN}\\${USERNAME}:\(F\) \;
@ -554,6 +603,17 @@ metrics = true
metrics-address = "127.0.0.1"
END_CLI_CONFIG
for NUM_REMOTE in $(seq 0 $(( REMOTE_SIGNER_NODES - 1 ))); do
./build/nimbus_signing_node \
--validators-dir="${DATA_DIR}/validators_shares/${NUM_REMOTE}" \
--secrets-dir="${DATA_DIR}/secrets_shares/${NUM_REMOTE}" \
--bind-port=$(( BASE_REMOTE_SIGNER_PORT + NUM_REMOTE )) \
> "${DATA_DIR}/log_remote_signer_${NUM_REMOTE}.txt" &
done
# give each node time to load keys
sleep 10
for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
CONTAINER_NODE_DATA_DIR="${CONTAINER_DATA_DIR}/node${NUM_NODE}"

View File

@ -40,6 +40,8 @@ import # Unit test
./test_sync_committee_pool,
./test_sync_manager,
./test_zero_signature,
./test_key_splitting,
./test_remote_keystore,
./fork_choice/tests_fork_choice,
./consensus_spec/all_tests as consensus_all_tests,
./slashing_protection/test_fixtures,

View File

@ -0,0 +1,83 @@
{.used.}
import
std/[json, typetraits, sequtils],
unittest2, eth/keys, stew/byteutils,
../beacon_chain/spec/[crypto, keystore],
./testutil
func sign(secrets: seq[SecretShare], message: seq[byte]): seq[SignatureShare] =
  ## Signs `message` with every secret share, returning one `SignatureShare`
  ## per input share (tagged with the share's id so the aggregate signature
  ## can later be recovered by interpolation).
  # Fix: dropped the dead local `let msg = message`, which was never used.
  secrets.mapIt(it.key.blsSign(message).toSignatureShare(it.id))
suite "Key splitting":
  ## Exercises k-of-n secret sharing of a validator private key: shares
  ## are generated, each share signs a message, and the signature
  ## recovered from >= k shares must verify under the master public key.
  # Fixes: suite name typo ("spliting"); removed unused suite-level
  # `password`/`salt`/`iv` locals; use `check` consistently over `doAssert`.
  let
    privateKey = ValidatorPrivKey.init("0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866")
    pubKey = privateKey.toPubKey.toPubKey
    rng = keys.newRng()

  # Random 32-byte message signed in every test below.
  var msg = newSeq[byte](32)
  brHmacDrbgGenerate(rng[], msg)

  test "single share":
    # Degenerate 1-of-1 split must still round-trip sign/recover.
    let maybeShares = generateSecretShares(privateKey, rng[], 1, 1)
    check maybeShares.isOk
    let shares = maybeShares.get
    check shares.len == 1
    let signs = shares.sign(msg)
    let recovered = signs.recoverSignature()
    check pubKey.blsVerify(msg, recovered)
    check pubKey.confirmShares(shares, rng[])

  test "k < n":
    # 2-of-3: any two shares recover a valid signature, one share or a
    # share with a corrupted id must not.
    let maybeShares = generateSecretShares(privateKey, rng[], 2, 3)
    check maybeShares.isOk
    let shares = maybeShares.get
    check shares.len == 3
    let signs = shares.sign(msg)
    var invalidShare = shares[2]
    invalidShare.id = 1000 # 1000 is an arbitrary wrong value

    check pubKey.blsVerify(msg, signs.recoverSignature())
    check pubKey.blsVerify(msg, @[signs[0], signs[1]].recoverSignature())
    check pubKey.blsVerify(msg, @[signs[1], signs[2]].recoverSignature())
    check pubKey.blsVerify(msg, @[signs[2], signs[0]].recoverSignature())
    check not pubKey.blsVerify(msg, @[signs[0]].recoverSignature())

    check pubKey.confirmShares(shares, rng[])
    check pubKey.confirmShares(@[shares[0], shares[1]], rng[])
    check pubKey.confirmShares(@[shares[1], shares[2]], rng[])
    check pubKey.confirmShares(@[shares[2], shares[0]], rng[])
    check pubKey.confirmShares(@[shares[0], shares[2]], rng[])
    check not pubKey.confirmShares(@[shares[0]], rng[])
    check not pubKey.confirmShares(@[shares[1]], rng[])
    check not pubKey.confirmShares(@[shares[2]], rng[])
    check not pubKey.confirmShares(@[invalidShare], rng[])
    check not pubKey.confirmShares(@[shares[0], invalidShare], rng[])
    check not pubKey.confirmShares(@[shares[1], invalidShare], rng[])
    check not pubKey.confirmShares(@[shares[2], invalidShare], rng[])

  test "k == n":
    # 3-of-3: all shares are required; fewer must not verify.
    let maybeShares = generateSecretShares(privateKey, rng[], 3, 3)
    check maybeShares.isOk
    let shares = maybeShares.get
    check shares.len == 3
    let signs = shares.sign(msg)
    let recovered = signs.recoverSignature()
    check pubKey.blsVerify(msg, recovered)
    check not pubKey.blsVerify(msg, @[signs[0]].recoverSignature())
    check not pubKey.blsVerify(msg, @[signs[0], signs[1]].recoverSignature())
    check pubKey.confirmShares(shares, rng[])

  test "k == n == 100":
    # Stress the interpolation with a large 100-of-100 split.
    let maybeShares = generateSecretShares(privateKey, rng[], 100, 100)
    check maybeShares.isOk
    let shares = maybeShares.get
    check shares.len == 100
    let signs = shares.sign(msg)
    let recovered = signs.recoverSignature()
    check pubKey.blsVerify(msg, recovered)
    check pubKey.confirmShares(shares, rng[])

View File

@ -102,6 +102,9 @@ proc startSingleNodeNetwork {.raises: [CatchableError, Defect].} =
0, simulationDepositsCount,
validatorsDir,
secretsDir,
@[],
0,
0,
KeystoreMode.Fast)
if deposits.isErr:
@ -213,7 +216,7 @@ proc listRemoteValidators(validatorsDir,
for el in listLoadableKeystores(validatorsDir, secretsDir, true,
{KeystoreKind.Remote}):
validators.add RemoteKeystoreInfo(pubkey: el.pubkey,
url: el.remoteUrl)
url: el.remotes[0].url)
except OSError as err:
error "Failure to list the validator directories",

View File

@ -0,0 +1,81 @@
{.used.}
import
std/[json, typetraits],
unittest2, stew/byteutils, json_serialization,
blscurve, eth/keys, libp2p/crypto/crypto as lcrypto,
nimcrypto/utils as ncrutils,
../beacon_chain/spec/[crypto, keystore],
./testutil
suite "Remote keystore testing suite":
  ## JSON decoding tests for `RemoteKeystore`, covering the legacy
  ## version-1 layout (single "remote" URL) and the version-2 layout with
  ## one or many (distributed/threshold) remote signers.
  # Fixes: suite name typo ("Remove" -> "Remote") and test-name typos
  # ("vesion" -> "version"); stray space before ':' on test headers.
  test "version 1":
    # A v1 keystore is lifted into a one-element `remotes` list whose
    # share pubkey defaults to the keystore's own pubkey and whose id is 0.
    let remoteKeyStores = """{
      "version": 1,
      "pubkey": "0x8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c",
      "remote": "http://127.0.0.1:6000",
      "type": "web3signer"
    }"""
    let keystore = Json.decode(remoteKeyStores, RemoteKeystore)
    check keystore.pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"
    check keystore.remotes.len == 1
    check $keystore.remotes[0].url == "http://127.0.0.1:6000"
    check keystore.remotes[0].id == 0
    check keystore.remotes[0].pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"

  test "version 2 single remote":
    # A v2 keystore with one remote: the explicit `remotes` entry is kept
    # and the default share id remains 0.
    let remoteKeyStores = """{
      "version": 2,
      "pubkey": "0x8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c",
      "remotes": [
        {
          "url": "http://127.0.0.1:6000",
          "pubkey": "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"
        }
      ],
      "type": "web3signer"
    }"""
    let keystore = Json.decode(remoteKeyStores, RemoteKeystore)
    check keystore.pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"
    check keystore.remotes.len == 1
    check $keystore.remotes[0].url == "http://127.0.0.1:6000"
    check keystore.remotes[0].id == 0
    check keystore.remotes[0].pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"

  test "version 2 many remotes":
    # A v2 threshold keystore: per-share ids, per-share pubkeys and the
    # signing threshold must all survive decoding.
    let remoteKeyStores = """{
      "version": 2,
      "pubkey": "0x8ebc7291df2a671326de83471a4feeb759cc842caa59aa92065e3508baa7e50513bc49a79ff4387c8ef747764f364b6f",
      "remotes": [
        {
          "url": "http://127.0.0.1:6000",
          "id": 1,
          "pubkey": "95313b967bcd761175dbc2a5685c16b1a73000e66f9622eca080cb0428dd3db61f7377b32b1fd27f3bdbdf2b554e7f87"
        },
        {
          "url": "http://127.0.0.1:6001",
          "id": 2,
          "pubkey": "8b8c115d19a9bdacfc7af9c8e8fc1353af54b63b0e772a641499cac9b6ea5cb1b3479cfa52ebc98ba5afe07a06c06238"
        },
        {
          "url": "http://127.0.0.1:6002",
          "id": 3,
          "pubkey": "8f5f9e305e7fcbde94182747f5ecec573d1786e8320a920347a74c0ff5e70f12ca22607c98fdc8dbe71161db59e0ac9d"
        }
      ],
      "threshold": 2,
      "type": "web3signer"
    }"""
    let keystore = Json.decode(remoteKeyStores, RemoteKeystore)
    check keystore.pubkey.toHex == "8ebc7291df2a671326de83471a4feeb759cc842caa59aa92065e3508baa7e50513bc49a79ff4387c8ef747764f364b6f"
    check keystore.remotes.len == 3
    check $keystore.remotes[0].url == "http://127.0.0.1:6000"
    check $keystore.remotes[1].url == "http://127.0.0.1:6001"
    check $keystore.remotes[2].url == "http://127.0.0.1:6002"
    check keystore.remotes[0].id == 1
    check keystore.remotes[1].id == 2
    check keystore.remotes[2].id == 3
    check keystore.remotes[0].pubkey.toHex == "95313b967bcd761175dbc2a5685c16b1a73000e66f9622eca080cb0428dd3db61f7377b32b1fd27f3bdbdf2b554e7f87"
    check keystore.remotes[1].pubkey.toHex == "8b8c115d19a9bdacfc7af9c8e8fc1353af54b63b0e772a641499cac9b6ea5cb1b3479cfa52ebc98ba5afe07a06c06238"
    check keystore.remotes[2].pubkey.toHex == "8f5f9e305e7fcbde94182747f5ecec573d1786e8320a920347a74c0ff5e70f12ca22607c98fdc8dbe71161db59e0ac9d"
    check keystore.threshold == 2

2
vendor/nim-blscurve vendored

@ -1 +1 @@
Subproject commit 71a30b926c2a66096a1925fd9b6e5a6ed1546d9b
Subproject commit 1d428420076a230e8a46346edbc084320afdad9d