Fixes for style check (#676)
parent c49932b55a
commit df566e69db
@@ -56,7 +56,7 @@ proc dialPeer(p: ChatProto, address: string) {.async.} =
 .tryGet()
 .protoAddress()
 .tryGet()
-remotePeer = PeerID.init(peerIdBytes).tryGet()
+remotePeer = PeerId.init(peerIdBytes).tryGet()
 # split the wire address
 ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
 tcpAddr = multiAddr[multiCodec("tcp")].tryGet()
@@ -182,7 +182,7 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
 chatProto.started = true

 let id = $switch.peerInfo.peerId
-echo "PeerID: " & id
+echo "PeerId: " & id
 echo "listening on: "
 for a in switch.peerInfo.addrs:
 echo &"{a}/p2p/{id}"
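
Note: the hunks in this commit only change identifier spelling (PeerID becomes PeerId, openarray becomes openArray, unsafeaddr becomes unsafeAddr, and so on). Nim compares identifiers case-insensitively after the first character and ignores underscores, so both spellings already name the same symbol; the style check merely reports usages whose spelling differs from the declaration. A minimal standalone sketch of that behaviour (not taken from this repository):

proc fooBar(x: int): int =
  ## Declared as `fooBar`; `foo_bar` and `foobar` are the same identifier.
  x * 2

echo fooBar(21)    # canonical spelling, never reported
echo foo_bar(21)   # resolves to fooBar, but the style check flags the spelling
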
@@ -92,7 +92,7 @@ proc dialPeer(c: Chat, address: string) {.async.} =
 .tryGet()
 .protoAddress()
 .tryGet()
-remotePeer = PeerID.init(peerIdBytes).tryGet()
+remotePeer = PeerId.init(peerIdBytes).tryGet()
 # split the wire address
 ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
 tcpAddr = multiAddr[multiCodec("tcp")].tryGet()
@@ -184,7 +184,7 @@ proc main() {.async.} =
 await switch.start()

 let id = $switch.peerInfo.peerId
-echo "PeerID: " & id
+echo "PeerId: " & id
 echo "listening on: "
 for a in switch.peerInfo.addrs:
 echo &"{a}/p2p/{id}"

@@ -41,7 +41,7 @@ proc serveThread(udata: CustomData) {.async.} =
 if line.startsWith("/connect"):
 var parts = line.split(" ")
 if len(parts) == 2:
-var peerId = PeerID.init(parts[1])
+var peerId = PeerId.init(parts[1])
 var address = MultiAddress.init(multiCodec("p2p-circuit"))
 address &= MultiAddress.init(multiCodec("p2p"), peerId)
 echo "= Searching for peer ", peerId.pretty()
@@ -59,7 +59,7 @@ proc serveThread(udata: CustomData) {.async.} =
 elif line.startsWith("/search"):
 var parts = line.split(" ")
 if len(parts) == 2:
-var peerId = PeerID.init(parts[1])
+var peerId = PeerId.init(parts[1])
 echo "= Searching for peer ", peerId.pretty()
 var id = await udata.api.dhtFindPeer(peerId)
 echo "= Peer " & parts[1] & " found at addresses:"
@@ -68,7 +68,7 @@ proc serveThread(udata: CustomData) {.async.} =
 elif line.startsWith("/consearch"):
 var parts = line.split(" ")
 if len(parts) == 2:
-var peerId = PeerID.init(parts[1])
+var peerId = PeerId.init(parts[1])
 echo "= Searching for peers connected to peer ", parts[1]
 var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
 echo "= Found ", len(peers), " connected to peer ", parts[1]
@@ -127,7 +127,7 @@ proc main() {.async.} =
 echo ">> ", line

 await data.api.addHandler(ServerProtocols, streamHandler)
-echo "= Your PeerID is ", id.peer.pretty()
+echo "= Your PeerId is ", id.peer.pretty()
 await data.serveFut

 when isMainModule:

@@ -45,10 +45,10 @@ proc main() {.async, gcsafe.} =
 let
 rng = newRng() # Single random number source for the whole application
 # port 0 will take a random available port
-# `tryGet` will throw an exception if the Multiaddress failed
+# `tryGet` will throw an exception if the MultiAddress failed
 # (for instance, if the address is not well formatted)
-ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
-ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

 # setup the custom proto
 let testProto = TestProto.new()

@@ -71,8 +71,8 @@ proc dumpHex*(pbytes: pointer, nbytes: int, items = 1, ascii = true): string =
 result = result & asciiText
 result = result & "\n"

-proc dumpHex*[T](v: openarray[T], items: int = 0, ascii = true): string =
-## Return hexadecimal memory dump representation of openarray[T] ``v``.
+proc dumpHex*[T](v: openArray[T], items: int = 0, ascii = true): string =
+## Return hexadecimal memory dump representation of openArray[T] ``v``.
 ## ``items`` - number of bytes in group (supported ``items`` count is
 ## 0, 1, 2, 4, 8). If ``items`` is ``0`` group size will depend on
 ## ``sizeof(T)``.

@@ -21,7 +21,7 @@ requires "nim >= 1.2.0",
 proc runTest(filename: string, verify: bool = true, sign: bool = true,
 moreoptions: string = "") =
 let env_nimflags = getEnv("NIMFLAGS")
-var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off " & env_nimflags
+var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off --styleCheck:usages --styleCheck:hint " & env_nimflags
 excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
 excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
 excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
@@ -34,7 +34,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
 rmFile "tests/" & filename.toExe

 proc buildSample(filename: string, run = false) =
-var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off"
+var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off "
 excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
 excstr.add(" examples/" & filename)
 exec excstr
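
The .nimble hunk above is where the check is switched on for the test suite: the compile command gains --styleCheck:usages (only usages are checked against the declared spelling) and --styleCheck:hint (mismatches are reported as hints rather than errors). A hedged NimScript sketch of an equivalent manual invocation; the target module is illustrative, not taken from this diff:

# Compile one module with the same style-check flags the test task now uses.
var cmd = "nim c --verbosity:0 --hints:off"
cmd.add(" --styleCheck:usages --styleCheck:hint")
cmd.add(" libp2p.nim")   # hypothetical target module
exec cmd                 # `exec` is available in NimScript (.nimble) files
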
@@ -198,7 +198,7 @@ proc build*(b: SwitchBuilder): Switch
 proc newStandardSwitch*(
 privKey = none(PrivateKey),
 addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
-secureManagers: openarray[SecureProtocol] = [
+secureManagers: openArray[SecureProtocol] = [
 SecureProtocol.Noise,
 ],
 transportFlags: set[ServerFlags] = {},

@@ -71,7 +71,7 @@ const
 template orError*(exp: untyped, err: untyped): untyped =
 (exp.mapErr do (_: auto) -> auto: err)

-proc decode(data: openarray[byte]): Result[Cid, CidError] =
+proc decode(data: openArray[byte]): Result[Cid, CidError] =
 if len(data) == 34 and data[0] == 0x12'u8 and data[1] == 0x20'u8:
 ok(Cid(
 cidver: CIDv0,
@@ -114,7 +114,7 @@ proc decode(data: openarray[byte]): Result[Cid, CidError] =
 hpos: offset,
 data: vb))

-proc decode(data: openarray[char]): Result[Cid, CidError] =
+proc decode(data: openArray[char]): Result[Cid, CidError] =
 var buffer: seq[byte]
 var plen = 0
 if len(data) < 2:
@@ -137,7 +137,7 @@ proc decode(data: openarray[char]): Result[Cid, CidError] =
 return err(CidError.Incorrect)
 decode(buffer)

-proc validate*(ctype: typedesc[Cid], data: openarray[byte]): bool =
+proc validate*(ctype: typedesc[Cid], data: openArray[byte]): bool =
 ## Returns ``true`` is data has valid binary CID representation.
 var version, codec: uint64
 var res: VarintResult[void]
@@ -185,7 +185,7 @@ proc version*(cid: Cid): CidVersion =
 ## Returns CID version
 result = cid.cidver

-proc init*[T: char|byte](ctype: typedesc[Cid], data: openarray[T]): Result[Cid, CidError] =
+proc init*[T: char|byte](ctype: typedesc[Cid], data: openArray[T]): Result[Cid, CidError] =
 ## Create new content identifier using array of bytes or string ``data``.
 decode(data)

@@ -275,7 +275,7 @@ proc `$`*(cid: Cid): string =
 if cid.cidver == CIDv0:
 BTCBase58.encode(cid.data.buffer)
 elif cid.cidver == CIDv1:
-let res = Multibase.encode("base58btc", cid.data.buffer)
+let res = MultiBase.encode("base58btc", cid.data.buffer)
 if res.isOk():
 res.get()
 else:

@@ -75,7 +75,7 @@ type
 maxConnsPerPeer: int
 inSema*: AsyncSemaphore
 outSema*: AsyncSemaphore
-conns: Table[PeerID, HashSet[Connection]]
+conns: Table[PeerId, HashSet[Connection]]
 muxed: Table[Connection, MuxerHolder]
 connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
 peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
@@ -103,7 +103,7 @@ proc new*(C: type ConnManager,
 inSema: inSema,
 outSema: outSema)

-proc connCount*(c: ConnManager, peerId: PeerID): int =
+proc connCount*(c: ConnManager, peerId: PeerId): int =
 c.conns.getOrDefault(peerId).len

 proc addConnEventHandler*(c: ConnManager,
@@ -219,7 +219,7 @@ proc contains*(c: ConnManager, conn: Connection): bool =

 return conn in c.conns.getOrDefault(conn.peerId)

-proc contains*(c: ConnManager, peerId: PeerID): bool =
+proc contains*(c: ConnManager, peerId: PeerId): bool =
 peerId in c.conns

 proc contains*(c: ConnManager, muxer: Muxer): bool =
@@ -334,7 +334,7 @@ proc onClose(c: ConnManager, conn: Connection) {.async.} =
 asyncSpawn c.peerCleanup(conn)

 proc selectConn*(c: ConnManager,
-peerId: PeerID,
+peerId: PeerId,
 dir: Direction): Connection =
 ## Select a connection for the provided peer and direction
 ##
@@ -345,7 +345,7 @@ proc selectConn*(c: ConnManager,
 if conns.len > 0:
 return conns[0]

-proc selectConn*(c: ConnManager, peerId: PeerID): Connection =
+proc selectConn*(c: ConnManager, peerId: PeerId): Connection =
 ## Select a connection for the provided giving priority
 ## to outgoing connections
 ##
@@ -506,7 +506,7 @@ proc storeMuxer*(c: ConnManager,
 asyncSpawn c.onConnUpgraded(muxer.connection)

 proc getStream*(c: ConnManager,
-peerId: PeerID,
+peerId: PeerId,
 dir: Direction): Future[Connection] {.async, gcsafe.} =
 ## get a muxed stream for the provided peer
 ## with the given direction
@@ -517,7 +517,7 @@ proc getStream*(c: ConnManager,
 return await muxer.newStream()

 proc getStream*(c: ConnManager,
-peerId: PeerID): Future[Connection] {.async, gcsafe.} =
+peerId: PeerId): Future[Connection] {.async, gcsafe.} =
 ## get a muxed stream for the passed peer from any connection
 ##

@@ -534,7 +534,7 @@ proc getStream*(c: ConnManager,
 if not(isNil(muxer)):
 return await muxer.newStream()

-proc dropPeer*(c: ConnManager, peerId: PeerID) {.async.} =
+proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
 ## drop connections and cleanup resources for peer
 ##
 trace "Dropping peer", peerId

@@ -37,17 +37,17 @@ type
 ChaChaPolyNonce* = array[ChaChaPolyNonceSize, byte]
 ChaChaPolyTag* = array[ChaChaPolyTagSize, byte]

-proc intoChaChaPolyKey*(s: openarray[byte]): ChaChaPolyKey =
+proc intoChaChaPolyKey*(s: openArray[byte]): ChaChaPolyKey =
 assert s.len == ChaChaPolyKeySize
-copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyKeySize)
+copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyKeySize)

-proc intoChaChaPolyNonce*(s: openarray[byte]): ChaChaPolyNonce =
+proc intoChaChaPolyNonce*(s: openArray[byte]): ChaChaPolyNonce =
 assert s.len == ChaChaPolyNonceSize
-copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyNonceSize)
+copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyNonceSize)

-proc intoChaChaPolyTag*(s: openarray[byte]): ChaChaPolyTag =
+proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag =
 assert s.len == ChaChaPolyTagSize
-copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyTagSize)
+copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyTagSize)

 # bearssl allows us to use optimized versions
 # this is reconciled at runtime
@@ -57,17 +57,17 @@ proc encrypt*(_: type[ChaChaPoly],
 key: ChaChaPolyKey,
 nonce: ChaChaPolyNonce,
 tag: var ChaChaPolyTag,
-data: var openarray[byte],
-aad: openarray[byte]) =
+data: var openArray[byte],
+aad: openArray[byte]) =
 let
 ad = if aad.len > 0:
-unsafeaddr aad[0]
+unsafeAddr aad[0]
 else:
 nil

 ourPoly1305CtmulRun(
-unsafeaddr key[0],
-unsafeaddr nonce[0],
+unsafeAddr key[0],
+unsafeAddr nonce[0],
 addr data[0],
 data.len,
 ad,
@@ -80,17 +80,17 @@ proc decrypt*(_: type[ChaChaPoly],
 key: ChaChaPolyKey,
 nonce: ChaChaPolyNonce,
 tag: var ChaChaPolyTag,
-data: var openarray[byte],
-aad: openarray[byte]) =
+data: var openArray[byte],
+aad: openArray[byte]) =
 let
 ad = if aad.len > 0:
-unsafeaddr aad[0]
+unsafeAddr aad[0]
 else:
 nil

 ourPoly1305CtmulRun(
-unsafeaddr key[0],
-unsafeaddr nonce[0],
+unsafeAddr key[0],
+unsafeAddr nonce[0],
 addr data[0],
 data.len,
 ad,
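
The ChaChaPoly hunks above keep the usual Nim pattern for handing an openArray to a C-style routine: pass the address of the first element plus the length, using unsafeAddr because the parameter is immutable. A self-contained sketch of that pattern under an assumed C import (memcmp stands in for the BearSSL calls used by the real code):

proc cMemCmp(a, b: pointer, n: csize_t): cint {.importc: "memcmp", header: "<string.h>".}

proc sameBytes(a, b: openArray[byte]): bool =
  # Empty slices compare equal; otherwise defer to the C routine.
  if a.len != b.len:
    return false
  if a.len == 0:
    return true
  # `unsafeAddr` because openArray parameters are immutable views.
  result = cMemCmp(unsafeAddr a[0], unsafeAddr b[0], csize_t(a.len)) == 0

doAssert sameBytes([1'u8, 2, 3], [1'u8, 2, 3])
doAssert not sameBytes([1'u8], [2'u8])
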
@ -130,8 +130,8 @@ type
|
||||||
skkey*: SkPrivateKey
|
skkey*: SkPrivateKey
|
||||||
else:
|
else:
|
||||||
discard
|
discard
|
||||||
of PKSCheme.ECDSA:
|
of PKScheme.ECDSA:
|
||||||
when supported(PKSCheme.ECDSA):
|
when supported(PKScheme.ECDSA):
|
||||||
eckey*: ecnist.EcPrivateKey
|
eckey*: ecnist.EcPrivateKey
|
||||||
else:
|
else:
|
||||||
discard
|
discard
|
||||||
|
@ -345,7 +345,7 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
|
||||||
err(SchemeError)
|
err(SchemeError)
|
||||||
|
|
||||||
proc toRawBytes*(key: PrivateKey | PublicKey,
|
proc toRawBytes*(key: PrivateKey | PublicKey,
|
||||||
data: var openarray[byte]): CryptoResult[int] =
|
data: var openArray[byte]): CryptoResult[int] =
|
||||||
## Serialize private key ``key`` (using scheme's own serialization) and store
|
## Serialize private key ``key`` (using scheme's own serialization) and store
|
||||||
## it to ``data``.
|
## it to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -397,7 +397,7 @@ proc getRawBytes*(key: PrivateKey | PublicKey): CryptoResult[seq[byte]] =
|
||||||
else:
|
else:
|
||||||
err(SchemeError)
|
err(SchemeError)
|
||||||
|
|
||||||
proc toBytes*(key: PrivateKey, data: var openarray[byte]): CryptoResult[int] =
|
proc toBytes*(key: PrivateKey, data: var openArray[byte]): CryptoResult[int] =
|
||||||
## Serialize private key ``key`` (using libp2p protobuf scheme) and store
|
## Serialize private key ``key`` (using libp2p protobuf scheme) and store
|
||||||
## it to ``data``.
|
## it to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -411,7 +411,7 @@ proc toBytes*(key: PrivateKey, data: var openarray[byte]): CryptoResult[int] =
|
||||||
copyMem(addr data[0], addr msg.buffer[0], blen)
|
copyMem(addr data[0], addr msg.buffer[0], blen)
|
||||||
ok(blen)
|
ok(blen)
|
||||||
|
|
||||||
proc toBytes*(key: PublicKey, data: var openarray[byte]): CryptoResult[int] =
|
proc toBytes*(key: PublicKey, data: var openArray[byte]): CryptoResult[int] =
|
||||||
## Serialize public key ``key`` (using libp2p protobuf scheme) and store
|
## Serialize public key ``key`` (using libp2p protobuf scheme) and store
|
||||||
## it to ``data``.
|
## it to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -425,7 +425,7 @@ proc toBytes*(key: PublicKey, data: var openarray[byte]): CryptoResult[int] =
|
||||||
copyMem(addr data[0], addr msg.buffer[0], blen)
|
copyMem(addr data[0], addr msg.buffer[0], blen)
|
||||||
ok(blen)
|
ok(blen)
|
||||||
|
|
||||||
proc toBytes*(sig: Signature, data: var openarray[byte]): int =
|
proc toBytes*(sig: Signature, data: var openArray[byte]): int =
|
||||||
## Serialize signature ``sig`` and store it to ``data``.
|
## Serialize signature ``sig`` and store it to ``data``.
|
||||||
##
|
##
|
||||||
## Returns number of bytes (octets) needed to store signature ``sig``.
|
## Returns number of bytes (octets) needed to store signature ``sig``.
|
||||||
|
@ -455,7 +455,7 @@ proc getBytes*(sig: Signature): seq[byte] =
|
||||||
## Return signature ``sig`` in binary form.
|
## Return signature ``sig`` in binary form.
|
||||||
result = sig.data
|
result = sig.data
|
||||||
|
|
||||||
proc init*[T: PrivateKey|PublicKey](key: var T, data: openarray[byte]): bool =
|
proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
|
||||||
## Initialize private key ``key`` from libp2p's protobuf serialized raw
|
## Initialize private key ``key`` from libp2p's protobuf serialized raw
|
||||||
## binary form.
|
## binary form.
|
||||||
##
|
##
|
||||||
|
@ -517,7 +517,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openarray[byte]): bool =
|
||||||
else:
|
else:
|
||||||
false
|
false
|
||||||
|
|
||||||
proc init*(sig: var Signature, data: openarray[byte]): bool =
|
proc init*(sig: var Signature, data: openArray[byte]): bool =
|
||||||
## Initialize signature ``sig`` from raw binary form.
|
## Initialize signature ``sig`` from raw binary form.
|
||||||
##
|
##
|
||||||
## Returns ``true`` on success.
|
## Returns ``true`` on success.
|
||||||
|
@ -540,7 +540,7 @@ proc init*(sig: var Signature, data: string): bool =
|
||||||
sig.init(ncrutils.fromHex(data))
|
sig.init(ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc init*(t: typedesc[PrivateKey],
|
proc init*(t: typedesc[PrivateKey],
|
||||||
data: openarray[byte]): CryptoResult[PrivateKey] =
|
data: openArray[byte]): CryptoResult[PrivateKey] =
|
||||||
## Create new private key from libp2p's protobuf serialized binary form.
|
## Create new private key from libp2p's protobuf serialized binary form.
|
||||||
var res: t
|
var res: t
|
||||||
if not res.init(data):
|
if not res.init(data):
|
||||||
|
@ -549,7 +549,7 @@ proc init*(t: typedesc[PrivateKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[PublicKey],
|
proc init*(t: typedesc[PublicKey],
|
||||||
data: openarray[byte]): CryptoResult[PublicKey] =
|
data: openArray[byte]): CryptoResult[PublicKey] =
|
||||||
## Create new public key from libp2p's protobuf serialized binary form.
|
## Create new public key from libp2p's protobuf serialized binary form.
|
||||||
var res: t
|
var res: t
|
||||||
if not res.init(data):
|
if not res.init(data):
|
||||||
|
@ -558,7 +558,7 @@ proc init*(t: typedesc[PublicKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[Signature],
|
proc init*(t: typedesc[Signature],
|
||||||
data: openarray[byte]): CryptoResult[Signature] =
|
data: openArray[byte]): CryptoResult[Signature] =
|
||||||
## Create new public key from libp2p's protobuf serialized binary form.
|
## Create new public key from libp2p's protobuf serialized binary form.
|
||||||
var res: t
|
var res: t
|
||||||
if not res.init(data):
|
if not res.init(data):
|
||||||
|
@ -713,7 +713,7 @@ proc `$`*(sig: Signature): string =
|
||||||
result = ncrutils.toHex(sig.data)
|
result = ncrutils.toHex(sig.data)
|
||||||
|
|
||||||
proc sign*(key: PrivateKey,
|
proc sign*(key: PrivateKey,
|
||||||
data: openarray[byte]): CryptoResult[Signature] {.gcsafe.} =
|
data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
|
||||||
## Sign message ``data`` using private key ``key`` and return generated
|
## Sign message ``data`` using private key ``key`` and return generated
|
||||||
## signature in raw binary form.
|
## signature in raw binary form.
|
||||||
var res: Signature
|
var res: Signature
|
||||||
|
@ -747,7 +747,7 @@ proc sign*(key: PrivateKey,
|
||||||
else:
|
else:
|
||||||
err(SchemeError)
|
err(SchemeError)
|
||||||
|
|
||||||
proc verify*(sig: Signature, message: openarray[byte], key: PublicKey): bool =
|
proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
|
||||||
## Verify signature ``sig`` using message ``message`` and public key ``key``.
|
## Verify signature ``sig`` using message ``message`` and public key ``key``.
|
||||||
## Return ``true`` if message signature is valid.
|
## Return ``true`` if message signature is valid.
|
||||||
case key.scheme:
|
case key.scheme:
|
||||||
|
@ -898,8 +898,8 @@ proc ephemeral*(
|
||||||
else:
|
else:
|
||||||
ephemeral(Secp521r1, rng)
|
ephemeral(Secp521r1, rng)
|
||||||
|
|
||||||
proc getOrder*(remotePubkey, localNonce: openarray[byte],
|
proc getOrder*(remotePubkey, localNonce: openArray[byte],
|
||||||
localPubkey, remoteNonce: openarray[byte]): CryptoResult[int] =
|
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
|
||||||
## Compare values and calculate `order` parameter.
|
## Compare values and calculate `order` parameter.
|
||||||
var ctx: sha256
|
var ctx: sha256
|
||||||
ctx.init()
|
ctx.init()
|
||||||
|
@ -943,7 +943,7 @@ proc selectBest*(order: int, p1, p2: string): string =
|
||||||
if felement == selement:
|
if felement == selement:
|
||||||
return felement
|
return felement
|
||||||
|
|
||||||
proc createProposal*(nonce, pubkey: openarray[byte],
|
proc createProposal*(nonce, pubkey: openArray[byte],
|
||||||
exchanges, ciphers, hashes: string): seq[byte] =
|
exchanges, ciphers, hashes: string): seq[byte] =
|
||||||
## Create SecIO proposal message using random ``nonce``, local public key
|
## Create SecIO proposal message using random ``nonce``, local public key
|
||||||
## ``pubkey``, comma-delimieted list of supported exchange schemes
|
## ``pubkey``, comma-delimieted list of supported exchange schemes
|
||||||
|
@ -977,7 +977,7 @@ proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
|
||||||
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
|
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
|
||||||
r5.isOk() and r5.get()
|
r5.isOk() and r5.get()
|
||||||
|
|
||||||
proc createExchange*(epubkey, signature: openarray[byte]): seq[byte] =
|
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
|
||||||
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
|
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
|
||||||
## signature of proposal blocks ``signature``.
|
## signature of proposal blocks ``signature``.
|
||||||
var msg = initProtoBuffer({WithUint32BeLength})
|
var msg = initProtoBuffer({WithUint32BeLength})
|
||||||
|
|
|
@ -31,9 +31,9 @@ type
|
||||||
Curve25519Error* = enum
|
Curve25519Error* = enum
|
||||||
Curver25519GenError
|
Curver25519GenError
|
||||||
|
|
||||||
proc intoCurve25519Key*(s: openarray[byte]): Curve25519Key =
|
proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key =
|
||||||
assert s.len == Curve25519KeySize
|
assert s.len == Curve25519KeySize
|
||||||
copyMem(addr result[0], unsafeaddr s[0], Curve25519KeySize)
|
copyMem(addr result[0], unsafeAddr s[0], Curve25519KeySize)
|
||||||
|
|
||||||
proc getBytes*(key: Curve25519Key): seq[byte] = @key
|
proc getBytes*(key: Curve25519Key): seq[byte] = @key
|
||||||
|
|
||||||
|
|
|
@ -94,7 +94,7 @@ proc NEQ(x, y: uint32): uint32 {.inline.} =
|
||||||
proc LT0(x: int32): uint32 {.inline.} =
|
proc LT0(x: int32): uint32 {.inline.} =
|
||||||
result = cast[uint32](x) shr 31
|
result = cast[uint32](x) shr 31
|
||||||
|
|
||||||
proc checkScalar(scalar: openarray[byte], curve: cint): uint32 =
|
proc checkScalar(scalar: openArray[byte], curve: cint): uint32 =
|
||||||
## Return ``1`` if all of the following hold:
|
## Return ``1`` if all of the following hold:
|
||||||
## - len(``scalar``) <= ``orderlen``
|
## - len(``scalar``) <= ``orderlen``
|
||||||
## - ``scalar`` != 0
|
## - ``scalar`` != 0
|
||||||
|
@ -116,7 +116,7 @@ proc checkScalar(scalar: openarray[byte], curve: cint): uint32 =
|
||||||
c = -1
|
c = -1
|
||||||
result = NEQ(z, 0'u32) and LT0(c)
|
result = NEQ(z, 0'u32) and LT0(c)
|
||||||
|
|
||||||
proc checkPublic(key: openarray[byte], curve: cint): uint32 =
|
proc checkPublic(key: openArray[byte], curve: cint): uint32 =
|
||||||
## Return ``1`` if public key ``key`` is on curve.
|
## Return ``1`` if public key ``key`` is on curve.
|
||||||
var ckey = @key
|
var ckey = @key
|
||||||
var x = [0x00'u8, 0x01'u8]
|
var x = [0x00'u8, 0x01'u8]
|
||||||
|
@ -315,7 +315,7 @@ proc `$`*(sig: EcSignature): string =
|
||||||
else:
|
else:
|
||||||
result = ncrutils.toHex(sig.buffer)
|
result = ncrutils.toHex(sig.buffer)
|
||||||
|
|
||||||
proc toRawBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
|
proc toRawBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
|
||||||
## Serialize EC private key ``seckey`` to raw binary form and store it
|
## Serialize EC private key ``seckey`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -331,7 +331,7 @@ proc toRawBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int]
|
||||||
else:
|
else:
|
||||||
err(EcKeyIncorrectError)
|
err(EcKeyIncorrectError)
|
||||||
|
|
||||||
proc toRawBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
|
proc toRawBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
|
||||||
## Serialize EC public key ``pubkey`` to uncompressed form specified in
|
## Serialize EC public key ``pubkey`` to uncompressed form specified in
|
||||||
## section 4.3.6 of ANSI X9.62.
|
## section 4.3.6 of ANSI X9.62.
|
||||||
##
|
##
|
||||||
|
@ -347,7 +347,7 @@ proc toRawBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int]
|
||||||
else:
|
else:
|
||||||
err(EcKeyIncorrectError)
|
err(EcKeyIncorrectError)
|
||||||
|
|
||||||
proc toRawBytes*(sig: EcSignature, data: var openarray[byte]): int =
|
proc toRawBytes*(sig: EcSignature, data: var openArray[byte]): int =
|
||||||
## Serialize EC signature ``sig`` to raw binary form and store it to ``data``.
|
## Serialize EC signature ``sig`` to raw binary form and store it to ``data``.
|
||||||
##
|
##
|
||||||
## Returns number of bytes (octets) needed to store EC signature, or `0`
|
## Returns number of bytes (octets) needed to store EC signature, or `0`
|
||||||
|
@ -358,7 +358,7 @@ proc toRawBytes*(sig: EcSignature, data: var openarray[byte]): int =
|
||||||
if len(sig.buffer) > 0:
|
if len(sig.buffer) > 0:
|
||||||
copyMem(addr data[0], unsafeAddr sig.buffer[0], len(sig.buffer))
|
copyMem(addr data[0], unsafeAddr sig.buffer[0], len(sig.buffer))
|
||||||
|
|
||||||
proc toBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
|
proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
|
||||||
## Serialize EC private key ``seckey`` to ASN.1 DER binary form and store it
|
## Serialize EC private key ``seckey`` to ASN.1 DER binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -408,7 +408,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
|
||||||
err(EcKeyIncorrectError)
|
err(EcKeyIncorrectError)
|
||||||
|
|
||||||
|
|
||||||
proc toBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
|
proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
|
||||||
## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
|
## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -445,7 +445,7 @@ proc toBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
|
||||||
else:
|
else:
|
||||||
err(EcKeyIncorrectError)
|
err(EcKeyIncorrectError)
|
||||||
|
|
||||||
proc toBytes*(sig: EcSignature, data: var openarray[byte]): EcResult[int] =
|
proc toBytes*(sig: EcSignature, data: var openArray[byte]): EcResult[int] =
|
||||||
## Serialize EC signature ``sig`` to ASN.1 DER binary form and store it
|
## Serialize EC signature ``sig`` to ASN.1 DER binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -592,7 +592,7 @@ proc `==`*(a, b: EcSignature): bool =
|
||||||
else:
|
else:
|
||||||
CT.isEqual(a.buffer, b.buffer)
|
CT.isEqual(a.buffer, b.buffer)
|
||||||
|
|
||||||
proc init*(key: var EcPrivateKey, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize EC `private key` or `signature` ``key`` from ASN.1 DER binary
|
## Initialize EC `private key` or `signature` ``key`` from ASN.1 DER binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@ -645,7 +645,7 @@ proc init*(key: var EcPrivateKey, data: openarray[byte]): Result[void, Asn1Error
|
||||||
else:
|
else:
|
||||||
err(Asn1Error.Incorrect)
|
err(Asn1Error.Incorrect)
|
||||||
|
|
||||||
proc init*(pubkey: var EcPublicKey, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize EC public key ``pubkey`` from ASN.1 DER binary representation
|
## Initialize EC public key ``pubkey`` from ASN.1 DER binary representation
|
||||||
## ``data``.
|
## ``data``.
|
||||||
##
|
##
|
||||||
|
@ -704,7 +704,7 @@ proc init*(pubkey: var EcPublicKey, data: openarray[byte]): Result[void, Asn1Err
|
||||||
else:
|
else:
|
||||||
err(Asn1Error.Incorrect)
|
err(Asn1Error.Incorrect)
|
||||||
|
|
||||||
proc init*(sig: var EcSignature, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(sig: var EcSignature, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize EC signature ``sig`` from raw binary representation ``data``.
|
## Initialize EC signature ``sig`` from raw binary representation ``data``.
|
||||||
##
|
##
|
||||||
## Procedure returns ``Result[void, Asn1Error]``.
|
## Procedure returns ``Result[void, Asn1Error]``.
|
||||||
|
@ -724,7 +724,7 @@ proc init*[T: EcPKI](sospk: var T,
|
||||||
sospk.init(ncrutils.fromHex(data))
|
sospk.init(ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc init*(t: typedesc[EcPrivateKey],
|
proc init*(t: typedesc[EcPrivateKey],
|
||||||
data: openarray[byte]): EcResult[EcPrivateKey] =
|
data: openArray[byte]): EcResult[EcPrivateKey] =
|
||||||
## Initialize EC private key from ASN.1 DER binary representation ``data`` and
|
## Initialize EC private key from ASN.1 DER binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var key: EcPrivateKey
|
var key: EcPrivateKey
|
||||||
|
@ -735,7 +735,7 @@ proc init*(t: typedesc[EcPrivateKey],
|
||||||
ok(key)
|
ok(key)
|
||||||
|
|
||||||
proc init*(t: typedesc[EcPublicKey],
|
proc init*(t: typedesc[EcPublicKey],
|
||||||
data: openarray[byte]): EcResult[EcPublicKey] =
|
data: openArray[byte]): EcResult[EcPublicKey] =
|
||||||
## Initialize EC public key from ASN.1 DER binary representation ``data`` and
|
## Initialize EC public key from ASN.1 DER binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var key: EcPublicKey
|
var key: EcPublicKey
|
||||||
|
@ -746,7 +746,7 @@ proc init*(t: typedesc[EcPublicKey],
|
||||||
ok(key)
|
ok(key)
|
||||||
|
|
||||||
proc init*(t: typedesc[EcSignature],
|
proc init*(t: typedesc[EcSignature],
|
||||||
data: openarray[byte]): EcResult[EcSignature] =
|
data: openArray[byte]): EcResult[EcSignature] =
|
||||||
## Initialize EC signature from raw binary representation ``data`` and
|
## Initialize EC signature from raw binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var sig: EcSignature
|
var sig: EcSignature
|
||||||
|
@ -761,7 +761,7 @@ proc init*[T: EcPKI](t: typedesc[T], data: string): EcResult[T] =
|
||||||
## string representation ``data`` and return constructed object.
|
## string representation ``data`` and return constructed object.
|
||||||
t.init(ncrutils.fromHex(data))
|
t.init(ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc initRaw*(key: var EcPrivateKey, data: openarray[byte]): bool =
|
proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool =
|
||||||
## Initialize EC `private key` or `scalar` ``key`` from raw binary
|
## Initialize EC `private key` or `scalar` ``key`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@ -790,7 +790,7 @@ proc initRaw*(key: var EcPrivateKey, data: openarray[byte]): bool =
|
||||||
key.key.curve = curve
|
key.key.curve = curve
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
proc initRaw*(pubkey: var EcPublicKey, data: openarray[byte]): bool =
|
proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool =
|
||||||
## Initialize EC public key ``pubkey`` from raw binary representation
|
## Initialize EC public key ``pubkey`` from raw binary representation
|
||||||
## ``data``.
|
## ``data``.
|
||||||
##
|
##
|
||||||
|
@ -821,7 +821,7 @@ proc initRaw*(pubkey: var EcPublicKey, data: openarray[byte]): bool =
|
||||||
pubkey.key.curve = curve
|
pubkey.key.curve = curve
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
proc initRaw*(sig: var EcSignature, data: openarray[byte]): bool =
|
proc initRaw*(sig: var EcSignature, data: openArray[byte]): bool =
|
||||||
## Initialize EC signature ``sig`` from raw binary representation ``data``.
|
## Initialize EC signature ``sig`` from raw binary representation ``data``.
|
||||||
##
|
##
|
||||||
## Length of ``data`` array must be ``Sig256Length``, ``Sig384Length``
|
## Length of ``data`` array must be ``Sig256Length``, ``Sig384Length``
|
||||||
|
@ -844,7 +844,7 @@ proc initRaw*[T: EcPKI](sospk: var T, data: string): bool {.inline.} =
|
||||||
result = sospk.initRaw(ncrutils.fromHex(data))
|
result = sospk.initRaw(ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc initRaw*(t: typedesc[EcPrivateKey],
|
proc initRaw*(t: typedesc[EcPrivateKey],
|
||||||
data: openarray[byte]): EcResult[EcPrivateKey] =
|
data: openArray[byte]): EcResult[EcPrivateKey] =
|
||||||
## Initialize EC private key from raw binary representation ``data`` and
|
## Initialize EC private key from raw binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var res: EcPrivateKey
|
var res: EcPrivateKey
|
||||||
|
@ -854,7 +854,7 @@ proc initRaw*(t: typedesc[EcPrivateKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc initRaw*(t: typedesc[EcPublicKey],
|
proc initRaw*(t: typedesc[EcPublicKey],
|
||||||
data: openarray[byte]): EcResult[EcPublicKey] =
|
data: openArray[byte]): EcResult[EcPublicKey] =
|
||||||
## Initialize EC public key from raw binary representation ``data`` and
|
## Initialize EC public key from raw binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var res: EcPublicKey
|
var res: EcPublicKey
|
||||||
|
@ -864,7 +864,7 @@ proc initRaw*(t: typedesc[EcPublicKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc initRaw*(t: typedesc[EcSignature],
|
proc initRaw*(t: typedesc[EcSignature],
|
||||||
data: openarray[byte]): EcResult[EcSignature] =
|
data: openArray[byte]): EcResult[EcSignature] =
|
||||||
## Initialize EC signature from raw binary representation ``data`` and
|
## Initialize EC signature from raw binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var res: EcSignature
|
var res: EcSignature
|
||||||
|
@ -900,7 +900,7 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey =
|
||||||
result = key
|
result = key
|
||||||
|
|
||||||
proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
|
proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
|
||||||
data: var openarray[byte]): int =
|
data: var openArray[byte]): int =
|
||||||
## Calculate ECDHE shared secret using Go's elliptic/curve approach, using
|
## Calculate ECDHE shared secret using Go's elliptic/curve approach, using
|
||||||
## remote public key ``pubkey`` and local private key ``seckey`` and store
|
## remote public key ``pubkey`` and local private key ``seckey`` and store
|
||||||
## shared secret to ``data``.
|
## shared secret to ``data``.
|
||||||
|
@ -937,7 +937,7 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
|
||||||
copyMem(addr result[0], addr data[0], res)
|
copyMem(addr result[0], addr data[0], res)
|
||||||
|
|
||||||
proc sign*[T: byte|char](seckey: EcPrivateKey,
|
proc sign*[T: byte|char](seckey: EcPrivateKey,
|
||||||
message: openarray[T]): EcResult[EcSignature] {.gcsafe.} =
|
message: openArray[T]): EcResult[EcSignature] {.gcsafe.} =
|
||||||
## Get ECDSA signature of data ``message`` using private key ``seckey``.
|
## Get ECDSA signature of data ``message`` using private key ``seckey``.
|
||||||
if isNil(seckey):
|
if isNil(seckey):
|
||||||
return err(EcKeyIncorrectError)
|
return err(EcKeyIncorrectError)
|
||||||
|
@ -966,7 +966,7 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
|
||||||
else:
|
else:
|
||||||
err(EcKeyIncorrectError)
|
err(EcKeyIncorrectError)
|
||||||
|
|
||||||
proc verify*[T: byte|char](sig: EcSignature, message: openarray[T],
|
proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
|
||||||
pubkey: EcPublicKey): bool {.inline.} =
|
pubkey: EcPublicKey): bool {.inline.} =
|
||||||
## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
|
## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
|
||||||
## ``message``.
|
## ``message``.
|
||||||
|
|
|
@ -165,30 +165,30 @@ proc feCopy(h: var Fe, f: Fe) =
|
||||||
h[8] = f8
|
h[8] = f8
|
||||||
h[9] = f9
|
h[9] = f9
|
||||||
|
|
||||||
proc load3(inp: openarray[byte]): uint64 =
|
proc load_3(inp: openArray[byte]): uint64 =
|
||||||
result = cast[uint64](inp[0])
|
result = cast[uint64](inp[0])
|
||||||
result = result or (cast[uint64](inp[1]) shl 8)
|
result = result or (cast[uint64](inp[1]) shl 8)
|
||||||
result = result or (cast[uint64](inp[2]) shl 16)
|
result = result or (cast[uint64](inp[2]) shl 16)
|
||||||
|
|
||||||
proc load4(inp: openarray[byte]): uint64 =
|
proc load_4(inp: openArray[byte]): uint64 =
|
||||||
result = cast[uint64](inp[0])
|
result = cast[uint64](inp[0])
|
||||||
result = result or (cast[uint64](inp[1]) shl 8)
|
result = result or (cast[uint64](inp[1]) shl 8)
|
||||||
result = result or (cast[uint64](inp[2]) shl 16)
|
result = result or (cast[uint64](inp[2]) shl 16)
|
||||||
result = result or (cast[uint64](inp[3]) shl 24)
|
result = result or (cast[uint64](inp[3]) shl 24)
|
||||||
|
|
||||||
proc feFromBytes(h: var Fe, s: openarray[byte]) =
|
proc feFromBytes(h: var Fe, s: openArray[byte]) =
|
||||||
var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
|
var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
|
||||||
|
|
||||||
var h0 = cast[int64](load4(s.toOpenArray(0, 3)))
|
var h0 = cast[int64](load_4(s.toOpenArray(0, 3)))
|
||||||
var h1 = cast[int64](load3(s.toOpenArray(4, 6))) shl 6
|
var h1 = cast[int64](load_3(s.toOpenArray(4, 6))) shl 6
|
||||||
var h2 = cast[int64](load3(s.toOpenArray(7, 9))) shl 5
|
var h2 = cast[int64](load_3(s.toOpenArray(7, 9))) shl 5
|
||||||
var h3 = cast[int64](load3(s.toOpenArray(10, 12))) shl 3
|
var h3 = cast[int64](load_3(s.toOpenArray(10, 12))) shl 3
|
||||||
var h4 = cast[int64](load3(s.toOpenArray(13, 15))) shl 2
|
var h4 = cast[int64](load_3(s.toOpenArray(13, 15))) shl 2
|
||||||
var h5 = cast[int64](load4(s.toOpenArray(16, 19)))
|
var h5 = cast[int64](load_4(s.toOpenArray(16, 19)))
|
||||||
var h6 = cast[int64](load3(s.toOpenArray(20, 22))) shl 7
|
var h6 = cast[int64](load_3(s.toOpenArray(20, 22))) shl 7
|
||||||
var h7 = cast[int64](load3(s.toOpenArray(23, 25))) shl 5
|
var h7 = cast[int64](load_3(s.toOpenArray(23, 25))) shl 5
|
||||||
var h8 = cast[int64](load3(s.toOpenArray(26, 28))) shl 4
|
var h8 = cast[int64](load_3(s.toOpenArray(26, 28))) shl 4
|
||||||
var h9 = (cast[int64](load3(s.toOpenArray(29, 31))) and 8388607'i32) shl 2
|
var h9 = (cast[int64](load_3(s.toOpenArray(29, 31))) and 8388607'i32) shl 2
|
||||||
|
|
||||||
c9 = ashr((h9 + (1'i64 shl 24)), 25); h0 = h0 + (c9 * 19); h9 -= (c9 shl 25)
|
c9 = ashr((h9 + (1'i64 shl 24)), 25); h0 = h0 + (c9 * 19); h9 -= (c9 shl 25)
|
||||||
c1 = ashr((h1 + (1'i64 shl 24)), 25); h2 = h2 + c1; h1 -= (c1 shl 25)
|
c1 = ashr((h1 + (1'i64 shl 24)), 25); h2 = h2 + c1; h1 -= (c1 shl 25)
|
||||||
|
@ -213,7 +213,7 @@ proc feFromBytes(h: var Fe, s: openarray[byte]) =
|
||||||
h[8] = cast[int32](h8)
|
h[8] = cast[int32](h8)
|
||||||
h[9] = cast[int32](h9)
|
h[9] = cast[int32](h9)
|
||||||
|
|
||||||
proc feToBytes(s: var openarray[byte], h: Fe) =
|
proc feToBytes(s: var openArray[byte], h: Fe) =
|
||||||
var h0 = h[0]; var h1 = h[1]; var h2 = h[2]; var h3 = h[3]; var h4 = h[4]
|
var h0 = h[0]; var h1 = h[1]; var h2 = h[2]; var h3 = h[3]; var h4 = h[4]
|
||||||
var h5 = h[5]; var h6 = h[6]; var h7 = h[7]; var h8 = h[8]; var h9 = h[9]
|
var h5 = h[5]; var h6 = h[6]; var h7 = h[7]; var h8 = h[8]; var h9 = h[9]
|
||||||
var q, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int32
|
var q, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int32
|
||||||
|
@ -450,7 +450,7 @@ proc feNeg(h: var Fe, f: Fe) =
|
||||||
h[0] = h0; h[1] = h1; h[2] = h2; h[3] = h3; h[4] = h4
|
h[0] = h0; h[1] = h1; h[2] = h2; h[3] = h3; h[4] = h4
|
||||||
h[5] = h5; h[6] = h6; h[7] = h7; h[8] = h8; h[9] = h9
|
h[5] = h5; h[6] = h6; h[7] = h7; h[8] = h8; h[9] = h9
|
||||||
|
|
||||||
proc verify32(x: openarray[byte], y: openarray[byte]): int32 =
|
proc verify32(x: openArray[byte], y: openArray[byte]): int32 =
|
||||||
var d = 0'u32
|
var d = 0'u32
|
||||||
d = d or (x[0] xor y[0])
|
d = d or (x[0] xor y[0])
|
||||||
d = d or (x[1] xor y[1])
|
d = d or (x[1] xor y[1])
|
||||||
|
@ -800,7 +800,7 @@ proc geAdd(r: var GeP1P1, p: GeP3, q: GeCached) =
|
||||||
feAdd(r.z, t0, r.t)
|
feAdd(r.z, t0, r.t)
|
||||||
feSub(r.t, t0, r.t)
|
feSub(r.t, t0, r.t)
|
||||||
|
|
||||||
proc geFromBytesNegateVartime(h: var GeP3, s: openarray[byte]): int32 =
|
proc geFromBytesNegateVartime(h: var GeP3, s: openArray[byte]): int32 =
|
||||||
var u, v, v3, vxx, check: Fe
|
var u, v, v3, vxx, check: Fe
|
||||||
|
|
||||||
feFromBytes(h.y, s)
|
feFromBytes(h.y, s)
|
||||||
|
@ -876,12 +876,12 @@ proc geSub(r: var GeP1P1, p: GeP3, q: GeCached) =
|
||||||
feSub(r.z, t0, r.t)
|
feSub(r.z, t0, r.t)
|
||||||
feAdd(r.t, t0, r.t)
|
feAdd(r.t, t0, r.t)
|
||||||
|
|
||||||
proc geToBytes(s: var openarray[byte], h: GeP2) =
|
proc geToBytes(s: var openArray[byte], h: GeP2) =
|
||||||
var recip, x, y: Fe
|
var recip, x, y: Fe
|
||||||
feInvert(recip, h.z)
|
feInvert(recip, h.z)
|
||||||
feMul(x, h.x, recip)
|
feMul(x, h.x, recip)
|
||||||
feMul(y, h.y, recip)
|
feMul(y, h.y, recip)
|
||||||
feTobytes(s, y)
|
feToBytes(s, y)
|
||||||
s[31] = s[31] xor cast[byte](feIsNegative(x) shl 7)
|
s[31] = s[31] xor cast[byte](feIsNegative(x) shl 7)
|
||||||
|
|
||||||
proc geP1P1toP2(r: var GeP2, p: GeP1P1) =
|
proc geP1P1toP2(r: var GeP2, p: GeP1P1) =
|
||||||
|
@ -925,10 +925,10 @@ proc geP3toP2(r: var GeP2, p: GeP3) =
|
||||||
|
|
||||||
proc geP3dbl(r: var GeP1P1, p: GeP3) =
|
proc geP3dbl(r: var GeP1P1, p: GeP3) =
|
||||||
var q: GeP2
|
var q: GeP2
|
||||||
geP3ToP2(q, p)
|
geP3toP2(q, p)
|
||||||
geP2dbl(r, q)
|
geP2dbl(r, q)
|
||||||
|
|
||||||
proc geP3ToBytes(s: var openarray[byte], h: GeP3) =
|
proc geP3ToBytes(s: var openArray[byte], h: GeP3) =
|
||||||
var recip, x, y: Fe
|
var recip, x, y: Fe
|
||||||
|
|
||||||
feInvert(recip, h.z);
|
feInvert(recip, h.z);
|
||||||
|
@ -985,7 +985,7 @@ proc select(t: var GePrecomp, pos: int, b: int8) =
|
||||||
feNeg(minust.xy2d, t.xy2d)
|
feNeg(minust.xy2d, t.xy2d)
|
||||||
cmov(t, minust, bnegative)
|
cmov(t, minust, bnegative)
|
||||||
|
|
||||||
proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
|
proc geScalarMultBase(h: var GeP3, a: openArray[byte]) =
|
||||||
var e: array[64, int8]
|
var e: array[64, int8]
|
||||||
var carry: int8
|
var carry: int8
|
||||||
var r: GeP1P1
|
var r: GeP1P1
|
||||||
|
@ -1010,8 +1010,8 @@ proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
|
||||||
geMadd(r, h, t)
|
geMadd(r, h, t)
|
||||||
geP1P1toP3(h, r)
|
geP1P1toP3(h, r)
|
||||||
|
|
||||||
geP3dbl(r, h); geP1P1ToP2(s, r)
|
geP3dbl(r, h); geP1P1toP2(s, r)
|
||||||
geP2dbl(r, s); geP1P1ToP2(s, r)
|
geP2dbl(r, s); geP1P1toP2(s, r)
|
||||||
geP2dbl(r, s); geP1P1toP2(s, r)
|
geP2dbl(r, s); geP1P1toP2(s, r)
|
||||||
geP2dbl(r, s); geP1P1toP3(h, r)
|
geP2dbl(r, s); geP1P1toP3(h, r)
|
||||||
|
|
||||||
|
@ -1020,7 +1020,7 @@ proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
|
||||||
geMadd(r, h, t)
|
geMadd(r, h, t)
|
||||||
geP1P1toP3(h, r)
|
geP1P1toP3(h, r)
|
||||||
|
|
||||||
proc scMulAdd(s: var openarray[byte], a, b, c: openarray[byte]) =
|
proc scMulAdd(s: var openArray[byte], a, b, c: openArray[byte]) =
|
||||||
var a0 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(0, 2)))
|
var a0 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(0, 2)))
|
||||||
var a1 = 2097151'i64 and cast[int64](load_4(a.toOpenArray(2, 5)) shr 5)
|
var a1 = 2097151'i64 and cast[int64](load_4(a.toOpenArray(2, 5)) shr 5)
|
||||||
var a2 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(5, 7)) shr 2)
|
var a2 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(5, 7)) shr 2)
|
||||||
|
@ -1320,7 +1320,7 @@ proc scMulAdd(s: var openarray[byte], a, b, c: openarray[byte]) =
|
||||||
s[30] = cast[uint8](ashr(s11, 9))
|
s[30] = cast[uint8](ashr(s11, 9))
|
||||||
s[31] = cast[uint8](ashr(s11, 17))
|
s[31] = cast[uint8](ashr(s11, 17))
|
||||||
|
|
||||||
proc scReduce(s: var openarray[byte]) =
|
proc scReduce(s: var openArray[byte]) =
|
||||||
var s0 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(0, 2)));
|
var s0 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(0, 2)));
|
||||||
var s1 = 2097151'i64 and cast[int64](load_4(s.toOpenArray(2, 5)) shr 5)
|
var s1 = 2097151'i64 and cast[int64](load_4(s.toOpenArray(2, 5)) shr 5)
|
||||||
var s2 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(5, 7)) shr 2)
|
var s2 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(5, 7)) shr 2)
|
||||||
|
@ -1546,7 +1546,7 @@ proc scReduce(s: var openarray[byte]) =
|
||||||
s[30] = cast[byte](ashr(s11, 9))
|
s[30] = cast[byte](ashr(s11, 9))
|
||||||
s[31] = cast[byte](ashr(s11, 17))
|
s[31] = cast[byte](ashr(s11, 17))
|
||||||
|
|
||||||
proc slide(r: var openarray[int8], a: openarray[byte]) =
|
proc slide(r: var openArray[int8], a: openArray[byte]) =
|
||||||
for i in 0..<256:
|
for i in 0..<256:
|
||||||
r[i] = cast[int8](1'u8 and (a[i shr 3] shr (i and 7)))
|
r[i] = cast[int8](1'u8 and (a[i shr 3] shr (i and 7)))
|
||||||
for i in 0..<256:
|
for i in 0..<256:
|
||||||
|
@ -1567,8 +1567,8 @@ proc slide(r: var openarray[int8], a: openarray[byte]) =
|
||||||
break
|
break
|
||||||
inc(b)
|
inc(b)
|
||||||
|
|
||||||
proc geDoubleScalarMultVartime(r: var GeP2, a: openarray[byte], A: GeP3,
|
proc geDoubleScalarMultVartime(r: var GeP2, a: openArray[byte], A: GeP3,
|
||||||
b: openarray[byte]) =
|
b: openArray[byte]) =
|
||||||
var
|
var
|
||||||
aslide: array[256, int8]
|
aslide: array[256, int8]
|
||||||
bslide: array[256, int8]
|
bslide: array[256, int8]
|
||||||
|
@ -1632,7 +1632,7 @@ proc NEQ(x, y: uint32): uint32 {.inline.} =
|
||||||
proc LT0(x: int32): uint32 {.inline.} =
|
proc LT0(x: int32): uint32 {.inline.} =
|
||||||
result = cast[uint32](x) shr 31
|
result = cast[uint32](x) shr 31
|
||||||
|
|
||||||
proc checkScalar*(scalar: openarray[byte]): uint32 =
|
proc checkScalar*(scalar: openArray[byte]): uint32 =
|
||||||
var z = 0'u32
|
var z = 0'u32
|
||||||
var c = 0'i32
|
var c = 0'i32
|
||||||
for u in scalar:
|
for u in scalar:
|
||||||
|
@ -1686,7 +1686,7 @@ proc getPublicKey*(key: EdPrivateKey): EdPublicKey =
|
||||||
## Calculate and return ED25519 public key from private key ``key``.
|
## Calculate and return ED25519 public key from private key ``key``.
|
||||||
copyMem(addr result.data[0], unsafeAddr key.data[32], 32)
|
copyMem(addr result.data[0], unsafeAddr key.data[32], 32)
|
||||||
|
|
||||||
proc toBytes*(key: EdPrivateKey, data: var openarray[byte]): int =
|
proc toBytes*(key: EdPrivateKey, data: var openArray[byte]): int =
|
||||||
## Serialize ED25519 `private key` ``key`` to raw binary form and store it
|
## Serialize ED25519 `private key` ``key`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -1696,7 +1696,7 @@ proc toBytes*(key: EdPrivateKey, data: var openarray[byte]): int =
|
||||||
if len(data) >= result:
|
if len(data) >= result:
|
||||||
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
|
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
|
||||||
|
|
||||||
proc toBytes*(key: EdPublicKey, data: var openarray[byte]): int =
|
proc toBytes*(key: EdPublicKey, data: var openArray[byte]): int =
|
||||||
## Serialize ED25519 `public key` ``key`` to raw binary form and store it
|
## Serialize ED25519 `public key` ``key`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -1706,7 +1706,7 @@ proc toBytes*(key: EdPublicKey, data: var openarray[byte]): int =
|
||||||
if len(data) >= result:
|
if len(data) >= result:
|
||||||
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
|
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
|
||||||
|
|
||||||
proc toBytes*(sig: EdSignature, data: var openarray[byte]): int =
|
proc toBytes*(sig: EdSignature, data: var openArray[byte]): int =
|
||||||
## Serialize ED25519 `signature` ``sig`` to raw binary form and store it
|
## Serialize ED25519 `signature` ``sig`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@ -1749,7 +1749,7 @@ proc `$`*(sig: EdSignature): string =
|
||||||
## Return string representation of ED25519 `signature`.
|
## Return string representation of ED25519 `signature`.
|
||||||
ncrutils.toHex(sig.data)
|
ncrutils.toHex(sig.data)
|
||||||
|
|
||||||
proc init*(key: var EdPrivateKey, data: openarray[byte]): bool =
|
proc init*(key: var EdPrivateKey, data: openArray[byte]): bool =
|
||||||
## Initialize ED25519 `private key` ``key`` from raw binary
|
## Initialize ED25519 `private key` ``key`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@ -1759,7 +1759,7 @@ proc init*(key: var EdPrivateKey, data: openarray[byte]): bool =
|
||||||
copyMem(addr key.data[0], unsafeAddr data[0], length)
|
copyMem(addr key.data[0], unsafeAddr data[0], length)
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
proc init*(key: var EdPublicKey, data: openarray[byte]): bool =
|
proc init*(key: var EdPublicKey, data: openArray[byte]): bool =
|
||||||
## Initialize ED25519 `public key` ``key`` from raw binary
|
## Initialize ED25519 `public key` ``key`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -1769,7 +1769,7 @@ proc init*(key: var EdPublicKey, data: openarray[byte]): bool =
|
||||||
copyMem(addr key.data[0], unsafeAddr data[0], length)
|
copyMem(addr key.data[0], unsafeAddr data[0], length)
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
proc init*(sig: var EdSignature, data: openarray[byte]): bool =
|
proc init*(sig: var EdSignature, data: openArray[byte]): bool =
|
||||||
## Initialize ED25519 `signature` ``sig`` from raw binary
|
## Initialize ED25519 `signature` ``sig`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -1801,7 +1801,7 @@ proc init*(sig: var EdSignature, data: string): bool =
|
||||||
init(sig, ncrutils.fromHex(data))
|
init(sig, ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc init*(t: typedesc[EdPrivateKey],
|
proc init*(t: typedesc[EdPrivateKey],
|
||||||
data: openarray[byte]): Result[EdPrivateKey, EdError] =
|
data: openArray[byte]): Result[EdPrivateKey, EdError] =
|
||||||
## Initialize ED25519 `private key` from raw binary representation ``data``
|
## Initialize ED25519 `private key` from raw binary representation ``data``
|
||||||
## and return constructed object.
|
## and return constructed object.
|
||||||
var res: t
|
var res: t
|
||||||
|
@@ -1811,7 +1811,7 @@ proc init*(t: typedesc[EdPrivateKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[EdPublicKey],
|
proc init*(t: typedesc[EdPublicKey],
|
||||||
data: openarray[byte]): Result[EdPublicKey, EdError] =
|
data: openArray[byte]): Result[EdPublicKey, EdError] =
|
||||||
## Initialize ED25519 `public key` from raw binary representation ``data``
|
## Initialize ED25519 `public key` from raw binary representation ``data``
|
||||||
## and return constructed object.
|
## and return constructed object.
|
||||||
var res: t
|
var res: t
|
||||||
|
@@ -1821,7 +1821,7 @@ proc init*(t: typedesc[EdPublicKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[EdSignature],
|
proc init*(t: typedesc[EdSignature],
|
||||||
data: openarray[byte]): Result[EdSignature, EdError] =
|
data: openArray[byte]): Result[EdSignature, EdError] =
|
||||||
## Initialize ED25519 `signature` from raw binary representation ``data``
|
## Initialize ED25519 `signature` from raw binary representation ``data``
|
||||||
## and return constructed object.
|
## and return constructed object.
|
||||||
var res: t
|
var res: t
|
||||||
|
@@ -1878,7 +1878,7 @@ proc clear*(pair: var EdKeyPair) =
|
||||||
burnMem(pair.pubkey.data)
|
burnMem(pair.pubkey.data)
|
||||||
|
|
||||||
proc sign*[T: byte|char](key: EdPrivateKey,
|
proc sign*[T: byte|char](key: EdPrivateKey,
|
||||||
message: openarray[T]): EdSignature {.gcsafe, noinit.} =
|
message: openArray[T]): EdSignature {.gcsafe, noinit.} =
|
||||||
## Create ED25519 signature of data ``message`` using private key ``key``.
|
## Create ED25519 signature of data ``message`` using private key ``key``.
|
||||||
var ctx: sha512
|
var ctx: sha512
|
||||||
var r: GeP3
|
var r: GeP3
|
||||||
|
@@ -1911,7 +1911,7 @@ proc sign*[T: byte|char](key: EdPrivateKey,
|
||||||
scMulAdd(result.data.toOpenArray(32, 63), hram.data.toOpenArray(0, 31),
|
scMulAdd(result.data.toOpenArray(32, 63), hram.data.toOpenArray(0, 31),
|
||||||
hash.data.toOpenArray(0, 31), nonce.data.toOpenArray(0, 31))
|
hash.data.toOpenArray(0, 31), nonce.data.toOpenArray(0, 31))
|
||||||
|
|
||||||
proc verify*[T: byte|char](sig: EdSignature, message: openarray[T],
|
proc verify*[T: byte|char](sig: EdSignature, message: openArray[T],
|
||||||
key: EdPublicKey): bool =
|
key: EdPublicKey): bool =
|
||||||
## Verify ED25519 signature ``sig`` using public key ``key`` and data
|
## Verify ED25519 signature ``sig`` using public key ``key`` and data
|
||||||
## ``message``.
|
## ``message``.
|
||||||
|
|
|
@@ -23,18 +23,18 @@ proc br_hkdf_inject(ctx: ptr BearHKDFContext; ikm: pointer; len: csize_t) {.impo
|
||||||
proc br_hkdf_flip(ctx: ptr BearHKDFContext) {.importc: "br_hkdf_flip", header: "bearssl_kdf.h", raises: [].}
|
proc br_hkdf_flip(ctx: ptr BearHKDFContext) {.importc: "br_hkdf_flip", header: "bearssl_kdf.h", raises: [].}
|
||||||
proc br_hkdf_produce(ctx: ptr BearHKDFContext; info: pointer; infoLen: csize_t; output: pointer; outputLen: csize_t) {.importc: "br_hkdf_produce", header: "bearssl_kdf.h", raises: [].}
|
proc br_hkdf_produce(ctx: ptr BearHKDFContext; info: pointer; infoLen: csize_t; output: pointer; outputLen: csize_t) {.importc: "br_hkdf_produce", header: "bearssl_kdf.h", raises: [].}
|
||||||
|
|
||||||
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openarray[byte]; outputs: var openarray[HKDFResult[len]]) =
|
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openArray[byte]; outputs: var openArray[HKDFResult[len]]) =
|
||||||
var
|
var
|
||||||
ctx: BearHKDFContext
|
ctx: BearHKDFContext
|
||||||
br_hkdf_init(
|
br_hkdf_init(
|
||||||
addr ctx, addr sha256Vtable,
|
addr ctx, addr sha256Vtable,
|
||||||
if salt.len > 0: unsafeaddr salt[0] else: nil, csize_t(salt.len))
|
if salt.len > 0: unsafeAddr salt[0] else: nil, csize_t(salt.len))
|
||||||
br_hkdf_inject(
|
br_hkdf_inject(
|
||||||
addr ctx, if ikm.len > 0: unsafeaddr ikm[0] else: nil, csize_t(ikm.len))
|
addr ctx, if ikm.len > 0: unsafeAddr ikm[0] else: nil, csize_t(ikm.len))
|
||||||
br_hkdf_flip(addr ctx)
|
br_hkdf_flip(addr ctx)
|
||||||
for i in 0..outputs.high:
|
for i in 0..outputs.high:
|
||||||
br_hkdf_produce(
|
br_hkdf_produce(
|
||||||
addr ctx,
|
addr ctx,
|
||||||
if info.len > 0: unsafeaddr info[0]
|
if info.len > 0: unsafeAddr info[0]
|
||||||
else: nil, csize_t(info.len),
|
else: nil, csize_t(info.len),
|
||||||
addr outputs[i][0], csize_t(outputs[i].len))
|
addr outputs[i][0], csize_t(outputs[i].len))
|
||||||
|
|
|
@@ -154,7 +154,7 @@ proc code*(tag: Asn1Tag): byte {.inline.} =
|
||||||
of Asn1Tag.Context:
|
of Asn1Tag.Context:
|
||||||
0xA0'u8
|
0xA0'u8
|
||||||
|
|
||||||
proc asn1EncodeLength*(dest: var openarray[byte], length: uint64): int =
|
proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
|
||||||
## Encode ASN.1 DER length part of TLV triple and return number of bytes
|
## Encode ASN.1 DER length part of TLV triple and return number of bytes
|
||||||
## (octets) used.
|
## (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -181,8 +181,8 @@ proc asn1EncodeLength*(dest: var openarray[byte], length: uint64): int =
|
||||||
# then 9, so it is safe to convert it to `int`.
|
# then 9, so it is safe to convert it to `int`.
|
||||||
int(res)
|
int(res)
|
||||||
|
|
||||||
proc asn1EncodeInteger*(dest: var openarray[byte],
|
proc asn1EncodeInteger*(dest: var openArray[byte],
|
||||||
value: openarray[byte]): int =
|
value: openArray[byte]): int =
|
||||||
## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
|
## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
|
||||||
## and return number of bytes (octets) used.
|
## and return number of bytes (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -228,7 +228,7 @@ proc asn1EncodeInteger*(dest: var openarray[byte],
|
||||||
len(value) - offset)
|
len(value) - offset)
|
||||||
destlen
|
destlen
|
||||||
|
|
||||||
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openarray[byte],
|
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte],
|
||||||
value: T): int =
|
value: T): int =
|
||||||
## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
|
## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
|
||||||
## bytes (octets) used.
|
## bytes (octets) used.
|
||||||
|
@@ -238,7 +238,7 @@ proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openarray[byte],
|
||||||
## but number of bytes (octets) required will be returned.
|
## but number of bytes (octets) required will be returned.
|
||||||
dest.asn1EncodeInteger(value.toBytesBE())
|
dest.asn1EncodeInteger(value.toBytesBE())
|
||||||
|
|
||||||
proc asn1EncodeBoolean*(dest: var openarray[byte], value: bool): int =
|
proc asn1EncodeBoolean*(dest: var openArray[byte], value: bool): int =
|
||||||
## Encode Nim's boolean as ASN.1 DER `BOOLEAN` and return number of bytes
|
## Encode Nim's boolean as ASN.1 DER `BOOLEAN` and return number of bytes
|
||||||
## (octets) used.
|
## (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -252,7 +252,7 @@ proc asn1EncodeBoolean*(dest: var openarray[byte], value: bool): int =
|
||||||
dest[2] = if value: 0xFF'u8 else: 0x00'u8
|
dest[2] = if value: 0xFF'u8 else: 0x00'u8
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeNull*(dest: var openarray[byte]): int =
|
proc asn1EncodeNull*(dest: var openArray[byte]): int =
|
||||||
## Encode ASN.1 DER `NULL` and return number of bytes (octets) used.
|
## Encode ASN.1 DER `NULL` and return number of bytes (octets) used.
|
||||||
##
|
##
|
||||||
## If length of ``dest`` is less then number of required bytes to encode
|
## If length of ``dest`` is less then number of required bytes to encode
|
||||||
|
@@ -264,8 +264,8 @@ proc asn1EncodeNull*(dest: var openarray[byte]): int =
|
||||||
dest[1] = 0x00'u8
|
dest[1] = 0x00'u8
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeOctetString*(dest: var openarray[byte],
|
proc asn1EncodeOctetString*(dest: var openArray[byte],
|
||||||
value: openarray[byte]): int =
|
value: openArray[byte]): int =
|
||||||
## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
|
## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
|
||||||
## bytes (octets) used.
|
## bytes (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -282,8 +282,8 @@ proc asn1EncodeOctetString*(dest: var openarray[byte],
|
||||||
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeBitString*(dest: var openarray[byte],
|
proc asn1EncodeBitString*(dest: var openArray[byte],
|
||||||
value: openarray[byte], bits = 0): int =
|
value: openArray[byte], bits = 0): int =
|
||||||
## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
|
## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
|
||||||
## (octets) used.
|
## (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -318,7 +318,7 @@ proc asn1EncodeBitString*(dest: var openarray[byte],
|
||||||
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
|
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openarray[byte],
|
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
|
||||||
value: T): int =
|
value: T): int =
|
||||||
var v = value
|
var v = value
|
||||||
if value <= cast[T](0x7F):
|
if value <= cast[T](0x7F):
|
||||||
|
@@ -341,7 +341,7 @@ proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openarray[byte],
|
||||||
dest[k - 1] = dest[k - 1] and 0x7F'u8
|
dest[k - 1] = dest[k - 1] and 0x7F'u8
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[int]): int =
|
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[int]): int =
|
||||||
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
|
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
|
||||||
## return number of bytes (octets) used.
|
## return number of bytes (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -367,7 +367,7 @@ proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[int]): int =
|
||||||
cast[uint64](value[i]))
|
cast[uint64](value[i]))
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[byte]): int =
|
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
|
||||||
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
|
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
|
||||||
## number of bytes (octets) used.
|
## number of bytes (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -386,8 +386,8 @@ proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[byte]): int =
|
||||||
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeSequence*(dest: var openarray[byte],
|
proc asn1EncodeSequence*(dest: var openArray[byte],
|
||||||
value: openarray[byte]): int =
|
value: openArray[byte]): int =
|
||||||
## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
|
## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
|
||||||
## (octets) used.
|
## (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -403,7 +403,7 @@ proc asn1EncodeSequence*(dest: var openarray[byte],
|
||||||
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeComposite*(dest: var openarray[byte],
|
proc asn1EncodeComposite*(dest: var openArray[byte],
|
||||||
value: Asn1Composite): int =
|
value: Asn1Composite): int =
|
||||||
## Encode composite value and return number of bytes (octets) used.
|
## Encode composite value and return number of bytes (octets) used.
|
||||||
##
|
##
|
||||||
|
@@ -420,7 +420,7 @@ proc asn1EncodeComposite*(dest: var openarray[byte],
|
||||||
len(value.buffer))
|
len(value.buffer))
|
||||||
res
|
res
|
||||||
|
|
||||||
proc asn1EncodeContextTag*(dest: var openarray[byte], value: openarray[byte],
|
proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
|
||||||
tag: int): int =
|
tag: int): int =
|
||||||
## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
|
## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
|
||||||
## return number of bytes (octets) used.
|
## return number of bytes (octets) used.
|
||||||
|
@@ -692,7 +692,7 @@ proc getBuffer*(field: Asn1Field): Asn1Buffer {.inline.} =
|
||||||
## Return ``field`` as Asn1Buffer to enter composite types.
|
## Return ``field`` as Asn1Buffer to enter composite types.
|
||||||
Asn1Buffer(buffer: field.buffer, offset: field.offset, length: field.length)
|
Asn1Buffer(buffer: field.buffer, offset: field.offset, length: field.length)
|
||||||
|
|
||||||
proc `==`*(field: Asn1Field, data: openarray[byte]): bool =
|
proc `==`*(field: Asn1Field, data: openArray[byte]): bool =
|
||||||
## Compares field ``field`` data with ``data`` and returns ``true`` if both
|
## Compares field ``field`` data with ``data`` and returns ``true`` if both
|
||||||
## buffers are equal.
|
## buffers are equal.
|
||||||
let length = len(field.buffer)
|
let length = len(field.buffer)
|
||||||
|
@@ -710,7 +710,7 @@ proc `==`*(field: Asn1Field, data: openarray[byte]): bool =
|
||||||
else:
|
else:
|
||||||
false
|
false
|
||||||
|
|
||||||
proc init*(t: typedesc[Asn1Buffer], data: openarray[byte]): Asn1Buffer =
|
proc init*(t: typedesc[Asn1Buffer], data: openArray[byte]): Asn1Buffer =
|
||||||
## Initialize ``Asn1Buffer`` from array of bytes ``data``.
|
## Initialize ``Asn1Buffer`` from array of bytes ``data``.
|
||||||
Asn1Buffer(buffer: @data)
|
Asn1Buffer(buffer: @data)
|
||||||
|
|
||||||
|
@@ -825,7 +825,7 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: bool) =
|
||||||
abc.offset += length
|
abc.offset += length
|
||||||
|
|
||||||
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
|
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
|
||||||
value: openarray[byte], bits = 0) =
|
value: openArray[byte], bits = 0) =
|
||||||
## Write array ``value`` using ``tag``.
|
## Write array ``value`` using ``tag``.
|
||||||
##
|
##
|
||||||
## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,
|
## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,
|
||||||
|
|
|
@@ -279,7 +279,7 @@ proc clear*[T: RsaPKI|RsaKeyPair](pki: var T) =
|
||||||
burnMem(pki.buffer)
|
burnMem(pki.buffer)
|
||||||
pki.buffer.setLen(0)
|
pki.buffer.setLen(0)
|
||||||
|
|
||||||
proc toBytes*(key: RsaPrivateKey, data: var openarray[byte]): RsaResult[int] =
|
proc toBytes*(key: RsaPrivateKey, data: var openArray[byte]): RsaResult[int] =
|
||||||
## Serialize RSA private key ``key`` to ASN.1 DER binary form and store it
|
## Serialize RSA private key ``key`` to ASN.1 DER binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -316,7 +316,7 @@ proc toBytes*(key: RsaPrivateKey, data: var openarray[byte]): RsaResult[int] =
|
||||||
else:
|
else:
|
||||||
err(RsaKeyIncorrectError)
|
err(RsaKeyIncorrectError)
|
||||||
|
|
||||||
proc toBytes*(key: RsaPublicKey, data: var openarray[byte]): RsaResult[int] =
|
proc toBytes*(key: RsaPublicKey, data: var openArray[byte]): RsaResult[int] =
|
||||||
## Serialize RSA public key ``key`` to ASN.1 DER binary form and store it
|
## Serialize RSA public key ``key`` to ASN.1 DER binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -350,7 +350,7 @@ proc toBytes*(key: RsaPublicKey, data: var openarray[byte]): RsaResult[int] =
|
||||||
else:
|
else:
|
||||||
err(RsaKeyIncorrectError)
|
err(RsaKeyIncorrectError)
|
||||||
|
|
||||||
proc toBytes*(sig: RsaSignature, data: var openarray[byte]): RSaResult[int] =
|
proc toBytes*(sig: RsaSignature, data: var openArray[byte]): RsaResult[int] =
|
||||||
## Serialize RSA signature ``sig`` to raw binary form and store it
|
## Serialize RSA signature ``sig`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -402,7 +402,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
|
||||||
else:
|
else:
|
||||||
err(RsaSignatureError)
|
err(RsaSignatureError)
|
||||||
|
|
||||||
proc init*(key: var RsaPrivateKey, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize RSA private key ``key`` from ASN.1 DER binary representation
|
## Initialize RSA private key ``key`` from ASN.1 DER binary representation
|
||||||
## ``data``.
|
## ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -493,7 +493,7 @@ proc init*(key: var RsaPrivateKey, data: openarray[byte]): Result[void, Asn1Erro
|
||||||
else:
|
else:
|
||||||
err(Asn1Error.Incorrect)
|
err(Asn1Error.Incorrect)
|
||||||
|
|
||||||
proc init*(key: var RsaPublicKey, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize RSA public key ``key`` from ASN.1 DER binary representation
|
## Initialize RSA public key ``key`` from ASN.1 DER binary representation
|
||||||
## ``data``.
|
## ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -562,7 +562,7 @@ proc init*(key: var RsaPublicKey, data: openarray[byte]): Result[void, Asn1Error
|
||||||
else:
|
else:
|
||||||
err(Asn1Error.Incorrect)
|
err(Asn1Error.Incorrect)
|
||||||
|
|
||||||
proc init*(sig: var RsaSignature, data: openarray[byte]): Result[void, Asn1Error] =
|
proc init*(sig: var RsaSignature, data: openArray[byte]): Result[void, Asn1Error] =
|
||||||
## Initialize RSA signature ``sig`` from ASN.1 DER binary representation
|
## Initialize RSA signature ``sig`` from ASN.1 DER binary representation
|
||||||
## ``data``.
|
## ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -583,7 +583,7 @@ proc init*[T: RsaPKI](sospk: var T,
|
||||||
sospk.init(ncrutils.fromHex(data))
|
sospk.init(ncrutils.fromHex(data))
|
||||||
|
|
||||||
proc init*(t: typedesc[RsaPrivateKey],
|
proc init*(t: typedesc[RsaPrivateKey],
|
||||||
data: openarray[byte]): RsaResult[RsaPrivateKey] =
|
data: openArray[byte]): RsaResult[RsaPrivateKey] =
|
||||||
## Initialize RSA private key from ASN.1 DER binary representation ``data``
|
## Initialize RSA private key from ASN.1 DER binary representation ``data``
|
||||||
## and return constructed object.
|
## and return constructed object.
|
||||||
var res: RsaPrivateKey
|
var res: RsaPrivateKey
|
||||||
|
@@ -593,7 +593,7 @@ proc init*(t: typedesc[RsaPrivateKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[RsaPublicKey],
|
proc init*(t: typedesc[RsaPublicKey],
|
||||||
data: openarray[byte]): RsaResult[RsaPublicKey] =
|
data: openArray[byte]): RsaResult[RsaPublicKey] =
|
||||||
## Initialize RSA public key from ASN.1 DER binary representation ``data``
|
## Initialize RSA public key from ASN.1 DER binary representation ``data``
|
||||||
## and return constructed object.
|
## and return constructed object.
|
||||||
var res: RsaPublicKey
|
var res: RsaPublicKey
|
||||||
|
@@ -603,7 +603,7 @@ proc init*(t: typedesc[RsaPublicKey],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(t: typedesc[RsaSignature],
|
proc init*(t: typedesc[RsaSignature],
|
||||||
data: openarray[byte]): RsaResult[RsaSignature] =
|
data: openArray[byte]): RsaResult[RsaSignature] =
|
||||||
## Initialize RSA signature from raw binary representation ``data`` and
|
## Initialize RSA signature from raw binary representation ``data`` and
|
||||||
## return constructed object.
|
## return constructed object.
|
||||||
var res: RsaSignature
|
var res: RsaSignature
|
||||||
|
@@ -743,7 +743,7 @@ proc `==`*(a, b: RsaPublicKey): bool =
|
||||||
(r1 and r2)
|
(r1 and r2)
|
||||||
|
|
||||||
proc sign*[T: byte|char](key: RsaPrivateKey,
|
proc sign*[T: byte|char](key: RsaPrivateKey,
|
||||||
message: openarray[T]): RsaResult[RsaSignature] {.gcsafe.} =
|
message: openArray[T]): RsaResult[RsaSignature] {.gcsafe.} =
|
||||||
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
|
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
|
||||||
## key ``key``.
|
## key ``key``.
|
||||||
if isNil(key):
|
if isNil(key):
|
||||||
|
@@ -770,7 +770,7 @@ proc sign*[T: byte|char](key: RsaPrivateKey,
|
||||||
else:
|
else:
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc verify*[T: byte|char](sig: RsaSignature, message: openarray[T],
|
proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
|
||||||
pubkey: RsaPublicKey): bool {.inline.} =
|
pubkey: RsaPublicKey): bool {.inline.} =
|
||||||
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
|
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
|
||||||
## ``message``.
|
## ``message``.
|
||||||
|
|
|
@@ -54,7 +54,7 @@ template seckey*(v: SkKeyPair): SkPrivateKey =
|
||||||
template pubkey*(v: SkKeyPair): SkPublicKey =
|
template pubkey*(v: SkKeyPair): SkPublicKey =
|
||||||
SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
|
SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
|
||||||
|
|
||||||
proc init*(key: var SkPrivateKey, data: openarray[byte]): SkResult[void] =
|
proc init*(key: var SkPrivateKey, data: openArray[byte]): SkResult[void] =
|
||||||
## Initialize Secp256k1 `private key` ``key`` from raw binary
|
## Initialize Secp256k1 `private key` ``key`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
|
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
|
||||||
|
@@ -66,7 +66,7 @@ proc init*(key: var SkPrivateKey, data: string): SkResult[void] =
|
||||||
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
|
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
proc init*(key: var SkPublicKey, data: openarray[byte]): SkResult[void] =
|
proc init*(key: var SkPublicKey, data: openArray[byte]): SkResult[void] =
|
||||||
## Initialize Secp256k1 `public key` ``key`` from raw binary
|
## Initialize Secp256k1 `public key` ``key`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
|
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
|
||||||
|
@@ -78,7 +78,7 @@ proc init*(key: var SkPublicKey, data: string): SkResult[void] =
|
||||||
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
|
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
proc init*(sig: var SkSignature, data: openarray[byte]): SkResult[void] =
|
proc init*(sig: var SkSignature, data: openArray[byte]): SkResult[void] =
|
||||||
## Initialize Secp256k1 `signature` ``sig`` from raw binary
|
## Initialize Secp256k1 `signature` ``sig`` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
|
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
|
||||||
|
@@ -95,7 +95,7 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
|
||||||
return err("secp: Hex to bytes failed")
|
return err("secp: Hex to bytes failed")
|
||||||
init(sig, buffer)
|
init(sig, buffer)
|
||||||
|
|
||||||
proc init*(t: typedesc[SkPrivateKey], data: openarray[byte]): SkResult[SkPrivateKey] =
|
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
|
||||||
## Initialize Secp256k1 `private key` from raw binary
|
## Initialize Secp256k1 `private key` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -109,7 +109,7 @@ proc init*(t: typedesc[SkPrivateKey], data: string): SkResult[SkPrivateKey] =
|
||||||
## Procedure returns `private key` on success.
|
## Procedure returns `private key` on success.
|
||||||
SkSecretKey.fromHex(data).mapConvert(SkPrivateKey)
|
SkSecretKey.fromHex(data).mapConvert(SkPrivateKey)
|
||||||
|
|
||||||
proc init*(t: typedesc[SkPublicKey], data: openarray[byte]): SkResult[SkPublicKey] =
|
proc init*(t: typedesc[SkPublicKey], data: openArray[byte]): SkResult[SkPublicKey] =
|
||||||
## Initialize Secp256k1 `public key` from raw binary
|
## Initialize Secp256k1 `public key` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -125,7 +125,7 @@ proc init*(t: typedesc[SkPublicKey], data: string): SkResult[SkPublicKey] =
|
||||||
var key: SkPublicKey
|
var key: SkPublicKey
|
||||||
key.init(data) and ok(key)
|
key.init(data) and ok(key)
|
||||||
|
|
||||||
proc init*(t: typedesc[SkSignature], data: openarray[byte]): SkResult[SkSignature] =
|
proc init*(t: typedesc[SkSignature], data: openArray[byte]): SkResult[SkSignature] =
|
||||||
## Initialize Secp256k1 `signature` from raw binary
|
## Initialize Secp256k1 `signature` from raw binary
|
||||||
## representation ``data``.
|
## representation ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -145,7 +145,7 @@ proc getPublicKey*(key: SkPrivateKey): SkPublicKey =
|
||||||
## Calculate and return Secp256k1 `public key` from `private key` ``key``.
|
## Calculate and return Secp256k1 `public key` from `private key` ``key``.
|
||||||
SkPublicKey(SkSecretKey(key).toPublicKey())
|
SkPublicKey(SkSecretKey(key).toPublicKey())
|
||||||
|
|
||||||
proc toBytes*(key: SkPrivateKey, data: var openarray[byte]): SkResult[int] =
|
proc toBytes*(key: SkPrivateKey, data: var openArray[byte]): SkResult[int] =
|
||||||
## Serialize Secp256k1 `private key` ``key`` to raw binary form and store it
|
## Serialize Secp256k1 `private key` ``key`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -157,7 +157,7 @@ proc toBytes*(key: SkPrivateKey, data: var openarray[byte]): SkResult[int] =
|
||||||
else:
|
else:
|
||||||
err("secp: Not enough bytes")
|
err("secp: Not enough bytes")
|
||||||
|
|
||||||
proc toBytes*(key: SkPublicKey, data: var openarray[byte]): SkResult[int] =
|
proc toBytes*(key: SkPublicKey, data: var openArray[byte]): SkResult[int] =
|
||||||
## Serialize Secp256k1 `public key` ``key`` to raw binary form and store it
|
## Serialize Secp256k1 `public key` ``key`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -169,7 +169,7 @@ proc toBytes*(key: SkPublicKey, data: var openarray[byte]): SkResult[int] =
|
||||||
else:
|
else:
|
||||||
err("secp: Not enough bytes")
|
err("secp: Not enough bytes")
|
||||||
|
|
||||||
proc toBytes*(sig: SkSignature, data: var openarray[byte]): int =
|
proc toBytes*(sig: SkSignature, data: var openArray[byte]): int =
|
||||||
## Serialize Secp256k1 `signature` ``sig`` to raw binary form and store it
|
## Serialize Secp256k1 `signature` ``sig`` to raw binary form and store it
|
||||||
## to ``data``.
|
## to ``data``.
|
||||||
##
|
##
|
||||||
|
@@ -191,12 +191,12 @@ proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
|
||||||
let length = toBytes(sig, result)
|
let length = toBytes(sig, result)
|
||||||
result.setLen(length)
|
result.setLen(length)
|
||||||
|
|
||||||
proc sign*[T: byte|char](key: SkPrivateKey, msg: openarray[T]): SkSignature =
|
proc sign*[T: byte|char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
|
||||||
## Sign message `msg` using private key `key` and return signature object.
|
## Sign message `msg` using private key `key` and return signature object.
|
||||||
let h = sha256.digest(msg)
|
let h = sha256.digest(msg)
|
||||||
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
|
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
|
||||||
|
|
||||||
proc verify*[T: byte|char](sig: SkSignature, msg: openarray[T],
|
proc verify*[T: byte|char](sig: SkSignature, msg: openArray[T],
|
||||||
key: SkPublicKey): bool =
|
key: SkPublicKey): bool =
|
||||||
let h = sha256.digest(msg)
|
let h = sha256.digest(msg)
|
||||||
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))
|
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))
|
||||||
|
|
|
@@ -107,12 +107,12 @@ type
|
||||||
RelayActive, ## Enables active mode for relay.
|
RelayActive, ## Enables active mode for relay.
|
||||||
RelayDiscovery,## Enables passive discovery for relay.
|
RelayDiscovery,## Enables passive discovery for relay.
|
||||||
RelayHop, ## Enables hop for relay.
|
RelayHop, ## Enables hop for relay.
|
||||||
NoInlinePeerID,## Disable inlining of peer ID (not yet in #master).
|
NoInlinePeerId,## Disable inlining of peer ID (not yet in #master).
|
||||||
NoProcessCtrl ## Process was not spawned.
|
NoProcessCtrl ## Process was not spawned.
|
||||||
|
|
||||||
P2PStream* = ref object
|
P2PStream* = ref object
|
||||||
flags*: set[P2PStreamFlags]
|
flags*: set[P2PStreamFlags]
|
||||||
peer*: PeerID
|
peer*: PeerId
|
||||||
raddress*: MultiAddress
|
raddress*: MultiAddress
|
||||||
protocol*: string
|
protocol*: string
|
||||||
transp*: StreamTransport
|
transp*: StreamTransport
|
||||||
|
@@ -133,7 +133,7 @@ type
|
||||||
userData*: RootRef
|
userData*: RootRef
|
||||||
|
|
||||||
PeerInfo* = object
|
PeerInfo* = object
|
||||||
peer*: PeerID
|
peer*: PeerId
|
||||||
addresses*: seq[MultiAddress]
|
addresses*: seq[MultiAddress]
|
||||||
|
|
||||||
PubsubTicket* = ref object
|
PubsubTicket* = ref object
|
||||||
|
@@ -142,7 +142,7 @@ type
|
||||||
transp*: StreamTransport
|
transp*: StreamTransport
|
||||||
|
|
||||||
PubSubMessage* = object
|
PubSubMessage* = object
|
||||||
peer*: PeerID
|
peer*: PeerId
|
||||||
data*: seq[byte]
|
data*: seq[byte]
|
||||||
seqno*: seq[byte]
|
seqno*: seq[byte]
|
||||||
topics*: seq[string]
|
topics*: seq[string]
|
||||||
|
@@ -170,8 +170,8 @@ proc requestIdentity(): ProtoBuffer =
|
||||||
result.write(1, cast[uint](RequestType.IDENTIFY))
|
result.write(1, cast[uint](RequestType.IDENTIFY))
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestConnect(peerid: PeerID,
|
proc requestConnect(peerid: PeerId,
|
||||||
addresses: openarray[MultiAddress],
|
addresses: openArray[MultiAddress],
|
||||||
timeout = 0): ProtoBuffer =
|
timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
||||||
## Processing function `doConnect(req *pb.Request)`.
|
## Processing function `doConnect(req *pb.Request)`.
|
||||||
|
@@ -186,7 +186,7 @@ proc requestConnect(peerid: PeerID,
|
||||||
result.write(2, msg)
|
result.write(2, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDisconnect(peerid: PeerID): ProtoBuffer =
|
proc requestDisconnect(peerid: PeerId): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
||||||
## Processing function `doDisconnect(req *pb.Request)`.
|
## Processing function `doDisconnect(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
|
@@ -196,8 +196,8 @@ proc requestDisconnect(peerid: PeerID): ProtoBuffer =
|
||||||
result.write(7, msg)
|
result.write(7, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestStreamOpen(peerid: PeerID,
|
proc requestStreamOpen(peerid: PeerId,
|
||||||
protocols: openarray[string],
|
protocols: openArray[string],
|
||||||
timeout = 0): ProtoBuffer =
|
timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
||||||
## Processing function `doStreamOpen(req *pb.Request)`.
|
## Processing function `doStreamOpen(req *pb.Request)`.
|
||||||
|
@@ -213,7 +213,7 @@ proc requestStreamOpen(peerid: PeerID,
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestStreamHandler(address: MultiAddress,
|
proc requestStreamHandler(address: MultiAddress,
|
||||||
protocols: openarray[MultiProtocol]): ProtoBuffer =
|
protocols: openArray[MultiProtocol]): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
|
||||||
## Processing function `doStreamHandler(req *pb.Request)`.
|
## Processing function `doStreamHandler(req *pb.Request)`.
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
|
@@ -232,7 +232,7 @@ proc requestListPeers(): ProtoBuffer =
|
||||||
result.write(1, cast[uint](RequestType.LIST_PEERS))
|
result.write(1, cast[uint](RequestType.LIST_PEERS))
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
|
proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
||||||
## Processing function `doDHTFindPeer(req *pb.DHTRequest)`.
|
## Processing function `doDHTFindPeer(req *pb.DHTRequest)`.
|
||||||
let msgid = cast[uint](DHTRequestType.FIND_PEER)
|
let msgid = cast[uint](DHTRequestType.FIND_PEER)
|
||||||
|
@@ -247,7 +247,7 @@ proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
|
||||||
result.write(5, msg)
|
result.write(5, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDHTFindPeersConnectedToPeer(peer: PeerID,
|
proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
|
||||||
timeout = 0): ProtoBuffer =
|
timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
||||||
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
|
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
|
||||||
|
@@ -295,7 +295,7 @@ proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
|
||||||
result.write(5, msg)
|
result.write(5, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDHTGetPublicKey(peer: PeerID, timeout = 0): ProtoBuffer =
|
proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
||||||
## Processing function `doDHTGetPublicKey(req *pb.DHTRequest)`.
|
## Processing function `doDHTGetPublicKey(req *pb.DHTRequest)`.
|
||||||
let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
|
let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
|
||||||
|
@@ -340,7 +340,7 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
|
||||||
result.write(5, msg)
|
result.write(5, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestDHTPutValue(key: string, value: openarray[byte],
|
proc requestDHTPutValue(key: string, value: openArray[byte],
|
||||||
timeout = 0): ProtoBuffer =
|
timeout = 0): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
|
||||||
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
|
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
|
||||||
|
@@ -372,7 +372,7 @@ proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
|
||||||
result.write(5, msg)
|
result.write(5, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =
|
proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L18
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L18
|
||||||
let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
|
let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
|
@@ -386,7 +386,7 @@ proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =
|
||||||
result.write(6, msg)
|
result.write(6, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestCMUntagPeer(peer: PeerID, tag: string): ProtoBuffer =
|
proc requestCMUntagPeer(peer: PeerId, tag: string): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L33
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L33
|
||||||
let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
|
let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
|
||||||
result = initProtoBuffer({WithVarintLength})
|
result = initProtoBuffer({WithVarintLength})
|
||||||
|
@@ -435,7 +435,7 @@ proc requestPSListPeers(topic: string): ProtoBuffer =
|
||||||
result.write(8, msg)
|
result.write(8, msg)
|
||||||
result.finish()
|
result.finish()
|
||||||
|
|
||||||
proc requestPSPublish(topic: string, data: openarray[byte]): ProtoBuffer =
|
proc requestPSPublish(topic: string, data: openArray[byte]): ProtoBuffer =
|
||||||
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
|
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
|
||||||
## Processing function `doPubsubPublish(req *pb.PSRequest)`.
|
## Processing function `doPubsubPublish(req *pb.PSRequest)`.
|
||||||
let msgid = cast[uint](PSRequestType.PUBLISH)
|
let msgid = cast[uint](PSRequestType.PUBLISH)
|
||||||
|
@@ -725,8 +725,8 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
|
||||||
args.add("-relayDiscovery=true")
|
args.add("-relayDiscovery=true")
|
||||||
if RelayHop in api.flags:
|
if RelayHop in api.flags:
|
||||||
args.add("-relayHop=true")
|
args.add("-relayHop=true")
|
||||||
if NoInlinePeerID in api.flags:
|
if NoInlinePeerId in api.flags:
|
||||||
args.add("-noInlinePeerID=true")
|
args.add("-noInlinePeerId=true")
|
||||||
if len(bootstrapNodes) > 0:
|
if len(bootstrapNodes) > 0:
|
||||||
args.add("-bootstrapPeers=" & bootstrapNodes.join(","))
|
args.add("-bootstrapPeers=" & bootstrapNodes.join(","))
|
||||||
if len(id) != 0:
|
if len(id) != 0:
|
||||||
|
@@ -853,7 +853,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc connect*(api: DaemonAPI, peer: PeerID,
|
proc connect*(api: DaemonAPI, peer: PeerId,
|
||||||
addresses: seq[MultiAddress],
|
addresses: seq[MultiAddress],
|
||||||
timeout = 0) {.async.} =
|
timeout = 0) {.async.} =
|
||||||
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
|
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
|
||||||
|
@@ -866,7 +866,7 @@ proc connect*(api: DaemonAPI, peer: PeerID,
|
||||||
except:
|
except:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc disconnect*(api: DaemonAPI, peer: PeerID) {.async.} =
|
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
|
||||||
## Disconnect from remote peer with id ``peer``.
|
## Disconnect from remote peer with id ``peer``.
|
||||||
var transp = await api.newConnection()
|
var transp = await api.newConnection()
|
||||||
try:
|
try:
|
||||||
|
@@ -876,7 +876,7 @@ proc disconnect*(api: DaemonAPI, peer: PeerID) {.async.} =
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc openStream*(api: DaemonAPI, peer: PeerID,
|
proc openStream*(api: DaemonAPI, peer: PeerId,
|
||||||
protocols: seq[string],
|
protocols: seq[string],
|
||||||
timeout = 0): Future[P2PStream] {.async.} =
|
timeout = 0): Future[P2PStream] {.async.} =
|
||||||
## Open new stream to peer ``peer`` using one of the protocols in
|
## Open new stream to peer ``peer`` using one of the protocols in
|
||||||
|
@@ -961,7 +961,7 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc cmTagPeer*(api: DaemonAPI, peer: PeerID, tag: string,
|
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string,
|
||||||
weight: int) {.async.} =
|
weight: int) {.async.} =
|
||||||
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
|
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
|
||||||
var transp = await api.newConnection()
|
var transp = await api.newConnection()
|
||||||
|
@@ -972,7 +972,7 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerID, tag: string,
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc cmUntagPeer*(api: DaemonAPI, peer: PeerID, tag: string) {.async.} =
|
proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
|
||||||
## Remove tag ``tag`` from peer with id ``peer``.
|
## Remove tag ``tag`` from peer with id ``peer``.
|
||||||
var transp = await api.newConnection()
|
var transp = await api.newConnection()
|
||||||
try:
|
try:
|
||||||
|
@@ -1011,7 +1011,7 @@ proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
|
||||||
if pb.getRequiredField(3, result).isErr():
|
if pb.getRequiredField(3, result).isErr():
|
||||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||||
|
|
||||||
proc dhtGetSinglePeerID(pb: ProtoBuffer): PeerID
|
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
|
||||||
{.raises: [Defect, DaemonLocalError].} =
|
{.raises: [Defect, DaemonLocalError].} =
|
||||||
if pb.getRequiredField(3, result).isErr():
|
if pb.getRequiredField(3, result).isErr():
|
||||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||||
|
@@ -1055,7 +1055,7 @@ proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
|
||||||
else:
|
else:
|
||||||
raise newException(DaemonLocalError, "Wrong DHT answer type!")
|
raise newException(DaemonLocalError, "Wrong DHT answer type!")
|
||||||
|
|
||||||
proc dhtFindPeer*(api: DaemonAPI, peer: PeerID,
|
proc dhtFindPeer*(api: DaemonAPI, peer: PeerId,
|
||||||
timeout = 0): Future[PeerInfo] {.async.} =
|
timeout = 0): Future[PeerInfo] {.async.} =
|
||||||
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
|
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
|
||||||
##
|
##
|
||||||
|
@@ -1069,7 +1069,7 @@ proc dhtFindPeer*(api: DaemonAPI, peer: PeerID,
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerID,
|
proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerId,
|
||||||
timeout = 0): Future[PublicKey] {.async.} =
|
timeout = 0): Future[PublicKey] {.async.} =
|
||||||
## Get peer's public key from peer with id ``peer``.
|
## Get peer's public key from peer with id ``peer``.
|
||||||
##
|
##
|
||||||
|
@@ -1125,7 +1125,7 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerID,
|
proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerId,
|
||||||
timeout = 0): Future[seq[PeerInfo]] {.async.} =
|
timeout = 0): Future[seq[PeerInfo]] {.async.} =
|
||||||
## Find peers which are connected to peer with id ``peer``.
|
## Find peers which are connected to peer with id ``peer``.
|
||||||
##
|
##
|
||||||
|
@@ -1151,13 +1151,13 @@ proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerID,
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
|
proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
|
||||||
timeout = 0): Future[seq[PeerID]] {.async.} =
|
timeout = 0): Future[seq[PeerId]] {.async.} =
|
||||||
## Get closest peers for ``key``.
|
## Get closest peers for ``key``.
|
||||||
##
|
##
|
||||||
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
|
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
|
||||||
## means no timeout.
|
## means no timeout.
|
||||||
var transp = await api.newConnection()
|
var transp = await api.newConnection()
|
||||||
var list = newSeq[PeerID]()
|
var list = newSeq[PeerId]()
|
||||||
try:
|
try:
|
||||||
let spb = requestDHTGetClosestPeers(key, timeout)
|
let spb = requestDHTGetClosestPeers(key, timeout)
|
||||||
var pb = await transp.transactMessage(spb)
|
var pb = await transp.transactMessage(spb)
|
||||||
|
@@ -1170,7 +1170,7 @@ proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
|
||||||
var cpb = initProtoBuffer(message)
|
var cpb = initProtoBuffer(message)
|
||||||
if cpb.getDhtMessageType() == DHTResponseType.END:
|
if cpb.getDhtMessageType() == DHTResponseType.END:
|
||||||
break
|
break
|
||||||
list.add(cpb.dhtGetSinglePeerID())
|
list.add(cpb.dhtGetSinglePeerId())
|
||||||
result = list
|
result = list
|
||||||
finally:
|
finally:
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
@@ -1238,14 +1238,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
|
||||||
await api.closeConnection(transp)
|
await api.closeConnection(transp)
|
||||||
|
|
||||||
proc pubsubListPeers*(api: DaemonAPI,
|
proc pubsubListPeers*(api: DaemonAPI,
|
||||||
topic: string): Future[seq[PeerID]] {.async.} =
|
topic: string): Future[seq[PeerId]] {.async.} =
|
||||||
## Get list of peers we are connected to and which also subscribed to topic
|
## Get list of peers we are connected to and which also subscribed to topic
|
||||||
## ``topic``.
|
## ``topic``.
|
||||||
var transp = await api.newConnection()
|
var transp = await api.newConnection()
|
||||||
try:
|
try:
|
||||||
var pb = await transp.transactMessage(requestPSListPeers(topic))
|
var pb = await transp.transactMessage(requestPSListPeers(topic))
|
||||||
withMessage(pb) do:
|
withMessage(pb) do:
|
||||||
var peer: PeerID
|
var peer: PeerId
|
||||||
let innerPb = pb.enterPsMessage()
|
let innerPb = pb.enterPsMessage()
|
||||||
var peers = newSeq[seq[byte]]()
|
var peers = newSeq[seq[byte]]()
|
||||||
discard innerPb.getRepeatedField(2, peers)
|
discard innerPb.getRepeatedField(2, peers)
|
||||||
|
@@ -1308,7 +1308,7 @@ proc pubsubSubscribe*(api: DaemonAPI, topic: string,
|
||||||
proc shortLog*(pinfo: PeerInfo): string =
|
proc shortLog*(pinfo: PeerInfo): string =
|
||||||
## Get string representation of ``PeerInfo`` object.
|
## Get string representation of ``PeerInfo`` object.
|
||||||
result = newStringOfCap(128)
|
result = newStringOfCap(128)
|
||||||
result.add("{PeerID: '")
|
result.add("{PeerId: '")
|
||||||
result.add($pinfo.peer.shortLog())
|
result.add($pinfo.peer.shortLog())
|
||||||
result.add("' Addresses: [")
|
result.add("' Addresses: [")
|
||||||
let length = len(pinfo.addresses)
|
let length = len(pinfo.addresses)
|
||||||
|
|
|
@@ -12,8 +12,8 @@
|
||||||
## option enabled ``nim-libp2p`` will create dumps of unencrypted messages for
|
## option enabled ``nim-libp2p`` will create dumps of unencrypted messages for
|
||||||
## every peer libp2p communicates.
|
## every peer libp2p communicates.
|
||||||
##
|
##
|
||||||
## Every file is created with name "<PeerID>.pbcap". One file represents
|
## Every file is created with name "<PeerId>.pbcap". One file represents
|
||||||
## all the communication with peer which identified by ``PeerID``.
|
## all the communication with peer which identified by ``PeerId``.
|
||||||
##
|
##
|
||||||
## File can have multiple protobuf encoded messages of this format:
|
## File can have multiple protobuf encoded messages of this format:
|
||||||
##
|
##
|
||||||
|
@@ -170,7 +170,7 @@ iterator messages*(data: seq[byte]): Option[ProtoMessage] =
|
||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
|
|
||||||
proc dumpHex*(pbytes: openarray[byte], groupBy = 1, ascii = true): string =
|
proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
|
||||||
## Get hexadecimal dump of memory for array ``pbytes``.
|
## Get hexadecimal dump of memory for array ``pbytes``.
|
||||||
var res = ""
|
var res = ""
|
||||||
var offset = 0
|
var offset = 0
|
||||||
|
|
|
@@ -18,7 +18,7 @@ type
|
||||||
|
|
||||||
method connect*(
|
method connect*(
|
||||||
self: Dial,
|
self: Dial,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress]) {.async, base.} =
|
addrs: seq[MultiAddress]) {.async, base.} =
|
||||||
## connect remote peer without negotiating
|
## connect remote peer without negotiating
|
||||||
## a protocol
|
## a protocol
|
||||||
|
@@ -28,7 +28,7 @@ method connect*(
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dial,
|
self: Dial,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
protos: seq[string]): Future[Connection] {.async, base.} =
|
protos: seq[string]): Future[Connection] {.async, base.} =
|
||||||
## create a protocol stream over an
|
## create a protocol stream over an
|
||||||
## existing connection
|
## existing connection
|
||||||
|
@@ -38,7 +38,7 @@ method dial*(
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dial,
|
self: Dial,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress],
|
addrs: seq[MultiAddress],
|
||||||
protos: seq[string]): Future[Connection] {.async, base.} =
|
protos: seq[string]): Future[Connection] {.async, base.} =
|
||||||
## create a protocol stream and establish
|
## create a protocol stream and establish
|
||||||
|
|
|
@@ -40,13 +40,13 @@ type
|
||||||
localPeerId*: PeerId
|
localPeerId*: PeerId
|
||||||
ms: MultistreamSelect
|
ms: MultistreamSelect
|
||||||
connManager: ConnManager
|
connManager: ConnManager
|
||||||
dialLock: Table[PeerID, AsyncLock]
|
dialLock: Table[PeerId, AsyncLock]
|
||||||
transports: seq[Transport]
|
transports: seq[Transport]
|
||||||
nameResolver: NameResolver
|
nameResolver: NameResolver
|
||||||
|
|
||||||
proc dialAndUpgrade(
|
proc dialAndUpgrade(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress]):
|
addrs: seq[MultiAddress]):
|
||||||
Future[Connection] {.async.} =
|
Future[Connection] {.async.} =
|
||||||
debug "Dialing peer", peerId
|
debug "Dialing peer", peerId
|
||||||
|
@@ -111,7 +111,7 @@ proc dialAndUpgrade(
|
||||||
|
|
||||||
proc internalConnect(
|
proc internalConnect(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress]):
|
addrs: seq[MultiAddress]):
|
||||||
Future[Connection] {.async.} =
|
Future[Connection] {.async.} =
|
||||||
if self.localPeerId == peerId:
|
if self.localPeerId == peerId:
|
||||||
|
@@ -158,7 +158,7 @@ proc internalConnect(
|
||||||
|
|
||||||
method connect*(
|
method connect*(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress]) {.async.} =
|
addrs: seq[MultiAddress]) {.async.} =
|
||||||
## connect remote peer without negotiating
|
## connect remote peer without negotiating
|
||||||
## a protocol
|
## a protocol
|
||||||
|
@@ -183,7 +183,7 @@ proc negotiateStream(
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
protos: seq[string]): Future[Connection] {.async.} =
|
protos: seq[string]): Future[Connection] {.async.} =
|
||||||
## create a protocol stream over an
|
## create a protocol stream over an
|
||||||
## existing connection
|
## existing connection
|
||||||
|
@@ -198,7 +198,7 @@ method dial*(
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerID,
|
peerId: PeerId,
|
||||||
addrs: seq[MultiAddress],
|
addrs: seq[MultiAddress],
|
||||||
protos: seq[string]): Future[Connection] {.async.} =
|
protos: seq[string]): Future[Connection] {.async.} =
|
||||||
## create a protocol stream and establish
|
## create a protocol stream and establish
|
||||||
|
|
|
@@ -498,7 +498,7 @@ proc protoName*(ma: MultiAddress): MaResult[string] =
|
||||||
ok($(proto.mcodec))
|
ok($(proto.mcodec))
|
||||||
|
|
||||||
proc protoArgument*(ma: MultiAddress,
|
proc protoArgument*(ma: MultiAddress,
|
||||||
value: var openarray[byte]): MaResult[int] =
|
value: var openArray[byte]): MaResult[int] =
|
||||||
## Returns MultiAddress ``ma`` protocol argument value.
|
## Returns MultiAddress ``ma`` protocol argument value.
|
||||||
##
|
##
|
||||||
## If current MultiAddress do not have argument value, then result will be
|
## If current MultiAddress do not have argument value, then result will be
|
||||||
|
@@ -723,7 +723,7 @@ proc validate*(ma: MultiAddress): bool =
|
||||||
|
|
||||||
proc init*(
|
proc init*(
|
||||||
mtype: typedesc[MultiAddress], protocol: MultiCodec,
|
mtype: typedesc[MultiAddress], protocol: MultiCodec,
|
||||||
value: openarray[byte] = []): MaResult[MultiAddress] =
|
value: openArray[byte] = []): MaResult[MultiAddress] =
|
||||||
## Initialize MultiAddress object from protocol id ``protocol`` and array
|
## Initialize MultiAddress object from protocol id ``protocol`` and array
|
||||||
## of bytes ``value``.
|
## of bytes ``value``.
|
||||||
let proto = CodeAddresses.getOrDefault(protocol)
|
let proto = CodeAddresses.getOrDefault(protocol)
|
||||||
|
@@ -754,7 +754,7 @@ proc init*(
|
||||||
raiseAssert "None checked above"
|
raiseAssert "None checked above"
|
||||||
|
|
||||||
proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
|
proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
|
||||||
value: PeerID): MaResult[MultiAddress] {.inline.} =
|
value: PeerId): MaResult[MultiAddress] {.inline.} =
|
||||||
## Initialize MultiAddress object from protocol id ``protocol`` and peer id
|
## Initialize MultiAddress object from protocol id ``protocol`` and peer id
|
||||||
## ``value``.
|
## ``value``.
|
||||||
init(mtype, protocol, value.data)
|
init(mtype, protocol, value.data)
|
||||||
|
@@ -832,7 +832,7 @@ proc init*(mtype: typedesc[MultiAddress],
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc init*(mtype: typedesc[MultiAddress],
|
proc init*(mtype: typedesc[MultiAddress],
|
||||||
data: openarray[byte]): MaResult[MultiAddress] =
|
data: openArray[byte]): MaResult[MultiAddress] =
|
||||||
## Initialize MultiAddress with array of bytes ``data``.
|
## Initialize MultiAddress with array of bytes ``data``.
|
||||||
if len(data) == 0:
|
if len(data) == 0:
|
||||||
err("multiaddress: Address could not be empty!")
|
err("multiaddress: Address could not be empty!")
|
||||||
|
|
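For context, a minimal usage sketch of the MultiAddress helpers touched above; the libp2p module paths are an assumption and the snippet is illustrative only.

import libp2p/[multiaddress, multicodec]

# Build an ip4 address from a codec plus raw bytes (the init overload above),
# then read it back with protoName/protoArgument.
let ma = MultiAddress.init(multiCodec("ip4"), [byte 127, 0, 0, 1]).tryGet()
echo ma.protoName().tryGet()            # "ip4"

var buf: array[4, byte]
discard ma.protoArgument(buf).tryGet()  # fills buf with the address bytes
assert buf == [byte 127, 0, 0, 1]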
|
@ -19,7 +19,7 @@ import tables
|
||||||
import stew/[base32, base58, base64, results]
|
import stew/[base32, base58, base64, results]
|
||||||
|
|
||||||
type
|
type
|
||||||
MultibaseStatus* {.pure.} = enum
|
MultiBaseStatus* {.pure.} = enum
|
||||||
Error, Success, Overrun, Incorrect, BadCodec, NotSupported
|
Error, Success, Overrun, Incorrect, BadCodec, NotSupported
|
||||||
|
|
||||||
MultiBase* = object
|
MultiBase* = object
|
||||||
|
@ -29,169 +29,169 @@ type
|
||||||
MBCodec = object
|
MBCodec = object
|
||||||
code: char
|
code: char
|
||||||
name: string
|
name: string
|
||||||
encr: proc(inbytes: openarray[byte],
|
encr: proc(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||||
decr: proc(inbytes: openarray[char],
|
decr: proc(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||||
encl: MBCodeSize
|
encl: MBCodeSize
|
||||||
decl: MBCodeSize
|
decl: MBCodeSize
|
||||||
|
|
||||||
proc idd(inbytes: openarray[char], outbytes: var openarray[byte],
|
proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
let length = len(inbytes)
|
let length = len(inbytes)
|
||||||
if length > len(outbytes):
|
if length > len(outbytes):
|
||||||
outlen = length
|
outlen = length
|
||||||
result = MultibaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
else:
|
else:
|
||||||
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
|
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
|
||||||
outlen = length
|
outlen = length
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
|
|
||||||
proc ide(inbytes: openarray[byte],
|
proc ide(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
let length = len(inbytes)
|
let length = len(inbytes)
|
||||||
if length > len(outbytes):
|
if length > len(outbytes):
|
||||||
outlen = length
|
outlen = length
|
||||||
result = MultibaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
else:
|
else:
|
||||||
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
|
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
|
||||||
outlen = length
|
outlen = length
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
|
|
||||||
proc idel(length: int): int = length
|
proc idel(length: int): int = length
|
||||||
proc iddl(length: int): int = length
|
proc iddl(length: int): int = length
|
||||||
|
|
||||||
proc b16d(inbytes: openarray[char],
|
proc b16d(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc b16e(inbytes: openarray[byte],
|
proc b16e(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc b16ud(inbytes: openarray[char],
|
proc b16ud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc b16ue(inbytes: openarray[byte],
|
proc b16ue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
proc b16el(length: int): int = length shl 1
|
proc b16el(length: int): int = length shl 1
|
||||||
proc b16dl(length: int): int = (length + 1) div 2
|
proc b16dl(length: int): int = (length + 1) div 2
|
||||||
|
|
||||||
proc b32ce(r: Base32Status): MultibaseStatus {.inline.} =
|
proc b32ce(r: Base32Status): MultiBaseStatus {.inline.} =
|
||||||
result = MultibaseStatus.Error
|
result = MultiBaseStatus.Error
|
||||||
if r == Base32Status.Incorrect:
|
if r == Base32Status.Incorrect:
|
||||||
result = MultibaseStatus.Incorrect
|
result = MultiBaseStatus.Incorrect
|
||||||
elif r == Base32Status.Overrun:
|
elif r == Base32Status.Overrun:
|
||||||
result = MultibaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
elif r == Base32Status.Success:
|
elif r == Base32Status.Success:
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
|
|
||||||
proc b58ce(r: Base58Status): MultibaseStatus {.inline.} =
|
proc b58ce(r: Base58Status): MultiBaseStatus {.inline.} =
|
||||||
result = MultibaseStatus.Error
|
result = MultiBaseStatus.Error
|
||||||
if r == Base58Status.Incorrect:
|
if r == Base58Status.Incorrect:
|
||||||
result = MultibaseStatus.Incorrect
|
result = MultiBaseStatus.Incorrect
|
||||||
elif r == Base58Status.Overrun:
|
elif r == Base58Status.Overrun:
|
||||||
result = MultibaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
elif r == Base58Status.Success:
|
elif r == Base58Status.Success:
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
|
|
||||||
proc b64ce(r: Base64Status): MultibaseStatus {.inline.} =
|
proc b64ce(r: Base64Status): MultiBaseStatus {.inline.} =
|
||||||
result = MultiBaseStatus.Error
|
result = MultiBaseStatus.Error
|
||||||
if r == Base64Status.Incorrect:
|
if r == Base64Status.Incorrect:
|
||||||
result = MultibaseStatus.Incorrect
|
result = MultiBaseStatus.Incorrect
|
||||||
elif r == Base64Status.Overrun:
|
elif r == Base64Status.Overrun:
|
||||||
result = MultiBaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
elif r == Base64Status.Success:
|
elif r == Base64Status.Success:
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
|
|
||||||
proc b32hd(inbytes: openarray[char],
|
proc b32hd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32he(inbytes: openarray[byte],
|
proc b32he(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hud(inbytes: openarray[char],
|
proc b32hud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hue(inbytes: openarray[byte],
|
proc b32hue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hpd(inbytes: openarray[char],
|
proc b32hpd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hpe(inbytes: openarray[byte],
|
proc b32hpe(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hpud(inbytes: openarray[char],
|
proc b32hpud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32hpue(inbytes: openarray[byte],
|
proc b32hpue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
|
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32d(inbytes: openarray[char],
|
proc b32d(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
|
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32e(inbytes: openarray[byte],
|
proc b32e(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
|
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32ud(inbytes: openarray[char],
|
proc b32ud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
|
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32ue(inbytes: openarray[byte],
|
proc b32ue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
|
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32pd(inbytes: openarray[char],
|
proc b32pd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
|
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32pe(inbytes: openarray[byte],
|
proc b32pe(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
|
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32pud(inbytes: openarray[char],
|
proc b32pud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
|
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32pue(inbytes: openarray[byte],
|
proc b32pue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
|
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b32el(length: int): int = Base32Lower.encodedLength(length)
|
proc b32el(length: int): int = Base32Lower.encodedLength(length)
|
||||||
|
@ -199,24 +199,24 @@ proc b32dl(length: int): int = Base32Lower.decodedLength(length)
|
||||||
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
|
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
|
||||||
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
|
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
|
||||||
|
|
||||||
proc b58fd(inbytes: openarray[char],
|
proc b58fd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
|
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b58fe(inbytes: openarray[byte],
|
proc b58fe(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
|
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b58bd(inbytes: openarray[char],
|
proc b58bd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
|
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b58be(inbytes: openarray[byte],
|
proc b58be(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
|
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b58el(length: int): int = Base58.encodedLength(length)
|
proc b58el(length: int): int = Base58.encodedLength(length)
|
||||||
|
@ -227,48 +227,48 @@ proc b64dl(length: int): int = Base64.decodedLength(length)
|
||||||
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
|
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
|
||||||
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
|
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
|
||||||
|
|
||||||
proc b64e(inbytes: openarray[byte],
|
proc b64e(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
|
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64d(inbytes: openarray[char],
|
proc b64d(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
|
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64pe(inbytes: openarray[byte],
|
proc b64pe(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
|
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64pd(inbytes: openarray[char],
|
proc b64pd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
|
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64ue(inbytes: openarray[byte],
|
proc b64ue(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
|
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64ud(inbytes: openarray[char],
|
proc b64ud(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
|
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64upe(inbytes: openarray[byte],
|
proc b64upe(inbytes: openArray[byte],
|
||||||
outbytes: var openarray[char],
|
outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
|
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
proc b64upd(inbytes: openarray[char],
|
proc b64upd(inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte],
|
outbytes: var openArray[byte],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
|
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
|
||||||
|
|
||||||
const
|
const
|
||||||
MultibaseCodecs = [
|
MultiBaseCodecs = [
|
||||||
MBCodec(name: "identity", code: chr(0x00),
|
MBCodec(name: "identity", code: chr(0x00),
|
||||||
decr: idd, encr: ide, decl: iddl, encl: idel
|
decr: idd, encr: ide, decl: iddl, encl: idel
|
||||||
),
|
),
|
||||||
|
@ -328,16 +328,16 @@ const
|
||||||
]
|
]
|
||||||
|
|
||||||
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
|
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
|
||||||
for item in MultibaseCodecs:
|
for item in MultiBaseCodecs:
|
||||||
result[item.code] = item
|
result[item.code] = item
|
||||||
|
|
||||||
proc initMultiBaseNameTable(): Table[string, MBCodec] {.compileTime.} =
|
proc initMultiBaseNameTable(): Table[string, MBCodec] {.compileTime.} =
|
||||||
for item in MultibaseCodecs:
|
for item in MultiBaseCodecs:
|
||||||
result[item.name] = item
|
result[item.name] = item
|
||||||
|
|
||||||
const
|
const
|
||||||
CodeMultibases = initMultiBaseCodeTable()
|
CodeMultiBases = initMultiBaseCodeTable()
|
||||||
NameMultibases = initMultiBaseNameTable()
|
NameMultiBases = initMultiBaseNameTable()
|
||||||
|
|
||||||
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
|
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
length: int): int =
|
length: int): int =
|
||||||
|
@ -346,7 +346,7 @@ proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
##
|
##
|
||||||
## Procedure returns ``-1`` if ``encoding`` scheme is not supported or
|
## Procedure returns ``-1`` if ``encoding`` scheme is not supported or
|
||||||
## not present.
|
## not present.
|
||||||
let mb = NameMultibases.getOrDefault(encoding)
|
let mb = NameMultiBases.getOrDefault(encoding)
|
||||||
if len(mb.name) == 0 or isNil(mb.encl):
|
if len(mb.name) == 0 or isNil(mb.encl):
|
||||||
result = -1
|
result = -1
|
||||||
else:
|
else:
|
||||||
|
@ -359,7 +359,7 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
|
||||||
length: int): int =
|
length: int): int =
|
||||||
## Return estimated size of buffer to store MultiBase decoded value with
|
## Return estimated size of buffer to store MultiBase decoded value with
|
||||||
## encoding character ``encoding`` of length ``length``.
|
## encoding character ``encoding`` of length ``length``.
|
||||||
let mb = CodeMultibases.getOrDefault(encoding)
|
let mb = CodeMultiBases.getOrDefault(encoding)
|
||||||
if len(mb.name) == 0 or isNil(mb.decl) or length == 0:
|
if len(mb.name) == 0 or isNil(mb.decl) or length == 0:
|
||||||
result = -1
|
result = -1
|
||||||
else:
|
else:
|
||||||
|
@ -369,8 +369,8 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
|
||||||
result = mb.decl(length - 1)
|
result = mb.decl(length - 1)
|
||||||
|
|
||||||
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
inbytes: openarray[byte], outbytes: var openarray[char],
|
inbytes: openArray[byte], outbytes: var openArray[char],
|
||||||
outlen: var int): MultibaseStatus =
|
outlen: var int): MultiBaseStatus =
|
||||||
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
|
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
|
||||||
## store encoded value to ``outbytes``.
|
## store encoded value to ``outbytes``.
|
||||||
##
|
##
|
||||||
|
@ -386,11 +386,11 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
##
|
##
|
||||||
## On successful encoding ``MultiBaseStatus.Success`` will be returned and
|
## On successful encoding ``MultiBaseStatus.Success`` will be returned and
|
||||||
## ``outlen`` will be set to number of encoded octets (bytes).
|
## ``outlen`` will be set to number of encoded octets (bytes).
|
||||||
let mb = NameMultibases.getOrDefault(encoding)
|
let mb = NameMultiBases.getOrDefault(encoding)
|
||||||
if len(mb.name) == 0:
|
if len(mb.name) == 0:
|
||||||
return MultibaseStatus.BadCodec
|
return MultiBaseStatus.BadCodec
|
||||||
if isNil(mb.encr) or isNil(mb.encl):
|
if isNil(mb.encr) or isNil(mb.encl):
|
||||||
return MultibaseStatus.NotSupported
|
return MultiBaseStatus.NotSupported
|
||||||
if len(outbytes) > 1:
|
if len(outbytes) > 1:
|
||||||
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
|
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
|
||||||
outlen)
|
outlen)
|
||||||
|
@ -408,8 +408,8 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
result = MultiBaseStatus.Overrun
|
result = MultiBaseStatus.Overrun
|
||||||
outlen = mb.encl(len(inbytes)) + 1
|
outlen = mb.encl(len(inbytes)) + 1
|
||||||
|
|
||||||
proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char],
|
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
|
||||||
outbytes: var openarray[byte], outlen: var int): MultibaseStatus =
|
outbytes: var openArray[byte], outlen: var int): MultiBaseStatus =
|
||||||
## Decode array ``inbytes`` using MultiBase encoding and store decoded value
|
## Decode array ``inbytes`` using MultiBase encoding and store decoded value
|
||||||
## to ``outbytes``.
|
## to ``outbytes``.
|
||||||
##
|
##
|
||||||
|
@ -426,24 +426,24 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char],
|
||||||
## ``outlen`` will be set to number of encoded octets (bytes).
|
## ``outlen`` will be set to number of encoded octets (bytes).
|
||||||
let length = len(inbytes)
|
let length = len(inbytes)
|
||||||
if length == 0:
|
if length == 0:
|
||||||
return MultibaseStatus.Incorrect
|
return MultiBaseStatus.Incorrect
|
||||||
let mb = CodeMultibases.getOrDefault(inbytes[0])
|
let mb = CodeMultiBases.getOrDefault(inbytes[0])
|
||||||
if len(mb.name) == 0:
|
if len(mb.name) == 0:
|
||||||
return MultibaseStatus.BadCodec
|
return MultiBaseStatus.BadCodec
|
||||||
if isNil(mb.decr) or isNil(mb.decl):
|
if isNil(mb.decr) or isNil(mb.decl):
|
||||||
return MultibaseStatus.NotSupported
|
return MultiBaseStatus.NotSupported
|
||||||
if length == 1:
|
if length == 1:
|
||||||
outlen = 0
|
outlen = 0
|
||||||
result = MultibaseStatus.Success
|
result = MultiBaseStatus.Success
|
||||||
else:
|
else:
|
||||||
result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)
|
result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)
|
||||||
|
|
||||||
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
inbytes: openarray[byte]): Result[string, string] =
|
inbytes: openArray[byte]): Result[string, string] =
|
||||||
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
|
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
|
||||||
## return encoded string.
|
## return encoded string.
|
||||||
let length = len(inbytes)
|
let length = len(inbytes)
|
||||||
let mb = NameMultibases.getOrDefault(encoding)
|
let mb = NameMultiBases.getOrDefault(encoding)
|
||||||
if len(mb.name) == 0:
|
if len(mb.name) == 0:
|
||||||
return err("multibase: Encoding scheme is incorrect!")
|
return err("multibase: Encoding scheme is incorrect!")
|
||||||
if isNil(mb.encr) or isNil(mb.encl):
|
if isNil(mb.encr) or isNil(mb.encl):
|
||||||
|
@ -462,13 +462,13 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
||||||
buffer[0] = mb.code
|
buffer[0] = mb.code
|
||||||
ok(buffer)
|
ok(buffer)
|
||||||
|
|
||||||
proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char]): Result[seq[byte], string] =
|
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[byte], string] =
|
||||||
## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
|
## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
|
||||||
## bytes.
|
## bytes.
|
||||||
let length = len(inbytes)
|
let length = len(inbytes)
|
||||||
if length == 0:
|
if length == 0:
|
||||||
return err("multibase: Could not decode zero-length string")
|
return err("multibase: Could not decode zero-length string")
|
||||||
let mb = CodeMultibases.getOrDefault(inbytes[0])
|
let mb = CodeMultiBases.getOrDefault(inbytes[0])
|
||||||
if len(mb.name) == 0:
|
if len(mb.name) == 0:
|
||||||
return err("multibase: MultiBase scheme is incorrect!")
|
return err("multibase: MultiBase scheme is incorrect!")
|
||||||
if isNil(mb.decr) or isNil(mb.decl):
|
if isNil(mb.decr) or isNil(mb.decl):
|
||||||
|
|
|
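For context, a minimal sketch of the string-based MultiBase API above; the module path and the "base58btc" codec name are assumptions, the calls follow the signatures shown.

import libp2p/multibase

let payload = @[byte 1, 2, 3, 4]
# encode() selects the codec by name; decode() infers it from the prefix char.
let encoded = MultiBase.encode("base58btc", payload)
if encoded.isOk:
  let decoded = MultiBase.decode(encoded.get())
  assert decoded.isOk and decoded.get() == payload
  echo encoded.get()
else:
  echo "unsupported multibase encoding: ", encoded.error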
@ -41,8 +41,8 @@ const
|
||||||
ErrParseError = "Parse error fromHex"
|
ErrParseError = "Parse error fromHex"
|
||||||
|
|
||||||
type
|
type
|
||||||
MHashCoderProc* = proc(data: openarray[byte],
|
MHashCoderProc* = proc(data: openArray[byte],
|
||||||
output: var openarray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||||
MHash* = object
|
MHash* = object
|
||||||
mcodec*: MultiCodec
|
mcodec*: MultiCodec
|
||||||
size*: int
|
size*: int
|
||||||
|
@ -56,20 +56,20 @@ type
|
||||||
|
|
||||||
MhResult*[T] = Result[T, cstring]
|
MhResult*[T] = Result[T, cstring]
|
||||||
|
|
||||||
proc identhash(data: openarray[byte], output: var openarray[byte]) =
|
proc identhash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var length = if len(data) > len(output): len(output)
|
var length = if len(data) > len(output): len(output)
|
||||||
else: len(data)
|
else: len(data)
|
||||||
copyMem(addr output[0], unsafeAddr data[0], length)
|
copyMem(addr output[0], unsafeAddr data[0], length)
|
||||||
|
|
||||||
proc sha1hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha1hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha1.digest(data)
|
var digest = sha1.digest(data)
|
||||||
var length = if sha1.sizeDigest > len(output): len(output)
|
var length = if sha1.sizeDigest > len(output): len(output)
|
||||||
else: sha1.sizeDigest
|
else: sha1.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc dblsha2_256hash(data: openarray[byte], output: var openarray[byte]) =
|
proc dblsha2_256hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest1 = sha256.digest(data)
|
var digest1 = sha256.digest(data)
|
||||||
var digest2 = sha256.digest(digest1.data)
|
var digest2 = sha256.digest(digest1.data)
|
||||||
|
@ -77,91 +77,91 @@ proc dblsha2_256hash(data: openarray[byte], output: var openarray[byte]) =
|
||||||
else: sha256.sizeDigest
|
else: sha256.sizeDigest
|
||||||
copyMem(addr output[0], addr digest2.data[0], length)
|
copyMem(addr output[0], addr digest2.data[0], length)
|
||||||
|
|
||||||
proc blake2Bhash(data: openarray[byte], output: var openarray[byte]) =
|
proc blake2Bhash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = blake2_512.digest(data)
|
var digest = blake2_512.digest(data)
|
||||||
var length = if blake2_512.sizeDigest > len(output): len(output)
|
var length = if blake2_512.sizeDigest > len(output): len(output)
|
||||||
else: blake2_512.sizeDigest
|
else: blake2_512.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc blake2Shash(data: openarray[byte], output: var openarray[byte]) =
|
proc blake2Shash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = blake2_256.digest(data)
|
var digest = blake2_256.digest(data)
|
||||||
var length = if blake2_256.sizeDigest > len(output): len(output)
|
var length = if blake2_256.sizeDigest > len(output): len(output)
|
||||||
else: blake2_256.sizeDigest
|
else: blake2_256.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha2_256hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha256.digest(data)
|
var digest = sha256.digest(data)
|
||||||
var length = if sha256.sizeDigest > len(output): len(output)
|
var length = if sha256.sizeDigest > len(output): len(output)
|
||||||
else: sha256.sizeDigest
|
else: sha256.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha2_512hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha2_512hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha512.digest(data)
|
var digest = sha512.digest(data)
|
||||||
var length = if sha512.sizeDigest > len(output): len(output)
|
var length = if sha512.sizeDigest > len(output): len(output)
|
||||||
else: sha512.sizeDigest
|
else: sha512.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha3_224hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha3_224hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha3_224.digest(data)
|
var digest = sha3_224.digest(data)
|
||||||
var length = if sha3_224.sizeDigest > len(output): len(output)
|
var length = if sha3_224.sizeDigest > len(output): len(output)
|
||||||
else: sha3_224.sizeDigest
|
else: sha3_224.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha3_256hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha3_256hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha3_256.digest(data)
|
var digest = sha3_256.digest(data)
|
||||||
var length = if sha3_256.sizeDigest > len(output): len(output)
|
var length = if sha3_256.sizeDigest > len(output): len(output)
|
||||||
else: sha3_256.sizeDigest
|
else: sha3_256.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha3_384hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha3_384hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha3_384.digest(data)
|
var digest = sha3_384.digest(data)
|
||||||
var length = if sha3_384.sizeDigest > len(output): len(output)
|
var length = if sha3_384.sizeDigest > len(output): len(output)
|
||||||
else: sha3_384.sizeDigest
|
else: sha3_384.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc sha3_512hash(data: openarray[byte], output: var openarray[byte]) =
|
proc sha3_512hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = sha3_512.digest(data)
|
var digest = sha3_512.digest(data)
|
||||||
var length = if sha3_512.sizeDigest > len(output): len(output)
|
var length = if sha3_512.sizeDigest > len(output): len(output)
|
||||||
else: sha3_512.sizeDigest
|
else: sha3_512.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc keccak_224hash(data: openarray[byte], output: var openarray[byte]) =
|
proc keccak_224hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = keccak224.digest(data)
|
var digest = keccak224.digest(data)
|
||||||
var length = if keccak224.sizeDigest > len(output): len(output)
|
var length = if keccak224.sizeDigest > len(output): len(output)
|
||||||
else: keccak224.sizeDigest
|
else: keccak224.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc keccak_256hash(data: openarray[byte], output: var openarray[byte]) =
|
proc keccak_256hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = keccak256.digest(data)
|
var digest = keccak256.digest(data)
|
||||||
var length = if keccak256.sizeDigest > len(output): len(output)
|
var length = if keccak256.sizeDigest > len(output): len(output)
|
||||||
else: keccak256.sizeDigest
|
else: keccak256.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc keccak_384hash(data: openarray[byte], output: var openarray[byte]) =
|
proc keccak_384hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = keccak384.digest(data)
|
var digest = keccak384.digest(data)
|
||||||
var length = if keccak384.sizeDigest > len(output): len(output)
|
var length = if keccak384.sizeDigest > len(output): len(output)
|
||||||
else: keccak384.sizeDigest
|
else: keccak384.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc keccak_512hash(data: openarray[byte], output: var openarray[byte]) =
|
proc keccak_512hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
var digest = keccak512.digest(data)
|
var digest = keccak512.digest(data)
|
||||||
var length = if keccak512.sizeDigest > len(output): len(output)
|
var length = if keccak512.sizeDigest > len(output): len(output)
|
||||||
else: keccak512.sizeDigest
|
else: keccak512.sizeDigest
|
||||||
copyMem(addr output[0], addr digest.data[0], length)
|
copyMem(addr output[0], addr digest.data[0], length)
|
||||||
|
|
||||||
proc shake_128hash(data: openarray[byte], output: var openarray[byte]) =
|
proc shake_128hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
var sctx: shake128
|
var sctx: shake128
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
sctx.init()
|
sctx.init()
|
||||||
|
@ -170,7 +170,7 @@ proc shake_128hash(data: openarray[byte], output: var openarray[byte]) =
|
||||||
discard sctx.output(addr output[0], uint(len(output)))
|
discard sctx.output(addr output[0], uint(len(output)))
|
||||||
sctx.clear()
|
sctx.clear()
|
||||||
|
|
||||||
proc shake_256hash(data: openarray[byte], output: var openarray[byte]) =
|
proc shake_256hash(data: openArray[byte], output: var openArray[byte]) =
|
||||||
var sctx: shake256
|
var sctx: shake256
|
||||||
if len(output) > 0:
|
if len(output) > 0:
|
||||||
sctx.init()
|
sctx.init()
|
||||||
|
@ -208,16 +208,16 @@ const
|
||||||
),
|
),
|
||||||
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
|
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
|
||||||
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
|
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
|
||||||
MHash(mcodec: multiCodec("keccak-224"), size: keccak_224.sizeDigest,
|
MHash(mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest,
|
||||||
coder: keccak_224hash
|
coder: keccak_224hash
|
||||||
),
|
),
|
||||||
MHash(mcodec: multiCodec("keccak-256"), size: keccak_256.sizeDigest,
|
MHash(mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest,
|
||||||
coder: keccak_256hash
|
coder: keccak_256hash
|
||||||
),
|
),
|
||||||
MHash(mcodec: multiCodec("keccak-384"), size: keccak_384.sizeDigest,
|
MHash(mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest,
|
||||||
coder: keccak_384hash
|
coder: keccak_384hash
|
||||||
),
|
),
|
||||||
MHash(mcodec: multiCodec("keccak-512"), size: keccak_512.sizeDigest,
|
MHash(mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest,
|
||||||
coder: keccak_512hash
|
coder: keccak_512hash
|
||||||
),
|
),
|
||||||
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
|
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
|
||||||
|
@ -325,7 +325,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
|
||||||
const
|
const
|
||||||
CodeHashes = initMultiHashCodeTable()
|
CodeHashes = initMultiHashCodeTable()
|
||||||
|
|
||||||
proc digestImplWithHash(hash: MHash, data: openarray[byte]): MultiHash =
|
proc digestImplWithHash(hash: MHash, data: openArray[byte]): MultiHash =
|
||||||
var buffer: array[MaxHashSize, byte]
|
var buffer: array[MaxHashSize, byte]
|
||||||
result.data = initVBuffer()
|
result.data = initVBuffer()
|
||||||
result.mcodec = hash.mcodec
|
result.mcodec = hash.mcodec
|
||||||
|
@ -343,7 +343,7 @@ proc digestImplWithHash(hash: MHash, data: openarray[byte]): MultiHash =
|
||||||
result.size = hash.size
|
result.size = hash.size
|
||||||
result.data.finish()
|
result.data.finish()
|
||||||
|
|
||||||
proc digestImplWithoutHash(hash: MHash, data: openarray[byte]): MultiHash =
|
proc digestImplWithoutHash(hash: MHash, data: openArray[byte]): MultiHash =
|
||||||
result.data = initVBuffer()
|
result.data = initVBuffer()
|
||||||
result.mcodec = hash.mcodec
|
result.mcodec = hash.mcodec
|
||||||
result.size = len(data)
|
result.size = len(data)
|
||||||
|
@ -354,7 +354,7 @@ proc digestImplWithoutHash(hash: MHash, data: openarray[byte]): MultiHash =
|
||||||
result.data.finish()
|
result.data.finish()
|
||||||
|
|
||||||
proc digest*(mhtype: typedesc[MultiHash], hashname: string,
|
proc digest*(mhtype: typedesc[MultiHash], hashname: string,
|
||||||
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
|
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||||
## Perform digest calculation using hash algorithm with name ``hashname`` on
|
## Perform digest calculation using hash algorithm with name ``hashname`` on
|
||||||
## data array ``data``.
|
## data array ``data``.
|
||||||
let mc = MultiCodec.codec(hashname)
|
let mc = MultiCodec.codec(hashname)
|
||||||
|
@ -368,7 +368,7 @@ proc digest*(mhtype: typedesc[MultiHash], hashname: string,
|
||||||
ok(digestImplWithHash(hash, data))
|
ok(digestImplWithHash(hash, data))
|
||||||
|
|
||||||
proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
|
proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
|
||||||
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
|
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||||
## Perform digest calculation using hash algorithm with code ``hashcode`` on
|
## Perform digest calculation using hash algorithm with code ``hashcode`` on
|
||||||
## data array ``data``.
|
## data array ``data``.
|
||||||
let hash = CodeHashes.getOrDefault(hashcode)
|
let hash = CodeHashes.getOrDefault(hashcode)
|
||||||
|
@ -406,7 +406,7 @@ proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
|
||||||
ok(digestImplWithoutHash(hash, mdigest.data))
|
ok(digestImplWithoutHash(hash, mdigest.data))
|
||||||
|
|
||||||
proc init*(mhtype: typedesc[MultiHash], hashname: string,
|
proc init*(mhtype: typedesc[MultiHash], hashname: string,
|
||||||
bdigest: openarray[byte]): MhResult[MultiHash] {.inline.} =
|
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||||
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm name
|
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm name
|
||||||
## ``hashname``.
|
## ``hashname``.
|
||||||
let mc = MultiCodec.codec(hashname)
|
let mc = MultiCodec.codec(hashname)
|
||||||
|
@ -422,7 +422,7 @@ proc init*(mhtype: typedesc[MultiHash], hashname: string,
|
||||||
ok(digestImplWithoutHash(hash, bdigest))
|
ok(digestImplWithoutHash(hash, bdigest))
|
||||||
|
|
||||||
proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
|
proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
|
||||||
bdigest: openarray[byte]): MhResult[MultiHash] {.inline.} =
|
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||||
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
|
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
|
||||||
## ``hashcode``.
|
## ``hashcode``.
|
||||||
let hash = CodeHashes.getOrDefault(hashcode)
|
let hash = CodeHashes.getOrDefault(hashcode)
|
||||||
|
@ -433,7 +433,7 @@ proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
|
||||||
else:
|
else:
|
||||||
ok(digestImplWithoutHash(hash, bdigest))
|
ok(digestImplWithoutHash(hash, bdigest))
|
||||||
|
|
||||||
proc decode*(mhtype: typedesc[MultiHash], data: openarray[byte],
|
proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
|
||||||
mhash: var MultiHash): MhResult[int] =
|
mhash: var MultiHash): MhResult[int] =
|
||||||
## Decode MultiHash value from array of bytes ``data``.
|
## Decode MultiHash value from array of bytes ``data``.
|
||||||
##
|
##
|
||||||
|
@ -478,7 +478,7 @@ proc decode*(mhtype: typedesc[MultiHash], data: openarray[byte],
|
||||||
vb.offset + int(size) - 1))
|
vb.offset + int(size) - 1))
|
||||||
ok(vb.offset + int(size))
|
ok(vb.offset + int(size))
|
||||||
|
|
||||||
proc validate*(mhtype: typedesc[MultiHash], data: openarray[byte]): bool =
|
proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
|
||||||
## Returns ``true`` if array of bytes ``data`` has correct MultiHash inside.
|
## Returns ``true`` if array of bytes ``data`` has correct MultiHash inside.
|
||||||
var code, size: uint64
|
var code, size: uint64
|
||||||
var res: VarintResult[void]
|
var res: VarintResult[void]
|
||||||
|
@ -509,7 +509,7 @@ proc validate*(mhtype: typedesc[MultiHash], data: openarray[byte]): bool =
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
proc init*(mhtype: typedesc[MultiHash],
|
proc init*(mhtype: typedesc[MultiHash],
|
||||||
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
|
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
|
||||||
## Create MultiHash from bytes array ``data``.
|
## Create MultiHash from bytes array ``data``.
|
||||||
var hash: MultiHash
|
var hash: MultiHash
|
||||||
discard ? MultiHash.decode(data, hash)
|
discard ? MultiHash.decode(data, hash)
|
||||||
|
@ -530,7 +530,7 @@ proc init58*(mhtype: typedesc[MultiHash],
|
||||||
if MultiHash.decode(Base58.decode(data), result) == -1:
|
if MultiHash.decode(Base58.decode(data), result) == -1:
|
||||||
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
||||||
|
|
||||||
proc cmp(a: openarray[byte], b: openarray[byte]): bool {.inline.} =
|
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||||
if len(a) != len(b):
|
if len(a) != len(b):
|
||||||
return false
|
return false
|
||||||
var n = len(a)
|
var n = len(a)
|
||||||
|
|
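For context, a minimal sketch of the MultiHash digest/validate/init round trip above; the module path is an assumption.

import libp2p/multihash

let msg = @[byte 0xDE, 0xAD, 0xBE, 0xEF]
let mh = MultiHash.digest("sha2-256", msg).tryGet()

# The encoded form lives in mh.data.buffer; it validates and round-trips.
assert MultiHash.validate(mh.data.buffer)
let decoded = MultiHash.init(mh.data.buffer).tryGet()
assert decoded.mcodec == mh.mcodec and decoded.size == mh.size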
|
@ -46,7 +46,7 @@ type
|
||||||
oid*: Oid
|
oid*: Oid
|
||||||
maxChannCount: int
|
maxChannCount: int
|
||||||
|
|
||||||
func shortLog*(m: MPlex): auto =
|
func shortLog*(m: Mplex): auto =
|
||||||
shortLog(m.connection)
|
shortLog(m.connection)
|
||||||
|
|
||||||
chronicles.formatIt(Mplex): shortLog(it)
|
chronicles.formatIt(Mplex): shortLog(it)
|
||||||
|
|
|
@ -88,9 +88,9 @@ method resolveIp*(
|
||||||
port: Port,
|
port: Port,
|
||||||
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
|
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
|
||||||
|
|
||||||
trace "Resolving IP using DNS", address, servers = self.nameservers.mapIt($it), domain
|
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
|
||||||
for _ in 0 ..< self.nameservers.len:
|
for _ in 0 ..< self.nameServers.len:
|
||||||
let server = self.nameservers[0]
|
let server = self.nameServers[0]
|
||||||
var responseFutures: seq[Future[Response]]
|
var responseFutures: seq[Future[Response]]
|
||||||
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
|
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
|
||||||
responseFutures.add(getDnsResponse(server, address, A))
|
responseFutures.add(getDnsResponse(server, address, A))
|
||||||
|
@ -122,8 +122,8 @@ method resolveIp*(
|
||||||
break
|
break
|
||||||
|
|
||||||
if resolveFailed:
|
if resolveFailed:
|
||||||
self.nameservers.add(self.nameservers[0])
|
self.nameServers.add(self.nameServers[0])
|
||||||
self.nameservers.delete(0)
|
self.nameServers.delete(0)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
trace "Got IPs from DNS server", resolvedAddresses, server = $server
|
trace "Got IPs from DNS server", resolvedAddresses, server = $server
|
||||||
|
@ -136,9 +136,9 @@ method resolveTxt*(
|
||||||
self: DnsResolver,
|
self: DnsResolver,
|
||||||
address: string): Future[seq[string]] {.async.} =
|
address: string): Future[seq[string]] {.async.} =
|
||||||
|
|
||||||
trace "Resolving TXT using DNS", address, servers = self.nameservers.mapIt($it)
|
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
|
||||||
for _ in 0 ..< self.nameservers.len:
|
for _ in 0 ..< self.nameServers.len:
|
||||||
let server = self.nameservers[0]
|
let server = self.nameServers[0]
|
||||||
try:
|
try:
|
||||||
let response = await getDnsResponse(server, address, TXT)
|
let response = await getDnsResponse(server, address, TXT)
|
||||||
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
|
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
|
||||||
|
@ -147,8 +147,8 @@ method resolveTxt*(
|
||||||
raise e
|
raise e
|
||||||
except CatchableError as e:
|
except CatchableError as e:
|
||||||
info "Failed to query DNS", address, error=e.msg
|
info "Failed to query DNS", address, error=e.msg
|
||||||
self.nameservers.add(self.nameservers[0])
|
self.nameServers.add(self.nameServers[0])
|
||||||
self.nameservers.delete(0)
|
self.nameServers.delete(0)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
debug "Failed to resolve TXT, returning empty set"
|
debug "Failed to resolve TXT, returning empty set"
|
||||||
|
|
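For context, the failover above is a plain rotate-to-back of the nameServers sequence; this standalone sketch shows the same step on a placeholder seq.

var nameServers = @["10.0.0.1", "10.0.0.2", "10.0.0.3"]  # placeholder servers

# After a failed query the current head is pushed to the back, so the next
# attempt starts with a different server.
nameServers.add(nameServers[0])
nameServers.delete(0)
assert nameServers == @["10.0.0.2", "10.0.0.3", "10.0.0.1"]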
|
@ -25,10 +25,10 @@ const
|
||||||
maxInlineKeyLength* = 42
|
maxInlineKeyLength* = 42
|
||||||
|
|
||||||
type
|
type
|
||||||
PeerID* = object
|
PeerId* = object
|
||||||
data*: seq[byte]
|
data*: seq[byte]
|
||||||
|
|
||||||
func `$`*(pid: PeerID): string =
|
func `$`*(pid: PeerId): string =
|
||||||
## Return base58 encoded ``pid`` representation.
|
## Return base58 encoded ``pid`` representation.
|
||||||
# This unusual call syntax is used to avoid a strange Nim compilation error
|
# This unusual call syntax is used to avoid a strange Nim compilation error
|
||||||
base58.encode(Base58, pid.data)
|
base58.encode(Base58, pid.data)
|
||||||
|
@ -42,29 +42,29 @@ func shortLog*(pid: PeerId): string =
|
||||||
|
|
||||||
spid
|
spid
|
||||||
|
|
||||||
chronicles.formatIt(PeerID): shortLog(it)
|
chronicles.formatIt(PeerId): shortLog(it)
|
||||||
|
|
||||||
func toBytes*(pid: PeerID, data: var openarray[byte]): int =
|
func toBytes*(pid: PeerId, data: var openArray[byte]): int =
|
||||||
## Store PeerID ``pid`` to array of bytes ``data``.
|
## Store PeerId ``pid`` to array of bytes ``data``.
|
||||||
##
|
##
|
||||||
## Returns number of bytes needed to store ``pid``.
|
## Returns number of bytes needed to store ``pid``.
|
||||||
result = len(pid.data)
|
result = len(pid.data)
|
||||||
if len(data) >= result and result > 0:
|
if len(data) >= result and result > 0:
|
||||||
copyMem(addr data[0], unsafeAddr pid.data[0], result)
|
copyMem(addr data[0], unsafeAddr pid.data[0], result)
|
||||||
|
|
||||||
template getBytes*(pid: PeerID): seq[byte] =
|
template getBytes*(pid: PeerId): seq[byte] =
|
||||||
## Return PeerID ``pid`` as array of bytes.
|
## Return PeerId ``pid`` as array of bytes.
|
||||||
pid.data
|
pid.data
|
||||||
|
|
||||||
func hex*(pid: PeerID): string =
|
func hex*(pid: PeerId): string =
|
||||||
## Returns hexadecimal string representation of ``pid``.
|
## Returns hexadecimal string representation of ``pid``.
|
||||||
toHex(pid.data)
|
toHex(pid.data)
|
||||||
|
|
||||||
template len*(pid: PeerID): int =
|
template len*(pid: PeerId): int =
|
||||||
## Returns length of ``pid`` binary representation.
|
## Returns length of ``pid`` binary representation.
|
||||||
len(pid.data)
|
len(pid.data)
|
||||||
|
|
||||||
func cmp*(a, b: PeerID): int =
|
func cmp*(a, b: PeerId): int =
|
||||||
## Compares two peer ids ``a`` and ``b``.
|
## Compares two peer ids ``a`` and ``b``.
|
||||||
## Returns:
|
## Returns:
|
||||||
##
|
##
|
||||||
|
@ -79,29 +79,29 @@ func cmp*(a, b: PeerID): int =
|
||||||
inc(i)
|
inc(i)
|
||||||
result = len(a.data) - len(b.data)
|
result = len(a.data) - len(b.data)
|
||||||
|
|
||||||
template `<=`*(a, b: PeerID): bool =
|
template `<=`*(a, b: PeerId): bool =
|
||||||
(cmp(a, b) <= 0)
|
(cmp(a, b) <= 0)
|
||||||
|
|
||||||
template `<`*(a, b: PeerID): bool =
|
template `<`*(a, b: PeerId): bool =
|
||||||
(cmp(a, b) < 0)
|
(cmp(a, b) < 0)
|
||||||
|
|
||||||
template `>=`*(a, b: PeerID): bool =
|
template `>=`*(a, b: PeerId): bool =
|
||||||
(cmp(a, b) >= 0)
|
(cmp(a, b) >= 0)
|
||||||
|
|
||||||
template `>`*(a, b: PeerID): bool =
|
template `>`*(a, b: PeerId): bool =
|
||||||
(cmp(a, b) > 0)
|
(cmp(a, b) > 0)
|
||||||
|
|
||||||
template `==`*(a, b: PeerID): bool =
|
template `==`*(a, b: PeerId): bool =
|
||||||
(cmp(a, b) == 0)
|
(cmp(a, b) == 0)
|
||||||
|
|
||||||
template hash*(pid: PeerID): Hash =
|
template hash*(pid: PeerId): Hash =
|
||||||
hash(pid.data)
|
hash(pid.data)
|
||||||
|
|
||||||
func validate*(pid: PeerID): bool =
|
func validate*(pid: PeerId): bool =
|
||||||
## Check whether ``pid`` is empty or not.
|
## Check whether ``pid`` is empty or not.
|
||||||
len(pid.data) > 0 and MultiHash.validate(pid.data)
|
len(pid.data) > 0 and MultiHash.validate(pid.data)
|
||||||
|
|
||||||
func hasPublicKey*(pid: PeerID): bool =
|
func hasPublicKey*(pid: PeerId): bool =
|
||||||
## Returns ``true`` if ``pid`` is small enough to hold public key inside.
|
## Returns ``true`` if ``pid`` is small enough to hold public key inside.
|
||||||
if len(pid.data) > 0:
|
if len(pid.data) > 0:
|
||||||
var mh: MultiHash
|
var mh: MultiHash
|
||||||
|
@ -109,8 +109,8 @@ func hasPublicKey*(pid: PeerID): bool =
|
||||||
if mh.mcodec == multiCodec("identity"):
|
if mh.mcodec == multiCodec("identity"):
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
func extractPublicKey*(pid: PeerID, pubkey: var PublicKey): bool =
|
func extractPublicKey*(pid: PeerId, pubkey: var PublicKey): bool =
|
||||||
## Returns ``true`` if public key was successfully decoded from PeerID
|
## Returns ``true`` if public key was successfully decoded from PeerId
|
||||||
## ``pid`` and stored to ``pubkey``.
|
## ``pid`` and stored to ``pubkey``.
|
||||||
##
|
##
|
||||||
## Returns ``false`` otherwise.
|
## Returns ``false`` otherwise.
|
||||||
|
@ -121,16 +121,16 @@ func extractPublicKey*(pid: PeerID, pubkey: var PublicKey): bool =
|
||||||
let length = len(mh.data.buffer)
|
let length = len(mh.data.buffer)
|
||||||
result = pubkey.init(mh.data.buffer.toOpenArray(mh.dpos, length - 1))
|
result = pubkey.init(mh.data.buffer.toOpenArray(mh.dpos, length - 1))
|
||||||
|
|
||||||
func init*(pid: var PeerID, data: openarray[byte]): bool =
|
func init*(pid: var PeerId, data: openArray[byte]): bool =
|
||||||
## Initialize peer id from raw binary representation ``data``.
|
## Initialize peer id from raw binary representation ``data``.
|
||||||
##
|
##
|
||||||
## Returns ``true`` if peer was successfully initialized.
|
## Returns ``true`` if peer was successfully initialized.
|
||||||
var p = PeerID(data: @data)
|
var p = PeerId(data: @data)
|
||||||
if p.validate():
|
if p.validate():
|
||||||
pid = p
|
pid = p
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
func init*(pid: var PeerID, data: string): bool =
|
func init*(pid: var PeerId, data: string): bool =
|
||||||
## Initialize peer id from base58 encoded string representation.
|
## Initialize peer id from base58 encoded string representation.
|
||||||
##
|
##
|
||||||
## Returns ``true`` if peer was successfully initialized.
|
## Returns ``true`` if peer was successfully initialized.
|
||||||
|
@ -138,29 +138,29 @@ func init*(pid: var PeerID, data: string): bool =
|
||||||
var length = 0
|
var length = 0
|
||||||
if Base58.decode(data, p, length) == Base58Status.Success:
|
if Base58.decode(data, p, length) == Base58Status.Success:
|
||||||
p.setLen(length)
|
p.setLen(length)
|
||||||
var opid: PeerID
|
var opid: PeerId
|
||||||
shallowCopy(opid.data, p)
|
shallowCopy(opid.data, p)
|
||||||
if opid.validate():
|
if opid.validate():
|
||||||
pid = opid
|
pid = opid
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
func init*(t: typedesc[PeerID], data: openarray[byte]): Result[PeerID, cstring] =
|
func init*(t: typedesc[PeerId], data: openArray[byte]): Result[PeerId, cstring] =
|
||||||
## Create new peer id from raw binary representation ``data``.
|
## Create new peer id from raw binary representation ``data``.
|
||||||
var res: PeerID
|
var res: PeerId
|
||||||
if not init(res, data):
|
if not init(res, data):
|
||||||
err("peerid: incorrect PeerID binary form")
|
err("peerid: incorrect PeerId binary form")
|
||||||
else:
|
else:
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
func init*(t: typedesc[PeerID], data: string): Result[PeerID, cstring] =
|
func init*(t: typedesc[PeerId], data: string): Result[PeerId, cstring] =
|
||||||
## Create new peer id from base58 encoded string representation ``data``.
|
## Create new peer id from base58 encoded string representation ``data``.
|
||||||
var res: PeerID
|
var res: PeerId
|
||||||
if not init(res, data):
|
if not init(res, data):
|
||||||
err("peerid: incorrect PeerID string")
|
err("peerid: incorrect PeerId string")
|
||||||
else:
|
else:
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
func init*(t: typedesc[PeerID], pubkey: PublicKey): Result[PeerID, cstring] =
|
func init*(t: typedesc[PeerId], pubkey: PublicKey): Result[PeerId, cstring] =
|
||||||
## Create new peer id from public key ``pubkey``.
|
## Create new peer id from public key ``pubkey``.
|
||||||
var pubraw = ? pubkey.getBytes().orError(
|
var pubraw = ? pubkey.getBytes().orError(
|
||||||
cstring("peerid: failed to get bytes from given key"))
|
cstring("peerid: failed to get bytes from given key"))
|
||||||
|
@ -169,23 +169,23 @@ func init*(t: typedesc[PeerID], pubkey: PublicKey): Result[PeerID, cstring] =
|
||||||
mh = ? MultiHash.digest("identity", pubraw)
|
mh = ? MultiHash.digest("identity", pubraw)
|
||||||
else:
|
else:
|
||||||
mh = ? MultiHash.digest("sha2-256", pubraw)
|
mh = ? MultiHash.digest("sha2-256", pubraw)
|
||||||
ok(PeerID(data: mh.data.buffer))
|
ok(PeerId(data: mh.data.buffer))
|
||||||
|
|
||||||
func init*(t: typedesc[PeerID], seckey: PrivateKey): Result[PeerID, cstring] =
|
func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
|
||||||
## Create new peer id from private key ``seckey``.
|
## Create new peer id from private key ``seckey``.
|
||||||
PeerID.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
|
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
|
||||||
|
|
||||||
func match*(pid: PeerID, pubkey: PublicKey): bool =
|
func match*(pid: PeerId, pubkey: PublicKey): bool =
|
||||||
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
||||||
let p = PeerID.init(pubkey)
|
let p = PeerId.init(pubkey)
|
||||||
if p.isErr:
|
if p.isErr:
|
||||||
false
|
false
|
||||||
else:
|
else:
|
||||||
pid == p.get()
|
pid == p.get()
|
||||||
|
|
||||||
func match*(pid: PeerID, seckey: PrivateKey): bool =
|
func match*(pid: PeerId, seckey: PrivateKey): bool =
|
||||||
## Returns ``true`` if ``pid`` matches private key ``seckey``.
|
## Returns ``true`` if ``pid`` matches private key ``seckey``.
|
||||||
let p = PeerID.init(seckey)
|
let p = PeerId.init(seckey)
|
||||||
if p.isErr:
|
if p.isErr:
|
||||||
false
|
false
|
||||||
else:
|
else:
|
||||||
|
@ -193,23 +193,23 @@ func match*(pid: PeerID, seckey: PrivateKey): bool =
|
||||||
|
|
||||||
## Serialization/Deserialization helpers
|
## Serialization/Deserialization helpers
|
||||||
|
|
||||||
func write*(vb: var VBuffer, pid: PeerID) =
|
func write*(vb: var VBuffer, pid: PeerId) =
|
||||||
## Write PeerID value ``peerid`` to buffer ``vb``.
|
## Write PeerId value ``peerid`` to buffer ``vb``.
|
||||||
vb.writeSeq(pid.data)
|
vb.writeSeq(pid.data)
|
||||||
|
|
||||||
func write*(pb: var ProtoBuffer, field: int, pid: PeerID) =
|
func write*(pb: var ProtoBuffer, field: int, pid: PeerId) =
|
||||||
## Write PeerID value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
|
## Write PeerId value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
|
||||||
write(pb, field, pid.data)
|
write(pb, field, pid.data)
|
||||||
|
|
||||||
func getField*(pb: ProtoBuffer, field: int,
|
func getField*(pb: ProtoBuffer, field: int,
|
||||||
pid: var PeerID): ProtoResult[bool] {.inline.} =
|
pid: var PeerId): ProtoResult[bool] {.inline.} =
|
||||||
## Read ``PeerID`` from ProtoBuf's message and validate it
|
## Read ``PeerId`` from ProtoBuf's message and validate it
|
||||||
var buffer: seq[byte]
|
var buffer: seq[byte]
|
||||||
let res = ? pb.getField(field, buffer)
|
let res = ? pb.getField(field, buffer)
|
||||||
if not(res):
|
if not(res):
|
||||||
ok(false)
|
ok(false)
|
||||||
else:
|
else:
|
||||||
var peerId: PeerID
|
var peerId: PeerId
|
||||||
if peerId.init(buffer):
|
if peerId.init(buffer):
|
||||||
pid = peerId
|
pid = peerId
|
||||||
ok(true)
|
ok(true)
|
||||||
|
|
|
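The hunks above are a pure rename of PeerID to PeerId across the constructors and matchers (plus the openArray spelling). A minimal usage sketch of that surface, assuming the usual module layout (libp2p/peerid, libp2p/crypto/crypto) and the newRng/PrivateKey.random helpers from the crypto module; the PeerId calls themselves mirror the signatures shown above.

import stew/results
import libp2p/[peerid, crypto/crypto]

let rng = newRng()                                    # assumed RNG helper
let key = PrivateKey.random(Ed25519, rng[]).tryGet()  # assumed key generator
let pid = PeerId.init(key).tryGet()                   # derive the id from the private key
let parsed = PeerId.init($pid).tryGet()               # round-trip via the base58 text form

doAssert parsed == pid
doAssert parsed.match(key.getPublicKey().tryGet())    # the id must match the public key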
@@ -21,7 +21,7 @@ type
 PeerInfoError* = LPError

 PeerInfo* = ref object
-peerId*: PeerID
+peerId*: PeerId
 addrs*: seq[MultiAddress]
 protocols*: seq[string]
 protoVersion*: string
@@ -42,8 +42,8 @@ chronicles.formatIt(PeerInfo): shortLog(it)
 proc new*(
 p: typedesc[PeerInfo],
 key: PrivateKey,
-addrs: openarray[MultiAddress] = [],
+addrs: openArray[MultiAddress] = [],
-protocols: openarray[string] = [],
+protocols: openArray[string] = [],
 protoVersion: string = "",
 agentVersion: string = ""): PeerInfo
 {.raises: [Defect, PeerInfoError].} =
@@ -54,7 +54,7 @@ proc new*(
 raise newException(PeerInfoError, "invalid private key")

 let peerInfo = PeerInfo(
-peerId: PeerID.init(key).tryGet(),
+peerId: PeerId.init(key).tryGet(),
 publicKey: pubkey,
 privateKey: key,
 protoVersion: protoVersion,
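PeerInfo.new above only changes the openarray spelling; for orientation, a short sketch of calling it (key generation assumed as before, version string purely illustrative):

import stew/results
import libp2p/[peerinfo, peerid, multiaddress, crypto/crypto]

let rng = newRng()
let key = PrivateKey.random(Ed25519, rng[]).tryGet()
let info = PeerInfo.new(
  key,
  addrs = [MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()],
  agentVersion = "nim-libp2p/example")

doAssert info.peerId == PeerId.init(key).tryGet()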
@@ -21,7 +21,7 @@ type
 # Handler types #
 #################

-PeerBookChangeHandler*[T] = proc(peerId: PeerID, entry: T)
+PeerBookChangeHandler*[T] = proc(peerId: PeerId, entry: T)

 AddrChangeHandler* = PeerBookChangeHandler[HashSet[MultiAddress]]
 ProtoChangeHandler* = PeerBookChangeHandler[HashSet[string]]
@@ -33,7 +33,7 @@ type

 # Each book contains a book (map) and event handler(s)
 PeerBook*[T] = object of RootObj
-book*: Table[PeerID, T]
+book*: Table[PeerId, T]
 changeHandlers: seq[PeerBookChangeHandler[T]]

 SetPeerBook*[T] = object of PeerBook[HashSet[T]]
@@ -65,13 +65,13 @@ proc new*(T: type PeerStore): PeerStore =
 #########################

 proc get*[T](peerBook: PeerBook[T],
-peerId: PeerID): T =
+peerId: PeerId): T =
 ## Get all the known metadata of a provided peer.

 peerBook.book.getOrDefault(peerId)

 proc set*[T](peerBook: var PeerBook[T],
-peerId: PeerID,
+peerId: PeerId,
 entry: T) =
 ## Set metadata for a given peerId. This will replace any
 ## previously stored metadata.
@@ -83,7 +83,7 @@ proc set*[T](peerBook: var PeerBook[T],
 handler(peerId, peerBook.get(peerId))

 proc delete*[T](peerBook: var PeerBook[T],
-peerId: PeerID): bool =
+peerId: PeerId): bool =
 ## Delete the provided peer from the book.

 if not peerBook.book.hasKey(peerId):
@@ -92,7 +92,7 @@ proc delete*[T](peerBook: var PeerBook[T],
 peerBook.book.del(peerId)
 return true

-proc contains*[T](peerBook: PeerBook[T], peerId: PeerID): bool =
+proc contains*[T](peerBook: PeerBook[T], peerId: PeerId): bool =
 peerId in peerBook.book

 ################
@@ -101,7 +101,7 @@ proc contains*[T](peerBook: PeerBook[T], peerId: PeerID): bool =

 proc add*[T](
 peerBook: var SetPeerBook[T],
-peerId: PeerID,
+peerId: PeerId,
 entry: T) =
 ## Add entry to a given peer. If the peer is not known,
 ## it will be set with the provided entry.
@@ -116,7 +116,7 @@ proc add*[T](
 # Helper for seq
 proc set*[T](
 peerBook: var SetPeerBook[T],
-peerId: PeerID,
+peerId: PeerId,
 entry: seq[T]) =
 ## Add entry to a given peer. If the peer is not known,
 ## it will be set with the provided entry.
@@ -138,7 +138,7 @@ proc addHandlers*(peerStore: PeerStore,
 peerStore.keyBook.changeHandlers.add(keyChangeHandler)

 proc delete*(peerStore: PeerStore,
-peerId: PeerID): bool =
+peerId: PeerId): bool =
 ## Delete the provided peer from every book.

 peerStore.addressBook.delete(peerId) and
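The PeerBook/PeerStore hunks are again only the PeerID to PeerId rename. A small sketch of the generic book API they touch, using a standalone PeerBook[string]; the PeerId construction reuses the assumed key helpers from the earlier sketches:

import stew/results
import libp2p/[peerstore, peerid, crypto/crypto]

let rng = newRng()
let pid = PeerId.init(PrivateKey.random(Ed25519, rng[]).tryGet()).tryGet()

var book: PeerBook[string]
book.set(pid, "some metadata")            # replaces anything previously stored
doAssert pid in book                      # contains* from the hunk above
doAssert book.get(pid) == "some metadata"
doAssert book.delete(pid)                 # true when the peer was known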
@@ -123,7 +123,7 @@ proc initProtoBuffer*(data: seq[byte], offset = 0,
 result.offset = offset
 result.options = options

-proc initProtoBuffer*(data: openarray[byte], offset = 0,
+proc initProtoBuffer*(data: openArray[byte], offset = 0,
 options: set[ProtoFlags] = {}): ProtoBuffer =
 ## Initialize ProtoBuffer with copy of ``data``.
 result.buffer = @data
@@ -191,7 +191,7 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer,
 pb.offset += sizeof(T)

 proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
-value: openarray[T]) =
+value: openArray[T]) =
 checkFieldNumber(field)
 var length = 0
 let dlength =
@@ -239,7 +239,7 @@ proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
 pb.offset += sizeof(T)

 proc write*[T: byte|char](pb: var ProtoBuffer, field: int,
-value: openarray[T]) =
+value: openArray[T]) =
 checkFieldNumber(field)
 var length = 0
 let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Length)) +
@@ -385,7 +385,7 @@ proc getValue[T: ProtoScalar](data: var ProtoBuffer,
 err(ProtoError.MessageIncomplete)

 proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
-outBytes: var openarray[T],
+outBytes: var openArray[T],
 outLength: var int): ProtoResult[void] =
 doAssert(header.wire == ProtoFieldKind.Length)
 var length = 0
@@ -478,7 +478,7 @@ proc getField*[T: ProtoScalar](data: ProtoBuffer, field: int,
 ok(false)

 proc getField*[T: byte|char](data: ProtoBuffer, field: int,
-output: var openarray[T],
+output: var openArray[T],
 outlen: var int): ProtoResult[bool] =
 checkFieldNumber(field)
 var pb = data
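The ProtoBuffer hunks only fix the openarray spelling in signatures. For orientation, a small sketch of the write/getField pair those signatures belong to (field number and payload are arbitrary; the public buffer field is an assumption):

import stew/[byteutils, results]
import libp2p/protobuf/minprotobuf

var pb = initProtoBuffer()
pb.write(1, "hello")                            # the byte|char openArray overload above
pb.finish()

var reader = initProtoBuffer(pb.buffer)
var payload: seq[byte]
doAssert reader.getField(1, payload).tryGet()   # true: field 1 was present
doAssert payload == "hello".toBytes()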
@@ -37,7 +37,7 @@ type
 IdentifyNoPubKeyError* = object of IdentifyError

 IdentifyInfo* = object
-pubKey*: Option[PublicKey]
+pubkey*: Option[PublicKey]
 peerId*: PeerId
 addrs*: seq[MultiAddress]
 observedAddr*: Option[MultiAddress]
@@ -57,7 +57,7 @@ type
 IdentifyPush* = ref object of LPProtocol
 identifyHandler: IdentifyPushHandler

-proc encodeMsg*(peerInfo: PeerInfo, observedAddr: Multiaddress): ProtoBuffer
+proc encodeMsg*(peerInfo: PeerInfo, observedAddr: MultiAddress): ProtoBuffer
 {.raises: [Defect, IdentifyNoPubKeyError].} =
 result = initProtoBuffer()

@@ -81,14 +81,14 @@ proc encodeMsg*(peerInfo: PeerInfo, observedAddr: Multiaddress): ProtoBuffer
 proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
 var
 iinfo: IdentifyInfo
-pubKey: PublicKey
+pubkey: PublicKey
 oaddr: MultiAddress
 protoVersion: string
 agentVersion: string

 var pb = initProtoBuffer(buf)

-let r1 = pb.getField(1, pubKey)
+let r1 = pb.getField(1, pubkey)
 let r2 = pb.getRepeatedField(2, iinfo.addrs)
 let r3 = pb.getRepeatedField(3, iinfo.protos)
 let r4 = pb.getField(4, oaddr)
@@ -100,14 +100,14 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =

 if res:
 if r1.get():
-iinfo.pubKey = some(pubKey)
+iinfo.pubkey = some(pubkey)
 if r4.get():
 iinfo.observedAddr = some(oaddr)
 if r5.get():
 iinfo.protoVersion = some(protoVersion)
 if r6.get():
 iinfo.agentVersion = some(agentVersion)
-debug "decodeMsg: decoded message", pubkey = ($pubKey).shortLog,
+debug "decodeMsg: decoded message", pubkey = ($pubkey).shortLog,
 addresses = $iinfo.addrs, protocols = $iinfo.protos,
 observable_address = $iinfo.observedAddr,
 proto_version = $iinfo.protoVersion,
@@ -153,8 +153,8 @@ proc identify*(p: Identify,
 raise newException(IdentityInvalidMsgError, "Incorrect message received!")
 result = infoOpt.get()

-if result.pubKey.isSome:
+if result.pubkey.isSome:
-let peer = PeerID.init(result.pubKey.get())
+let peer = PeerId.init(result.pubkey.get())
 if peer.isErr:
 raise newException(IdentityInvalidMsgError, $peer.error)
 else:
@@ -185,8 +185,8 @@ proc init*(p: IdentifyPush) =

 var indentInfo = infoOpt.get()

-if indentInfo.pubKey.isSome:
+if indentInfo.pubkey.isSome:
-let receivedPeerId = PeerID.init(indentInfo.pubKey.get()).tryGet()
+let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
 if receivedPeerId != conn.peerId:
 raise newException(IdentityNoMatchError, "Peer ids don't match")
 indentInfo.peerId = receivedPeerId
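The identify hunks rename IdentifyInfo.pubKey to pubkey (and Multiaddress to MultiAddress). A hedged round-trip sketch through encodeMsg/decodeMsg, reusing the assumed key helpers from the earlier sketches; treat it as an illustration of the renamed field, not as the module's canonical test:

import std/options
import stew/results
import libp2p/[peerinfo, peerid, multiaddress, crypto/crypto]
import libp2p/protocols/identify

let rng = newRng()
let key = PrivateKey.random(Ed25519, rng[]).tryGet()
let info = PeerInfo.new(key, [MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()])
let observed = MultiAddress.init("/ip4/192.0.2.1/tcp/4001").tryGet()

let decoded = decodeMsg(encodeMsg(info, observed).buffer).get()
doAssert decoded.pubkey.isSome
doAssert PeerId.init(decoded.pubkey.get()).tryGet() == info.peerId
doAssert decoded.observedAddr.get() == observed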
@@ -75,7 +75,7 @@ proc handleSubscribe*(f: FloodSub,
 # unsubscribe the peer from the topic
 peers[].excl(peer)

-method unsubscribePeer*(f: FloodSub, peer: PeerID) =
+method unsubscribePeer*(f: FloodSub, peer: PeerId) =
 ## handle peer disconnects
 ##
 trace "unsubscribing floodsub peer", peer
@@ -169,7 +169,7 @@ method onPubSubPeerEvent*(p: GossipSub, peer: PubsubPeer, event: PubSubPeerEvent

 procCall FloodSub(p).onPubSubPeerEvent(peer, event)

-method unsubscribePeer*(g: GossipSub, peer: PeerID) =
+method unsubscribePeer*(g: GossipSub, peer: PeerId) =
 ## handle peer disconnects
 ##

@@ -51,7 +51,7 @@ proc pruned*(g: GossipSub,
 backoffMoment = Moment.fromNow(backoffDuration)

 g.backingOff
-.mgetOrPut(topic, initTable[PeerID, Moment]())[p.peerId] = backoffMoment
+.mgetOrPut(topic, initTable[PeerId, Moment]())[p.peerId] = backoffMoment

 g.peerStats.withValue(p.peerId, stats):
 stats.topicInfos.withValue(topic, info):
@@ -71,7 +71,7 @@ proc pruned*(g: GossipSub,
 proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
 let now = Moment.now()
 var expired = toSeq(t.getOrDefault(topic).pairs())
-expired.keepIf do (pair: tuple[peer: PeerID, expire: Moment]) -> bool:
+expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
 now >= pair.expire
 for (peer, _) in expired:
 t.withValue(topic, v):
@@ -84,7 +84,7 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises:
 # by spec, larger then Dhi, but let's put some hard caps
 peers.setLen(min(peers.len, g.parameters.dHigh * 2))
 peers.map do (x: PubSubPeer) -> PeerInfoMsg:
-PeerInfoMsg(peerID: x.peerId.getBytes())
+PeerInfoMsg(peerId: x.peerId.getBytes())

 proc handleGraft*(g: GossipSub,
 peer: PubSubPeer,
@@ -107,7 +107,7 @@ proc handleGraft*(g: GossipSub,

 let backoff = Moment.fromNow(g.parameters.pruneBackoff)
 g.backingOff
-.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
+.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff

 peer.behaviourPenalty += 0.1

@@ -129,7 +129,7 @@ proc handleGraft*(g: GossipSub,

 let backoff = Moment.fromNow(g.parameters.pruneBackoff)
 g.backingOff
-.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
+.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff

 peer.behaviourPenalty += 0.1

@@ -184,7 +184,7 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
 current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
 if backoff > current:
 g.backingOff
-.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
+.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff

 trace "pruning rpc received peer", peer, score = peer.score
 g.pruned(peer, topic, setBackoff = false)
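Beyond the rename, these hunks show the backoff bookkeeping pattern: a BackoffTable keyed by topic and then by PeerId, pruned by handleBackingOff. A sketch of that pattern (the gossipsub/ submodule paths are assumptions):

import std/tables
import chronos
import stew/results
import libp2p/[peerid, crypto/crypto]
import libp2p/protocols/pubsub/gossipsub/[types, behavior]

let rng = newRng()
let peer = PeerId.init(PrivateKey.random(Ed25519, rng[]).tryGet()).tryGet()

var backingOff: BackoffTable
const topic = "example-topic"
backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer] =
  Moment.now() - 1.seconds                 # an already expired backoff

handleBackingOff(backingOff, topic)        # drops expired entries for the topic
doAssert peer notin backingOff.getOrDefault(topic)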
@@ -73,7 +73,7 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
 else:
 let
 address = peer.address.get()
-g.peersInIP.mgetOrPut(address, initHashSet[PeerID]()).incl(peer.peerId)
+g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
 let
 ipPeers = g.peersInIP.getOrDefault(address).len().float64
 if ipPeers > g.parameters.ipColocationFactorThreshold:
@@ -109,7 +109,7 @@ proc updateScores*(g: GossipSub) = # avoid async
 trace "updating scores", peers = g.peers.len

 let now = Moment.now()
-var evicting: seq[PeerID]
+var evicting: seq[PeerId]

 for peerId, stats in g.peerStats.mpairs:
 let peer = g.peers.getOrDefault(peerId)
@@ -139,7 +139,7 @@ type

 disconnectBadPeers*: bool

-BackoffTable* = Table[string, Table[PeerID, Moment]]
+BackoffTable* = Table[string, Table[PeerId, Moment]]
 ValidationSeenTable* = Table[MessageID, HashSet[PubSubPeer]]

 GossipSub* = ref object of FloodSub
@@ -156,11 +156,11 @@ type
 heartbeatFut*: Future[void] # cancellation future for heartbeat interval
 heartbeatRunning*: bool

-peerStats*: Table[PeerID, PeerStats]
+peerStats*: Table[PeerId, PeerStats]
 parameters*: GossipSubParams
 topicParams*: Table[string, TopicParams]
 directPeersLoop*: Future[void]
-peersInIP*: Table[MultiAddress, HashSet[PeerID]]
+peersInIP*: Table[MultiAddress, HashSet[PeerId]]

 heartbeatEvents*: seq[AsyncEvent]

@@ -15,7 +15,7 @@ import ./pubsubpeer, ../../peerid
 type
 PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map

-proc hasPeerID*(t: PeerTable, topic: string, peerId: PeerID): bool =
+proc hasPeerId*(t: PeerTable, topic: string, peerId: PeerId): bool =
 if topic in t:
 try:
 for peer in t[topic]:
@@ -94,7 +94,7 @@ type
 switch*: Switch # the switch used to dial/connect to peers
 peerInfo*: PeerInfo # this peer's info
 topics*: Table[string, seq[TopicHandler]] # the topics that _we_ are interested in
-peers*: Table[PeerID, PubSubPeer] ##\
+peers*: Table[PeerId, PubSubPeer] ##\
 ## Peers that we are interested to gossip with (but not necessarily
 ## yet connected to)
 triggerSelf*: bool # trigger own local handler on publish
@@ -119,7 +119,7 @@ type

 knownTopics*: HashSet[string]

-method unsubscribePeer*(p: PubSub, peerId: PeerID) {.base.} =
+method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
 ## handle peer disconnects
 ##

@@ -273,7 +273,7 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubsubPeer, event: PubsubPeerEvent) {

 proc getOrCreatePeer*(
 p: PubSub,
-peerId: PeerID,
+peerId: PeerId,
 protos: seq[string]): PubSubPeer =
 p.peers.withValue(peerId, peer):
 return peer[]
@@ -374,7 +374,7 @@ method handleConn*(p: PubSub,
 finally:
 await conn.closeWithEOF()

-method subscribePeer*(p: PubSub, peer: PeerID) {.base.} =
+method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
 ## subscribe to remote peer to receive/send pubsub
 ## messages
 ##
@@ -53,7 +53,7 @@ type
 codec*: string # the protocol that this peer joined from
 sendConn*: Connection # cached send connection
 address*: Option[MultiAddress]
-peerId*: PeerID
+peerId*: PeerId
 handler*: RPCHandler
 observers*: ref seq[PubSubObserver] # ref as in smart_ptr

@@ -281,7 +281,7 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =

 proc new*(
 T: typedesc[PubSubPeer],
-peerId: PeerID,
+peerId: PeerId,
 getConn: GetConn,
 dropConn: DropConn,
 onEvent: OnEvent,
@@ -17,7 +17,7 @@ export options

 type
 PeerInfoMsg* = object
-peerID*: seq[byte]
+peerId*: seq[byte]
 signedPeerRecord*: seq[byte]

 SubOpts* = object
@@ -39,7 +39,7 @@ proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =

 proc write*(pb: var ProtoBuffer, field: int, infoMsg: PeerInfoMsg) =
 var ipb = initProtoBuffer()
-ipb.write(1, infoMsg.peerID)
+ipb.write(1, infoMsg.peerId)
 ipb.write(2, infoMsg.signedPeerRecord)
 ipb.finish()
 pb.write(field, ipb)
@@ -142,10 +142,10 @@ proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.
 inline.} =
 trace "decodePeerInfoMsg: decoding message"
 var pi = PeerInfoMsg()
-if ? pb.getField(1, pi.peerID):
+if ? pb.getField(1, pi.peerId):
-trace "decodePeerInfoMsg: read peerID", peerID = pi.peerID
+trace "decodePeerInfoMsg: read peerId", peerId = pi.peerId
 else:
-trace "decodePeerInfoMsg: peerID is missing"
+trace "decodePeerInfoMsg: peerId is missing"
 if ? pb.getField(2, pi.signedPeerRecord):
 trace "decodePeerInfoMsg: read signedPeerRecord", signedPeerRecord = pi.signedPeerRecord
 else:
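The RPC hunks rename PeerInfoMsg.peerID to peerId in both the writer and the decoder. A sketch of a round trip through that pair; the way the embedded message is re-read from field 1 is inferred from the writer above, so treat it as an assumption:

import stew/results
import libp2p/protobuf/minprotobuf
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

let msg = PeerInfoMsg(peerId: @[1'u8, 2, 3])
var pb = initProtoBuffer()
pb.write(1, msg)                        # writes the embedded message into field 1
pb.finish()

var inner: seq[byte]
var outer = initProtoBuffer(pb.buffer)
doAssert outer.getField(1, inner).tryGet()
doAssert decodePeerInfoMsg(initProtoBuffer(inner)).tryGet().peerId == msg.peerId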
@@ -146,7 +146,7 @@ proc encrypt(

 proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
 {.raises: [Defect, NoiseNonceMaxError].} =
-result = newSeqOfCap[byte](data.len + sizeof(ChachaPolyTag))
+result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
 result.add(data)

 let tag = encrypt(state, result, ad)
@@ -217,7 +217,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
 proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
 {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
 # according to spec if key is empty leave plaintext
-if ss.cs.hasKey:
+if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
 result = ss.cs.decryptWithAd(ss.h.data, data)
 else:
 result = @data
@@ -368,7 +368,7 @@ proc handshakeXXOutbound(
 dh_se()

 # last payload must follow the encrypted way of sending
-msg.add hs.ss.encryptAndHash(p2psecret)
+msg.add hs.ss.encryptAndHash(p2pSecret)

 await conn.sendHSMessage(msg.data)

@@ -408,7 +408,7 @@ proc handshakeXXInbound(
 write_s()
 dh_es()

-msg.add hs.ss.encryptAndHash(p2psecret)
+msg.add hs.ss.encryptAndHash(p2pSecret)

 await conn.sendHSMessage(msg.data)
 msg.clear()
@@ -431,7 +431,7 @@ method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
 while true: # Discard 0-length payloads
 let frame = await sconn.stream.readFrame()
 sconn.activity = true
-if frame.len > 0:
+if frame.len > ChaChaPolyTag.len:
 let res = sconn.readCs.decryptWithAd([], frame)
 if res.len > 0:
 when defined(libp2p_dump):
@@ -554,7 +554,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
 trace "Remote signature verified", conn

 if initiator:
-let pid = PeerID.init(remotePubKey)
+let pid = PeerId.init(remotePubKey)
 if not conn.peerId.validate():
 raise newException(NoiseHandshakeError, "Failed to validate peerId.")
 if pid.isErr or pid.get() != conn.peerId:
@@ -567,7 +567,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
 received_key = $remotePubKey
 raise newException(NoiseHandshakeError, "Noise handshake, peer infos don't match! " & $pid & " != " & $conn.peerId)
 else:
-let pid = PeerID.init(remotePubKey)
+let pid = PeerId.init(remotePubKey)
 if pid.isErr:
 raise newException(NoiseHandshakeError, "Invalid remote peer id")
 conn.peerId = pid.get()
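The two behavioral tweaks above hinge on the same size fact: a ChaCha20-Poly1305 ciphertext is the plaintext plus a 16-byte tag, so a frame that is not longer than the tag cannot carry a non-empty payload. Stated as a check (the chacha20poly1305 module path is an assumption):

import libp2p/crypto/chacha20poly1305

const tagLen = sizeof(ChaChaPolyTag)
doAssert tagLen == 16
func noiseCiphertextLen(payloadLen: int): int = payloadLen + tagLen
doAssert noiseCiphertextLen(0) == tagLen   # empty payload encrypts to a tag-sized frame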
@@ -83,7 +83,7 @@ func shortLog*(conn: SecioConn): auto =

 chronicles.formatIt(SecioConn): shortLog(it)

-proc init(mac: var SecureMac, hash: string, key: openarray[byte]) =
+proc init(mac: var SecureMac, hash: string, key: openArray[byte]) =
 if hash == "SHA256":
 mac = SecureMac(kind: SecureMacType.Sha256)
 mac.ctxsha256.init(key)
@@ -94,7 +94,7 @@ proc init(mac: var SecureMac, hash: string, key: openarray[byte]) =
 mac = SecureMac(kind: SecureMacType.Sha1)
 mac.ctxsha1.init(key)

-proc update(mac: var SecureMac, data: openarray[byte]) =
+proc update(mac: var SecureMac, data: openArray[byte]) =
 case mac.kind
 of SecureMacType.Sha256:
 update(mac.ctxsha256, data)
@@ -112,7 +112,7 @@ proc sizeDigest(mac: SecureMac): int {.inline.} =
 of SecureMacType.Sha1:
 result = int(mac.ctxsha1.sizeDigest())

-proc finish(mac: var SecureMac, data: var openarray[byte]) =
+proc finish(mac: var SecureMac, data: var openArray[byte]) =
 case mac.kind
 of SecureMacType.Sha256:
 discard finish(mac.ctxsha256, data)
@@ -130,8 +130,8 @@ proc reset(mac: var SecureMac) =
 of SecureMacType.Sha1:
 reset(mac.ctxsha1)

-proc init(sc: var SecureCipher, cipher: string, key: openarray[byte],
+proc init(sc: var SecureCipher, cipher: string, key: openArray[byte],
-iv: openarray[byte]) {.inline.} =
+iv: openArray[byte]) {.inline.} =
 if cipher == "AES-128":
 sc = SecureCipher(kind: SecureCipherType.Aes128)
 sc.ctxaes128.init(key, iv)
@@ -142,8 +142,8 @@ proc init(sc: var SecureCipher, cipher: string, key: openarray[byte],
 sc = SecureCipher(kind: SecureCipherType.Twofish)
 sc.ctxtwofish256.init(key, iv)

-proc encrypt(cipher: var SecureCipher, input: openarray[byte],
+proc encrypt(cipher: var SecureCipher, input: openArray[byte],
-output: var openarray[byte]) {.inline.} =
+output: var openArray[byte]) {.inline.} =
 case cipher.kind
 of SecureCipherType.Aes128:
 cipher.ctxaes128.encrypt(input, output)
@@ -152,8 +152,8 @@ proc encrypt(cipher: var SecureCipher, input: openarray[byte],
 of SecureCipherType.Twofish:
 cipher.ctxtwofish256.encrypt(input, output)

-proc decrypt(cipher: var SecureCipher, input: openarray[byte],
+proc decrypt(cipher: var SecureCipher, input: openArray[byte],
-output: var openarray[byte]) {.inline.} =
+output: var openArray[byte]) {.inline.} =
 case cipher.kind
 of SecureCipherType.Aes128:
 cipher.ctxaes128.decrypt(input, output)
@@ -300,8 +300,8 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
 remoteExchanges: string
 remoteCiphers: string
 remoteHashes: string
-remotePeerId: PeerID
+remotePeerId: PeerId
-localPeerId: PeerID
+localPeerId: PeerId
 localBytesPubkey = s.localPublicKey.getBytes().tryGet()

 brHmacDrbgGenerate(s.rng[], localNonce)
@@ -312,7 +312,7 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
 SecioCiphers,
 SecioHashes)

-localPeerId = PeerID.init(s.localPublicKey).tryGet()
+localPeerId = PeerId.init(s.localPublicKey).tryGet()

 trace "Local proposal", schemes = SecioExchanges,
 ciphers = SecioCiphers,
@@ -336,9 +336,9 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
 pubkey = remoteBytesPubkey.shortLog
 raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")

-remotePeerId = PeerID.init(remotePubkey).tryGet()
+remotePeerId = PeerId.init(remotePubkey).tryGet()

-# TODO: PeerID check against supplied PeerID
+# TODO: PeerId check against supplied PeerId
 if not initiator:
 conn.peerId = remotePeerId
 let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
@@ -45,7 +45,7 @@ chronicles.formatIt(SecureConn): shortLog(it)
 proc new*(T: type SecureConn,
 conn: Connection,
 peerId: PeerId,
-observedAddr: Multiaddress,
+observedAddr: MultiAddress,
 timeout: Duration = DefaultConnectionTimeout): T =
 result = T(stream: conn,
 peerId: peerId,
@@ -34,7 +34,7 @@ type
 timerTaskFut: Future[void] # the current timer instance
 timeoutHandler*: TimeoutHandler # timeout handler
 peerId*: PeerId
-observedAddr*: Multiaddress
+observedAddr*: MultiAddress
 upgraded*: Future[void]
 tag*: string # debug tag for metrics (generally ms protocol)
 transportDir*: Direction # The bottom level transport (generally the socket) direction
@@ -86,43 +86,43 @@ proc removePeerEventHandler*(s: Switch,
 kind: PeerEventKind) =
 s.connManager.removePeerEventHandler(handler, kind)

-proc isConnected*(s: Switch, peerId: PeerID): bool =
+proc isConnected*(s: Switch, peerId: PeerId): bool =
 ## returns true if the peer has one or more
 ## associated connections (sockets)
 ##

 peerId in s.connManager

-proc disconnect*(s: Switch, peerId: PeerID): Future[void] {.gcsafe.} =
+proc disconnect*(s: Switch, peerId: PeerId): Future[void] {.gcsafe.} =
 s.connManager.dropPeer(peerId)

 method connect*(
 s: Switch,
-peerId: PeerID,
+peerId: PeerId,
 addrs: seq[MultiAddress]): Future[void] =
 s.dialer.connect(peerId, addrs)

 method dial*(
 s: Switch,
-peerId: PeerID,
+peerId: PeerId,
 protos: seq[string]): Future[Connection] =
 s.dialer.dial(peerId, protos)

 proc dial*(s: Switch,
-peerId: PeerID,
+peerId: PeerId,
 proto: string): Future[Connection] =
 dial(s, peerId, @[proto])

 method dial*(
 s: Switch,
-peerId: PeerID,
+peerId: PeerId,
 addrs: seq[MultiAddress],
 protos: seq[string]): Future[Connection] =
 s.dialer.dial(peerId, addrs, protos)

 proc dial*(
 s: Switch,
-peerId: PeerID,
+peerId: PeerId,
 addrs: seq[MultiAddress],
 proto: string): Future[Connection] =
 dial(s, peerId, addrs, @[proto])
@@ -212,9 +212,9 @@ proc stop*(s: Switch) {.async.} =
 # close and cleanup all connections
 await s.connManager.close()

-for t in s.transports:
+for transp in s.transports:
 try:
-await t.stop()
+await transp.stop()
 except CancelledError as exc:
 raise exc
 except CatchableError as exc:
@@ -269,7 +269,7 @@ proc newSwitch*(peerInfo: PeerInfo,
 transports: seq[Transport],
 identity: Identify,
 muxers: Table[string, MuxerProvider],
-secureManagers: openarray[Secure] = [],
+secureManagers: openArray[Secure] = [],
 connManager: ConnManager,
 ms: MultistreamSelect,
 nameResolver: NameResolver = nil): Switch
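The Switch hunks are renames (PeerID to PeerId, openarray to openArray) plus a loop-variable rename in stop. A hedged end-to-end sketch of the dial-by-PeerId surface they touch; newStandardSwitch and the umbrella libp2p import are assumptions about the repo's convenience API:

import chronos
import libp2p                       # assumed umbrella module exporting newStandardSwitch

proc demo() {.async.} =
  let a = newStandardSwitch()       # assumed convenience constructor
  let b = newStandardSwitch()
  await a.start()
  await b.start()

  await a.connect(b.peerInfo.peerId, b.peerInfo.addrs)
  doAssert a.isConnected(b.peerInfo.peerId)

  await a.stop()
  await b.stop()

waitFor demo()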
@@ -26,7 +26,7 @@ type
 TransportClosedError* = object of TransportError

 Transport* = ref object of RootObj
-addrs*: seq[Multiaddress]
+addrs*: seq[MultiAddress]
 running*: bool
 upgrader*: Upgrade

@@ -174,7 +174,7 @@ method stop*(self: WsTransport) {.async, gcsafe.} =
 trace "Error shutting down ws transport", exc = exc.msg

 proc connHandler(self: WsTransport,
-stream: WsSession,
+stream: WSSession,
 secure: bool,
 dir: Direction): Future[Connection] {.async.} =
 let observedAddr =
@@ -198,7 +198,7 @@ proc new*(
 T: type MuxedUpgrade,
 identity: Identify,
 muxers: Table[string, MuxerProvider],
-secureManagers: openarray[Secure] = [],
+secureManagers: openArray[Secure] = [],
 connManager: ConnManager,
 ms: MultistreamSelect): T =

@@ -76,7 +76,7 @@ proc identify*(
 info = await self.identity.identify(conn, conn.peerId)
 peerStore = self.connManager.peerStore

-if info.pubKey.isNone and isNil(conn):
+if info.pubkey.isNone and isNil(conn):
 raise newException(UpgradeFailedError,
 "no public key provided and no existing peer identity found")

@@ -14,7 +14,7 @@ import stew/byteutils
 const
 ShortDumpMax = 12

-func shortLog*(item: openarray[byte]): string =
+func shortLog*(item: openArray[byte]): string =
 if item.len <= ShortDumpMax:
 result = item.toHex()
 else:
@@ -103,7 +103,7 @@ proc vsizeof*(x: SomeVarint): int {.inline.} =
 Leb128.len(toUleb(x))

 proc getUVarint*[T: PB|LP](vtype: typedesc[T],
-pbytes: openarray[byte],
+pbytes: openArray[byte],
 outlen: var int,
 outval: var SomeUVarint): VarintResult[void] =
 ## Decode `unsigned varint` from buffer ``pbytes`` and store it to ``outval``.
@@ -149,7 +149,7 @@ proc getUVarint*[T: PB|LP](vtype: typedesc[T],
 ok()

 proc putUVarint*[T: PB|LP](vtype: typedesc[T],
-pbytes: var openarray[byte],
+pbytes: var openArray[byte],
 outlen: var int,
 outval: SomeUVarint): VarintResult[void] =
 ## Encode `unsigned varint` ``outval`` and store it to array ``pbytes``.
@@ -180,7 +180,7 @@ proc putUVarint*[T: PB|LP](vtype: typedesc[T],
 else:
 err(VarintError.Overrun)

-proc getSVarint*(pbytes: openarray[byte], outsize: var int,
+proc getSVarint*(pbytes: openArray[byte], outsize: var int,
 outval: var (PBZigVarint | PBSomeSVarint)): VarintResult[void] {.inline.} =
 ## Decode signed integer (``int32`` or ``int64``) from buffer ``pbytes``
 ## and store it to ``outval``.
@@ -210,7 +210,7 @@ proc getSVarint*(pbytes: openarray[byte], outsize: var int,
 outval = fromUleb(value, type(outval))
 res

-proc putSVarint*(pbytes: var openarray[byte], outsize: var int,
+proc putSVarint*(pbytes: var openArray[byte], outsize: var int,
 outval: (PBZigVarint | PBSomeSVarint)): VarintResult[void] {.inline.} =
 ## Encode signed integer ``outval`` using ProtoBuffer's zigzag encoding
 ## (``sint32`` or ``sint64``) and store it to array ``pbytes``.
@@ -230,7 +230,7 @@ template varintFatal(msg) =
 const m = msg
 {.fatal: m.}

-proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openarray[byte],
+proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openArray[byte],
 nbytes: var int, value: SomeVarint): VarintResult[void] {.inline.} =
 when vtype is PB:
 when (type(value) is PBSomeSVarint) or (type(value) is PBZigVarint):
@@ -247,7 +247,7 @@ proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openarray[byte],
 varintFatal("LibP2P's varint do not support type [" &
 typetraits.name(type(value)) & "]")

-proc getVarint*[T: PB|LP](vtype: typedesc[T], pbytes: openarray[byte],
+proc getVarint*[T: PB|LP](vtype: typedesc[T], pbytes: openArray[byte],
 nbytes: var int,
 value: var SomeVarint): VarintResult[void] {.inline.} =
 when vtype is PB:
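The varint hunks only touch the openarray spelling. A sketch of the encode/decode pair using the signatures shown above:

import stew/results
import libp2p/varint

var buf: array[10, byte]
var used = 0
doAssert PB.putUVarint(buf, used, 300'u64).isOk()   # encode into buf, length in used

var value: uint64
var consumed = 0
doAssert PB.getUVarint(buf.toOpenArray(0, used - 1), consumed, value).isOk()
doAssert value == 300'u64 and consumed == used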
@@ -53,7 +53,7 @@ proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
 shallowCopy(result.buffer, data)
 result.offset = offset

-proc initVBuffer*(data: openarray[byte], offset = 0): VBuffer =
+proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =
 ## Initialize VBuffer with copy of ``data``.
 result.buffer = newSeq[byte](len(data))
 if len(data) > 0:
@@ -88,7 +88,7 @@ proc writeLPVarint*(vb: var VBuffer, value: LPSomeUVarint) =
 proc writeVarint*(vb: var VBuffer, value: LPSomeUVarint) =
 writeLPVarint(vb, value)

-proc writeSeq*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
+proc writeSeq*[T: byte|char](vb: var VBuffer, value: openArray[T]) =
 ## Write array ``value`` to buffer ``vb``, value will be prefixed with
 ## varint length of the array.
 var length = 0
@@ -101,7 +101,7 @@ proc writeSeq*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
 copyMem(addr vb.buffer[vb.offset], unsafeAddr value[0], len(value))
 vb.offset += len(value)

-proc writeArray*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
+proc writeArray*[T: byte|char](vb: var VBuffer, value: openArray[T]) =
 ## Write array ``value`` to buffer ``vb``, value will NOT be prefixed with
 ## varint length of the array.
 if len(value) > 0:
@@ -151,7 +151,7 @@ proc peekSeq*[T: string|seq[byte]](vb: var VBuffer, value: var T): int =
 vb.offset -= length

 proc peekArray*[T: char|byte](vb: var VBuffer,
-value: var openarray[T]): int =
+value: var openArray[T]): int =
 ## Peek array from buffer ``vb`` and store result to ``value``.
 ##
 ## This procedure will not adjust internal offset.
@@ -183,7 +183,7 @@ proc readSeq*[T: string|seq[byte]](vb: var VBuffer,
 vb.offset += result

 proc readArray*[T: char|byte](vb: var VBuffer,
-value: var openarray[T]): int {.inline.} =
+value: var openArray[T]): int {.inline.} =
 ## Read array from buffer ``vb`` and store result to ``value``.
 ##
 ## Returns number of bytes consumed from ``vb`` or ``-1`` on error.
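The VBuffer hunks are the same openarray spelling fix. A sketch of the length-prefixed write/read pair (the no-argument initVBuffer and the public buffer field are assumptions):

import libp2p/vbuffer

var wb = initVBuffer()
wb.writeSeq("hello")              # varint length prefix followed by the bytes

var rb = initVBuffer(wb.buffer)   # re-read what was just written
var text: string
doAssert rb.readSeq(text) > 0     # returns bytes consumed, -1 on error
doAssert text == "hello"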
@@ -19,14 +19,14 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
checkTrackers()

asyncTest "can handle local address":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
check transport1.handles(transport1.addrs[0])
await transport1.stop()

asyncTest "e2e: handle observedAddr":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]

let transport1 = prov()
await transport1.start(ma)

@@ -54,7 +54,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when no issues will not wait that long!

asyncTest "e2e: handle write":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]

let transport1 = prov()
await transport1.start(ma)

@@ -82,7 +82,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when no issues will not wait that long!

asyncTest "e2e: handle read":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)

@@ -108,7 +108,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))

asyncTest "e2e: handle dial cancellation":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]

let transport1 = prov()
await transport1.start(ma)

@@ -125,7 +125,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))

asyncTest "e2e: handle accept cancellation":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]

let transport1 = prov()
await transport1.start(ma)

@@ -186,7 +186,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await transport1.stop()

asyncTest "e2e: stopping transport kills connections":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]

let transport1 = prov()
await transport1.start(ma)

@@ -206,7 +206,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
check conn.closed()

asyncTest "read or write on closed connection":
- let ma = @[Multiaddress.init(ma).tryGet()]
+ let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)

@@ -29,7 +29,7 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
var ceil = 15
let fsub = cast[FloodSub](sender)
while not fsub.floodsub.hasKey(key) or
- not fsub.floodsub.hasPeerID(key, receiver.peerInfo.peerId):
+ not fsub.floodsub.hasPeerId(key, receiver.peerInfo.peerId):
await sleepAsync(100.millis)
dec ceil
doAssert(ceil > 0, "waitSub timeout!")

@@ -18,7 +18,7 @@ type

proc noop(data: seq[byte]) {.async, gcsafe.} = discard

- proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): PubSubPeer =
+ proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
proc getConn(): Future[Connection] =
p.switch.dial(peerId, GossipSubCodec)

@@ -317,8 +317,8 @@ suite "GossipSub internal":
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
for p in peers.keys:
- check not gossipSub.fanout.hasPeerID(topic, p.peerId)
- check not gossipSub.mesh.hasPeerID(topic, p.peerId)
+ check not gossipSub.fanout.hasPeerId(topic, p.peerId)
+ check not gossipSub.mesh.hasPeerId(topic, p.peerId)

await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()

@@ -552,7 +552,7 @@ suite "GossipSub internal":
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
gossipSub.backingOff
- .mgetOrPut(topic, initTable[PeerID, Moment]())
+ .mgetOrPut(topic, initTable[PeerId, Moment]())
.add(peerId, Moment.now() + 1.hours)
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
# there must be a control prune due to violation of backoff

@@ -44,11 +44,11 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
ev.clear()

while (not fsub.gossipsub.hasKey(key) or
- not fsub.gossipsub.hasPeerID(key, receiver.peerInfo.peerId)) and
+ not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
- not fsub.mesh.hasPeerID(key, receiver.peerInfo.peerId)) and
+ not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
- not fsub.fanout.hasPeerID(key , receiver.peerInfo.peerId)):
+ not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."

# await more heartbeats

@@ -417,7 +417,7 @@ suite "GossipSub":
check:
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
- gossip1.gossipsub.hasPeerID("foobar", gossip2.peerInfo.peerId)
+ gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)

await allFuturesThrowing(
nodes[0].switch.stop(),

@@ -475,11 +475,11 @@ suite "GossipSub":
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub

- gossip1.gossipsub.hasPeerID("foobar", gossip2.peerInfo.peerId) or
- gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
+ gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
+ gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

- gossip2.gossipsub.hasPeerID("foobar", gossip1.peerInfo.peerId) or
- gossip2.mesh.hasPeerID("foobar", gossip1.peerInfo.peerId)
+ gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
+ gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)

await allFuturesThrowing(
nodes[0].switch.stop(),

@@ -541,8 +541,8 @@ suite "GossipSub":

check:
"foobar" in gossip1.gossipsub
- gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
- not gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
+ gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
+ not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)

await passed.wait(2.seconds)

@@ -604,10 +604,10 @@ suite "GossipSub":
check:
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub
- gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
- not gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
- gossip2.mesh.hasPeerID("foobar", gossip1.peerInfo.peerId)
- not gossip2.fanout.hasPeerID("foobar", gossip1.peerInfo.peerId)
+ gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
+ not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
+ gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
+ not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)

await allFuturesThrowing(
nodes[0].switch.stop(),

@@ -746,8 +746,8 @@ suite "GossipSub":
check:
"foobar" in gossip1.gossipsub
"foobar" notin gossip2.gossipsub
- not gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
- not gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
+ not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
+ not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)

await allFuturesThrowing(
nodes[0].switch.stop(),

@@ -41,11 +41,11 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
ev.clear()

while (not fsub.gossipsub.hasKey(key) or
- not fsub.gossipsub.hasPeerID(key, receiver.peerInfo.peerId)) and
+ not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
- not fsub.mesh.hasPeerID(key, receiver.peerInfo.peerId)) and
+ not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
- not fsub.fanout.hasPeerID(key , receiver.peerInfo.peerId)):
+ not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."

# await more heartbeats

@@ -10,13 +10,13 @@ import ../../libp2p/[peerid,

var rng = newRng()

- proc randomPeerID(): PeerID =
- PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
+ proc randomPeerId(): PeerId =
+ PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()

suite "MCache":
test "put/get":
var mCache = MCache.init(3, 5)
- var msg = Message(fromPeer: randomPeerID(), seqno: "12345".toBytes())
+ var msg = Message(fromPeer: randomPeerId(), seqno: "12345".toBytes())
let msgId = defaultMsgIdProvider(msg)
mCache.put(msgId, msg)
check mCache.get(msgId).isSome and mCache.get(msgId).get() == msg

@@ -25,13 +25,13 @@ suite "MCache":
var mCache = MCache.init(3, 5)

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)

for i in 0..<5:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)

@@ -46,7 +46,7 @@ suite "MCache":
var mCache = MCache.init(1, 5)

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)

@@ -55,7 +55,7 @@ suite "MCache":
check mCache.window("foo").len == 0

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)

@@ -64,7 +64,7 @@ suite "MCache":
check mCache.window("bar").len == 0

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
mCache.put(defaultMsgIdProvider(msg), msg)

@@ -76,19 +76,19 @@ suite "MCache":
var mCache = MCache.init(1, 5)

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)

for i in 0..<3:
- var msg = Message(fromPeer: randomPeerID(),
+ var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
mCache.put(defaultMsgIdProvider(msg), msg)

@@ -18,7 +18,7 @@ randomize()

proc generateNodes*(
num: Natural,
- secureManagers: openarray[SecureProtocol] = [
+ secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise
],
msgIdProvider: MsgIdProvider = nil,

@@ -79,7 +79,7 @@ proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =

proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
- var dialed: seq[PeerID]
+ var dialed: seq[PeerId]
while dialed.len < nodes.len - 1:
let node = sample(nodes)
if node.peerInfo.peerId notin dialed:

@@ -335,7 +335,7 @@ const
"8613E8F86D2DD1CF3CEDC52AD91423F2F31E0003",
]

- proc cmp(a, b: openarray[byte]): bool =
+ proc cmp(a, b: openArray[byte]): bool =
result = (@a == @b)

proc testStretcher(s, e: int, cs: string, ds: string): bool =

@@ -36,7 +36,7 @@ suite "Identify":
conn {.threadvar.}: Connection

asyncSetup:
- ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
remoteSecKey = PrivateKey.random(ECDSA, rng[]).get()
remotePeerInfo = PeerInfo.new(
remoteSecKey,

@@ -72,7 +72,7 @@ suite "Identify":
discard await msDial.select(conn, IdentifyCodec)
let id = await identifyProto2.identify(conn, remotePeerInfo.peerId)

- check id.pubKey.get() == remoteSecKey.getPublicKey().get()
+ check id.pubkey.get() == remoteSecKey.getPublicKey().get()
check id.addrs == ma
check id.protoVersion.get() == ProtoVersion
check id.agentVersion.get() == AgentVersion

@@ -95,7 +95,7 @@ suite "Identify":
discard await msDial.select(conn, IdentifyCodec)
let id = await identifyProto2.identify(conn, remotePeerInfo.peerId)

- check id.pubKey.get() == remoteSecKey.getPublicKey().get()
+ check id.pubkey.get() == remoteSecKey.getPublicKey().get()
check id.addrs == ma
check id.protoVersion.get() == ProtoVersion
check id.agentVersion.get() == customAgentVersion

@@ -96,7 +96,7 @@ suite "Minimal ASN.1 encode/decode suite":
ncrutils.fromHex(Asn1EdgeExpects[i]) == value

test "ASN.1 DER INTEGER encoding/decoding of native unsigned values test":
- proc decodeBuffer(data: openarray[byte]): uint64 =
+ proc decodeBuffer(data: openArray[byte]): uint64 =
var ab = Asn1Buffer.init(data)
let fres = ab.read()
doAssert(fres.isOk() and fres.get().kind == Asn1Tag.Integer)

@@ -84,7 +84,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer

- proc getVarintDecodedValue(data: openarray[byte]): uint64 =
+ proc getVarintDecodedValue(data: openArray[byte]): uint64 =
var value: uint64
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)

@@ -97,7 +97,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer

- proc getFixed32DecodedValue(data: openarray[byte]): uint32 =
+ proc getFixed32DecodedValue(data: openArray[byte]): uint32 =
var value: float32
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)

@@ -110,7 +110,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer

- proc getFixed64DecodedValue(data: openarray[byte]): uint64 =
+ proc getFixed64DecodedValue(data: openArray[byte]): uint64 =
var value: float64
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)

@@ -129,7 +129,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer

- proc getLengthDecodedValue(data: openarray[byte]): string =
+ proc getLengthDecodedValue(data: openArray[byte]): string =
var value = newString(len(data))
var valueLen = 0
var pb = initProtoBuffer(data)

@@ -138,13 +138,13 @@ suite "MinProtobuf test suite":
value.setLen(valueLen)
value

- proc isFullZero[T: byte|char](data: openarray[T]): bool =
+ proc isFullZero[T: byte|char](data: openArray[T]): bool =
for ch in data:
if int(ch) != 0:
return false
return true

- proc corruptHeader(data: var openarray[byte], index: int) =
+ proc corruptHeader(data: var openArray[byte], index: int) =
var values = [3, 4, 6]
data[0] = data[0] and 0xF8'u8
data[0] = data[0] or byte(values[index mod len(values)])

@@ -378,7 +378,7 @@ suite "Mplex":

suite "mplex e2e":
asyncTest "read/write receiver":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -415,7 +415,7 @@ suite "Mplex":
await listenFut

asyncTest "read/write receiver lazy":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -454,7 +454,7 @@ suite "Mplex":

asyncTest "write fragmented":
let
- ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
listenJob = newFuture[void]()

var bigseq = newSeqOfCap[uint8](MaxMsgSize * 2)

@@ -506,7 +506,7 @@ suite "Mplex":
await listenFut

asyncTest "read/write initiator":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -542,7 +542,7 @@ suite "Mplex":
await listenFut

asyncTest "multiple streams":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1 = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -586,7 +586,7 @@ suite "Mplex":
await listenFut

asyncTest "multiple read/write streams":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -633,7 +633,7 @@ suite "Mplex":
await listenFut

asyncTest "channel closes listener with EOF":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]

@@ -681,7 +681,7 @@ suite "Mplex":
await acceptFut

asyncTest "channel closes dialer with EOF":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var count = 0

@@ -746,7 +746,7 @@ suite "Mplex":
await acceptFut

asyncTest "dialing mplex closes both ends":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var listenStreams: seq[Connection]

@@ -788,7 +788,7 @@ suite "Mplex":
await acceptFut

asyncTest "listening mplex closes both ends":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var mplexListen: Mplex

@@ -833,7 +833,7 @@ suite "Mplex":
await acceptFut

asyncTest "canceling mplex handler closes both ends":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var mplexHandle: Future[void]

@@ -878,7 +878,7 @@ suite "Mplex":
transport2.stop())

asyncTest "closing dialing connection should close both ends":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var listenStreams: seq[Connection]

@@ -923,7 +923,7 @@ suite "Mplex":
await acceptFut

asyncTest "canceling listening connection should close both ends":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())

var listenConn: Connection

@@ -969,7 +969,7 @@ suite "Mplex":

suite "jitter":
asyncTest "channel should be able to handle erratic read/writes":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -1041,7 +1041,7 @@ suite "Mplex":
await listenFut

asyncTest "channel should handle 1 byte read/write":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

@@ -234,7 +234,7 @@ suite "Multistream select":
await ms.handle(conn)

asyncTest "e2e - handle":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,

@@ -274,7 +274,7 @@ suite "Multistream select":
await handlerWait.wait(30.seconds)

asyncTest "e2e - ls":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let
handlerWait = newFuture[void]()

@@ -326,7 +326,7 @@ suite "Multistream select":
await listenFut.wait(5.seconds)

asyncTest "e2e - select one from a list with unsupported protos":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,

@@ -364,7 +364,7 @@ suite "Multistream select":
await transport1.stop()

asyncTest "e2e - select one with both valid":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,

@@ -58,8 +58,8 @@ suite "Name resolving":
suite "Generic Resolving":
var resolver {.threadvar.}: MockResolver

- proc testOne(input: string, output: seq[Multiaddress]): bool =
- let resolved = waitFor resolver.resolveMAddress(Multiaddress.init(input).tryGet())
+ proc testOne(input: string, output: seq[MultiAddress]): bool =
+ let resolved = waitFor resolver.resolveMAddress(MultiAddress.init(input).tryGet())
if resolved != output:
echo "Expected ", output
echo "Got ", resolved

@@ -67,10 +67,10 @@ suite "Name resolving":
return true

proc testOne(input: string, output: seq[string]): bool =
- testOne(input, output.mapIt(Multiaddress.init(it).tryGet()))
+ testOne(input, output.mapIt(MultiAddress.init(it).tryGet()))

proc testOne(input, output: string): bool =
- testOne(input, @[Multiaddress.init(output).tryGet()])
+ testOne(input, @[MultiAddress.init(output).tryGet()])

asyncSetup:
resolver = MockResolver.new()

@@ -88,7 +88,7 @@ suite "Noise":

asyncTest "e2e: handle write + noise":
let
- server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

@@ -129,7 +129,7 @@ suite "Noise":

asyncTest "e2e: handle write + noise (wrong prologue)":
let
- server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

@@ -169,7 +169,7 @@ suite "Noise":

asyncTest "e2e: handle read + noise":
let
- server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

@@ -208,7 +208,7 @@ suite "Noise":

asyncTest "e2e: handle read + noise fragmented":
let
- server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)

@@ -252,8 +252,8 @@ suite "Noise":
await listenFut

asyncTest "e2e use switch dial proto string":
- let ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
- let ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ let ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ let ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

var peerInfo1, peerInfo2: PeerInfo
var switch1, switch2: Switch

@@ -278,8 +278,8 @@ suite "Noise":
switch2.stop())

asyncTest "e2e test wrong secure negotiation":
- let ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
- let ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ let ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ let ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

var peerInfo1, peerInfo2: PeerInfo
var switch1, switch2: Switch

@@ -164,7 +164,7 @@ const
"08021220B333BE3E843339E0E2CE9E083ABC119BE05C7B65B8665ADE19E172D47BF91305"
]

- PeerIDs = [
+ PeerIds = [
"QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvWs",
"QmeasUkAi1BhVUmopWzYJ5G1PGys9T5MZ2sPn87XTyaUAM",
"Qmc3PxhMhQja8N4t7mRDyGm2vHkvcxe5Kabp2iAig1DXHb",

@@ -180,15 +180,15 @@ const
]

suite "Peer testing suite":
- test "Go PeerID test vectors":
+ test "Go PeerId test vectors":
for i in 0..<len(PrivateKeys):
var seckey = PrivateKey.init(stripSpaces(PrivateKeys[i])).get()
var pubkey = seckey.getPublicKey().get()
- var p1 = PeerID.init(seckey).get()
- var p2 = PeerID.init(pubkey).get()
- var p3 = PeerID.init(PeerIDs[i]).get()
- var b1 = Base58.decode(PeerIDs[i])
- var p4 = PeerID.init(b1).get()
+ var p1 = PeerId.init(seckey).get()
+ var p2 = PeerId.init(pubkey).get()
+ var p3 = PeerId.init(PeerIds[i]).get()
+ var b1 = Base58.decode(PeerIds[i])
+ var p4 = PeerId.init(b1).get()
var buf1 = newSeq[byte](len(p1))
var buf2 = newSeq[byte](len(p2))
var buf3 = newSeq[byte](len(p3))

@@ -200,10 +200,10 @@ suite "Peer testing suite":
p1 == p2
p1 == p4
p2 == p4
- $p1 == PeerIDs[i]
- $p2 == PeerIDs[i]
- $p3 == PeerIDs[i]
- $p4 == PeerIDs[i]
+ $p1 == PeerIds[i]
+ $p2 == PeerIds[i]
+ $p3 == PeerIds[i]
+ $p4 == PeerIds[i]
p1.match(seckey) == true
p1.match(pubkey) == true
p1.getBytes() == p2.getBytes()

@@ -12,7 +12,7 @@ suite "PeerInfo":
test "Should init with private key":
let seckey = PrivateKey.random(ECDSA, rng[]).get()
var peerInfo = PeerInfo.new(seckey)
- var peerId = PeerID.init(seckey).get()
+ var peerId = PeerId.init(seckey).get()

check peerId == peerInfo.peerId
check seckey.getPublicKey().get() == peerInfo.publicKey

@@ -12,13 +12,13 @@ suite "PeerStore":
let
# Peer 1
keyPair1 = KeyPair.random(ECDSA, rng[]).get()
- peerId1 = PeerID.init(keyPair1.secKey).get()
+ peerId1 = PeerId.init(keyPair1.seckey).get()
multiaddrStr1 = "/ip4/127.0.0.1/udp/1234/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"
multiaddr1 = MultiAddress.init(multiaddrStr1).get()
testcodec1 = "/nim/libp2p/test/0.0.1-beta1"
# Peer 2
keyPair2 = KeyPair.random(ECDSA, rng[]).get()
- peerId2 = PeerID.init(keyPair2.secKey).get()
+ peerId2 = PeerId.init(keyPair2.seckey).get()
multiaddrStr2 = "/ip4/0.0.0.0/tcp/1234/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"
multiaddr2 = MultiAddress.init(multiaddrStr2).get()
testcodec2 = "/nim/libp2p/test/0.0.2-beta1"

@@ -32,8 +32,8 @@ suite "PeerStore":
peerStore.addressBook.add(peerId2, multiaddr2)
peerStore.protoBook.add(peerId1, testcodec1)
peerStore.protoBook.add(peerId2, testcodec2)
- peerStore.keyBook.set(peerId1, keyPair1.pubKey)
- peerStore.keyBook.set(peerId2, keyPair2.pubKey)
+ peerStore.keyBook.set(peerId1, keyPair1.pubkey)
+ peerStore.keyBook.set(peerId2, keyPair2.pubkey)

# Test PeerStore::delete
check:

@@ -52,13 +52,13 @@ suite "PeerStore":
protoChanged = false
keyChanged = false

- proc addrChange(peerId: PeerID, addrs: HashSet[MultiAddress]) =
+ proc addrChange(peerId: PeerId, addrs: HashSet[MultiAddress]) =
addrChanged = true

- proc protoChange(peerId: PeerID, protos: HashSet[string]) =
+ proc protoChange(peerId: PeerId, protos: HashSet[string]) =
protoChanged = true

- proc keyChange(peerId: PeerID, publicKey: PublicKey) =
+ proc keyChange(peerId: PeerId, publicKey: PublicKey) =
keyChanged = true

peerStore.addHandlers(addrChangeHandler = addrChange,

@@ -31,7 +31,7 @@ suite "Ping":
pingReceivedCount {.threadvar.}: int

asyncSetup:
- ma = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
+ ma = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())

@@ -586,7 +586,7 @@ suite "Switch":
# for most of the steps in the upgrade flow -
# this is just a basic test for dials
asyncTest "e2e canceling dial should not leak":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)

@@ -604,7 +604,7 @@ suite "Switch":

await switch.start()

- var peerId = PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
+ var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
let connectFut = switch.connect(peerId, transport.addrs)
await sleepAsync(500.millis)
connectFut.cancel()

@@ -619,7 +619,7 @@ suite "Switch":
switch.stop())

asyncTest "e2e closing remote conn should not leak":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)

@@ -633,7 +633,7 @@ suite "Switch":

await switch.start()

- var peerId = PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
+ var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
expect LPStreamClosedError, LPStreamEOFError:
await switch.connect(peerId, transport.addrs)

@@ -673,7 +673,7 @@ suite "Switch":

await allFuturesThrowing(readers)
await switch2.stop() #Otherwise this leaks
- check await checkExpiring(not switch1.isConnected(switch2.peerInfo.peerID))
+ check await checkExpiring(not switch1.isConnected(switch2.peerInfo.peerId))

checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)

@@ -686,7 +686,7 @@ suite "Switch":
await switch2.start()
let someAddr = MultiAddress.init("/ip4/127.128.0.99").get()
let seckey = PrivateKey.random(ECDSA, rng[]).get()
- let somePeer = PeerInfo.new(secKey, [someAddr])
+ let somePeer = PeerInfo.new(seckey, [someAddr])
expect(DialFailedError):
discard await switch2.dial(somePeer.peerId, somePeer.addrs, TestCodec)
await switch2.stop()

@@ -17,7 +17,7 @@ suite "TCP transport":
checkTrackers()

asyncTest "test listener: handle write":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)

@@ -38,7 +38,7 @@ suite "TCP transport":
check string.fromBytes(msg) == "Hello!"

asyncTest "test listener: handle read":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)

@@ -171,7 +171,7 @@ proc hexChar*(c: byte, lowercase: bool = false): string =
of 0..9: result[1] = chr(t0 + ord('0'))
else: result[1] = chr(t0 - 10 + alpha)

- proc toHex*(a: openarray[byte], lowercase: bool = false): string =
+ proc toHex*(a: openArray[byte], lowercase: bool = false): string =
result = ""
for i in a:
result = result & hexChar(i, lowercase)

@@ -263,7 +263,7 @@ suite "Variable integer test suite":
buffer.setLen(PBedgeSizes[i])
check:
PB.putUVarint(buffer, length, PBedgeValues[i]).isOk()
- buffer.setlen(buffer.high)
+ buffer.setLen(buffer.high)
check:
PB.getUVarint(buffer, length, value).error() == VarintError.Incomplete

@@ -339,7 +339,7 @@ suite "Variable integer test suite":
buffer.setLen(LPedgeSizes[i])
check:
LP.putUVarint(buffer, length, LPedgeValues[i]).isOk()
- buffer.setlen(buffer.high)
+ buffer.setLen(buffer.high)
check:
LP.getUVarint(buffer, length, value).error() == VarintError.Incomplete

@@ -71,7 +71,7 @@ suite "WebSocket transport":
"/ip4/0.0.0.0/tcp/0/wss")

asyncTest "Hostname verification":
- let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()]
+ let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()]
let transport1 = WsTransport.new(Upgrade(), TLSPrivateKey.init(SecureKey), TLSCertificate.init(SecureCert), {TLSFlags.NoVerifyHost})

await transport1.start(ma)