Fixes for style check (#676)

Tanguy authored on 2021-12-16 11:05:20 +01:00; committed by GitHub
parent c49932b55a
commit df566e69db
77 changed files with 681 additions and 681 deletions
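
The renames throughout this diff all stem from the two flags added to the .nimble test command further down: with --styleCheck:usages --styleCheck:hint, the Nim compiler reports every identifier whose spelling at a usage site differs from its declaration (PeerID vs PeerId, openarray vs openArray, unsafeaddr vs unsafeAddr, and so on). A minimal sketch of what the checker flags, using an invented proc name outside this repository:

proc dialPeer(address: string) = discard   # declared in camelCase

dialpeer("/ip4/127.0.0.1/tcp/4001")        # usage spelled differently
# nim c --styleCheck:usages --styleCheck:hint sketch.nim
# reports roughly: Hint: 'dialpeer' should be: 'dialPeer' [Name]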


@ -56,7 +56,7 @@ proc dialPeer(p: ChatProto, address: string) {.async.} =
.tryGet()
.protoAddress()
.tryGet()
remotePeer = PeerID.init(peerIdBytes).tryGet()
remotePeer = PeerId.init(peerIdBytes).tryGet()
# split the wire address
ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
tcpAddr = multiAddr[multiCodec("tcp")].tryGet()
@ -182,7 +182,7 @@ proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
chatProto.started = true
let id = $switch.peerInfo.peerId
echo "PeerID: " & id
echo "PeerId: " & id
echo "listening on: "
for a in switch.peerInfo.addrs:
echo &"{a}/p2p/{id}"


@ -92,7 +92,7 @@ proc dialPeer(c: Chat, address: string) {.async.} =
.tryGet()
.protoAddress()
.tryGet()
remotePeer = PeerID.init(peerIdBytes).tryGet()
remotePeer = PeerId.init(peerIdBytes).tryGet()
# split the wire address
ip4Addr = multiAddr[multiCodec("ip4")].tryGet()
tcpAddr = multiAddr[multiCodec("tcp")].tryGet()
@ -184,7 +184,7 @@ proc main() {.async.} =
await switch.start()
let id = $switch.peerInfo.peerId
echo "PeerID: " & id
echo "PeerId: " & id
echo "listening on: "
for a in switch.peerInfo.addrs:
echo &"{a}/p2p/{id}"


@ -41,7 +41,7 @@ proc serveThread(udata: CustomData) {.async.} =
if line.startsWith("/connect"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerID.init(parts[1])
var peerId = PeerId.init(parts[1])
var address = MultiAddress.init(multiCodec("p2p-circuit"))
address &= MultiAddress.init(multiCodec("p2p"), peerId)
echo "= Searching for peer ", peerId.pretty()
@ -59,7 +59,7 @@ proc serveThread(udata: CustomData) {.async.} =
elif line.startsWith("/search"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerID.init(parts[1])
var peerId = PeerId.init(parts[1])
echo "= Searching for peer ", peerId.pretty()
var id = await udata.api.dhtFindPeer(peerId)
echo "= Peer " & parts[1] & " found at addresses:"
@ -68,7 +68,7 @@ proc serveThread(udata: CustomData) {.async.} =
elif line.startsWith("/consearch"):
var parts = line.split(" ")
if len(parts) == 2:
var peerId = PeerID.init(parts[1])
var peerId = PeerId.init(parts[1])
echo "= Searching for peers connected to peer ", parts[1]
var peers = await udata.api.dhtFindPeersConnectedToPeer(peerId)
echo "= Found ", len(peers), " connected to peer ", parts[1]
@ -127,7 +127,7 @@ proc main() {.async.} =
echo ">> ", line
await data.api.addHandler(ServerProtocols, streamHandler)
echo "= Your PeerID is ", id.peer.pretty()
echo "= Your PeerId is ", id.peer.pretty()
await data.serveFut
when isMainModule:


@ -45,10 +45,10 @@ proc main() {.async, gcsafe.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port
# `tryGet` will throw an exception if the Multiaddress failed
# `tryGet` will throw an exception if the MultiAddress failed
# (for instance, if the address is not well formatted)
ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
# setup the custom proto
let testProto = TestProto.new()
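
A hedged illustration of the tryGet comment above (not part of the tutorial; the malformed address string and the isErr call on the returned Result are assumptions, and the tutorial's existing imports are assumed to be in scope):

let malformed = MultiAddress.init("/ip4/0.0.0.0/tcp/not-a-port")
assert malformed.isErr()      # parsing failed, so tryGet() here would raise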


@ -71,8 +71,8 @@ proc dumpHex*(pbytes: pointer, nbytes: int, items = 1, ascii = true): string =
result = result & asciiText
result = result & "\n"
proc dumpHex*[T](v: openarray[T], items: int = 0, ascii = true): string =
## Return hexadecimal memory dump representation of openarray[T] ``v``.
proc dumpHex*[T](v: openArray[T], items: int = 0, ascii = true): string =
## Return hexadecimal memory dump representation of openArray[T] ``v``.
## ``items`` - number of bytes in group (supported ``items`` count is
## 0, 1, 2, 4, 8). If ``items`` is ``0`` group size will depend on
## ``sizeof(T)``.


@ -21,7 +21,7 @@ requires "nim >= 1.2.0",
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
let env_nimflags = getEnv("NIMFLAGS")
var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off " & env_nimflags
var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics --verbosity:0 --hints:off --styleCheck:usages --styleCheck:hint " & env_nimflags
excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
@ -34,7 +34,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false) =
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off"
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off "
excstr.add(" --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off")
excstr.add(" examples/" & filename)
exec excstr


@ -198,7 +198,7 @@ proc build*(b: SwitchBuilder): Switch
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
secureManagers: openarray[SecureProtocol] = [
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise,
],
transportFlags: set[ServerFlags] = {},


@ -71,7 +71,7 @@ const
template orError*(exp: untyped, err: untyped): untyped =
(exp.mapErr do (_: auto) -> auto: err)
proc decode(data: openarray[byte]): Result[Cid, CidError] =
proc decode(data: openArray[byte]): Result[Cid, CidError] =
if len(data) == 34 and data[0] == 0x12'u8 and data[1] == 0x20'u8:
ok(Cid(
cidver: CIDv0,
@ -114,7 +114,7 @@ proc decode(data: openarray[byte]): Result[Cid, CidError] =
hpos: offset,
data: vb))
proc decode(data: openarray[char]): Result[Cid, CidError] =
proc decode(data: openArray[char]): Result[Cid, CidError] =
var buffer: seq[byte]
var plen = 0
if len(data) < 2:
@ -137,7 +137,7 @@ proc decode(data: openarray[char]): Result[Cid, CidError] =
return err(CidError.Incorrect)
decode(buffer)
proc validate*(ctype: typedesc[Cid], data: openarray[byte]): bool =
proc validate*(ctype: typedesc[Cid], data: openArray[byte]): bool =
## Returns ``true`` if data has a valid binary CID representation.
var version, codec: uint64
var res: VarintResult[void]
@ -185,7 +185,7 @@ proc version*(cid: Cid): CidVersion =
## Returns CID version
result = cid.cidver
proc init*[T: char|byte](ctype: typedesc[Cid], data: openarray[T]): Result[Cid, CidError] =
proc init*[T: char|byte](ctype: typedesc[Cid], data: openArray[T]): Result[Cid, CidError] =
## Create new content identifier using array of bytes or string ``data``.
decode(data)
@ -275,7 +275,7 @@ proc `$`*(cid: Cid): string =
if cid.cidver == CIDv0:
BTCBase58.encode(cid.data.buffer)
elif cid.cidver == CIDv1:
let res = Multibase.encode("base58btc", cid.data.buffer)
let res = MultiBase.encode("base58btc", cid.data.buffer)
if res.isOk():
res.get()
else:


@ -75,7 +75,7 @@ type
maxConnsPerPeer: int
inSema*: AsyncSemaphore
outSema*: AsyncSemaphore
conns: Table[PeerID, HashSet[Connection]]
conns: Table[PeerId, HashSet[Connection]]
muxed: Table[Connection, MuxerHolder]
connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
@ -103,7 +103,7 @@ proc new*(C: type ConnManager,
inSema: inSema,
outSema: outSema)
proc connCount*(c: ConnManager, peerId: PeerID): int =
proc connCount*(c: ConnManager, peerId: PeerId): int =
c.conns.getOrDefault(peerId).len
proc addConnEventHandler*(c: ConnManager,
@ -219,7 +219,7 @@ proc contains*(c: ConnManager, conn: Connection): bool =
return conn in c.conns.getOrDefault(conn.peerId)
proc contains*(c: ConnManager, peerId: PeerID): bool =
proc contains*(c: ConnManager, peerId: PeerId): bool =
peerId in c.conns
proc contains*(c: ConnManager, muxer: Muxer): bool =
@ -334,7 +334,7 @@ proc onClose(c: ConnManager, conn: Connection) {.async.} =
asyncSpawn c.peerCleanup(conn)
proc selectConn*(c: ConnManager,
peerId: PeerID,
peerId: PeerId,
dir: Direction): Connection =
## Select a connection for the provided peer and direction
##
@ -345,7 +345,7 @@ proc selectConn*(c: ConnManager,
if conns.len > 0:
return conns[0]
proc selectConn*(c: ConnManager, peerId: PeerID): Connection =
proc selectConn*(c: ConnManager, peerId: PeerId): Connection =
## Select a connection for the provided peer, giving priority
## to outgoing connections
##
@ -506,7 +506,7 @@ proc storeMuxer*(c: ConnManager,
asyncSpawn c.onConnUpgraded(muxer.connection)
proc getStream*(c: ConnManager,
peerId: PeerID,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the provided peer
## with the given direction
@ -517,7 +517,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerID): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed peer from any connection
##
@ -534,7 +534,7 @@ proc getStream*(c: ConnManager,
if not(isNil(muxer)):
return await muxer.newStream()
proc dropPeer*(c: ConnManager, peerId: PeerID) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId


@ -37,17 +37,17 @@ type
ChaChaPolyNonce* = array[ChaChaPolyNonceSize, byte]
ChaChaPolyTag* = array[ChaChaPolyTagSize, byte]
proc intoChaChaPolyKey*(s: openarray[byte]): ChaChaPolyKey =
proc intoChaChaPolyKey*(s: openArray[byte]): ChaChaPolyKey =
assert s.len == ChaChaPolyKeySize
copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyKeySize)
copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyKeySize)
proc intoChaChaPolyNonce*(s: openarray[byte]): ChaChaPolyNonce =
proc intoChaChaPolyNonce*(s: openArray[byte]): ChaChaPolyNonce =
assert s.len == ChaChaPolyNonceSize
copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyNonceSize)
copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyNonceSize)
proc intoChaChaPolyTag*(s: openarray[byte]): ChaChaPolyTag =
proc intoChaChaPolyTag*(s: openArray[byte]): ChaChaPolyTag =
assert s.len == ChaChaPolyTagSize
copyMem(addr result[0], unsafeaddr s[0], ChaChaPolyTagSize)
copyMem(addr result[0], unsafeAddr s[0], ChaChaPolyTagSize)
# bearssl allows us to use optimized versions
# this is reconciled at runtime
@ -57,17 +57,17 @@ proc encrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openarray[byte],
aad: openarray[byte]) =
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeaddr aad[0]
unsafeAddr aad[0]
else:
nil
ourPoly1305CtmulRun(
unsafeaddr key[0],
unsafeaddr nonce[0],
unsafeAddr key[0],
unsafeAddr nonce[0],
addr data[0],
data.len,
ad,
@ -80,17 +80,17 @@ proc decrypt*(_: type[ChaChaPoly],
key: ChaChaPolyKey,
nonce: ChaChaPolyNonce,
tag: var ChaChaPolyTag,
data: var openarray[byte],
aad: openarray[byte]) =
data: var openArray[byte],
aad: openArray[byte]) =
let
ad = if aad.len > 0:
unsafeaddr aad[0]
unsafeAddr aad[0]
else:
nil
ourPoly1305CtmulRun(
unsafeaddr key[0],
unsafeaddr nonce[0],
unsafeAddr key[0],
unsafeAddr nonce[0],
addr data[0],
data.len,
ad,
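
A hedged round-trip sketch based only on the signatures above (the module path, the all-zero key and nonce, and the remark on tag handling are assumptions; real callers must derive the key, use a fresh nonce and verify the tag themselves):

import libp2p/crypto/chacha20poly1305   # assumed module path

var
  key: ChaChaPolyKey        # zero-initialized, for illustration only
  nonce: ChaChaPolyNonce
  tag: ChaChaPolyTag
  data = @[byte 1, 2, 3, 4]
let aad: seq[byte] = @[]

ChaChaPoly.encrypt(key, nonce, tag, data, aad)   # data becomes ciphertext, tag is written
ChaChaPoly.decrypt(key, nonce, tag, data, aad)   # data is back to 1, 2, 3, 4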


@ -130,8 +130,8 @@ type
skkey*: SkPrivateKey
else:
discard
of PKSCheme.ECDSA:
when supported(PKSCheme.ECDSA):
of PKScheme.ECDSA:
when supported(PKScheme.ECDSA):
eckey*: ecnist.EcPrivateKey
else:
discard
@ -345,7 +345,7 @@ proc getPublicKey*(key: PrivateKey): CryptoResult[PublicKey] =
err(SchemeError)
proc toRawBytes*(key: PrivateKey | PublicKey,
data: var openarray[byte]): CryptoResult[int] =
data: var openArray[byte]): CryptoResult[int] =
## Serialize private key ``key`` (using scheme's own serialization) and store
## it to ``data``.
##
@ -397,7 +397,7 @@ proc getRawBytes*(key: PrivateKey | PublicKey): CryptoResult[seq[byte]] =
else:
err(SchemeError)
proc toBytes*(key: PrivateKey, data: var openarray[byte]): CryptoResult[int] =
proc toBytes*(key: PrivateKey, data: var openArray[byte]): CryptoResult[int] =
## Serialize private key ``key`` (using libp2p protobuf scheme) and store
## it to ``data``.
##
@ -411,7 +411,7 @@ proc toBytes*(key: PrivateKey, data: var openarray[byte]): CryptoResult[int] =
copyMem(addr data[0], addr msg.buffer[0], blen)
ok(blen)
proc toBytes*(key: PublicKey, data: var openarray[byte]): CryptoResult[int] =
proc toBytes*(key: PublicKey, data: var openArray[byte]): CryptoResult[int] =
## Serialize public key ``key`` (using libp2p protobuf scheme) and store
## it to ``data``.
##
@ -425,7 +425,7 @@ proc toBytes*(key: PublicKey, data: var openarray[byte]): CryptoResult[int] =
copyMem(addr data[0], addr msg.buffer[0], blen)
ok(blen)
proc toBytes*(sig: Signature, data: var openarray[byte]): int =
proc toBytes*(sig: Signature, data: var openArray[byte]): int =
## Serialize signature ``sig`` and store it to ``data``.
##
## Returns number of bytes (octets) needed to store signature ``sig``.
@ -455,7 +455,7 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data
proc init*[T: PrivateKey|PublicKey](key: var T, data: openarray[byte]): bool =
proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@ -517,7 +517,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openarray[byte]): bool =
else:
false
proc init*(sig: var Signature, data: openarray[byte]): bool =
proc init*(sig: var Signature, data: openArray[byte]): bool =
## Initialize signature ``sig`` from raw binary form.
##
## Returns ``true`` on success.
@ -540,7 +540,7 @@ proc init*(sig: var Signature, data: string): bool =
sig.init(ncrutils.fromHex(data))
proc init*(t: typedesc[PrivateKey],
data: openarray[byte]): CryptoResult[PrivateKey] =
data: openArray[byte]): CryptoResult[PrivateKey] =
## Create new private key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@ -549,7 +549,7 @@ proc init*(t: typedesc[PrivateKey],
ok(res)
proc init*(t: typedesc[PublicKey],
data: openarray[byte]): CryptoResult[PublicKey] =
data: openArray[byte]): CryptoResult[PublicKey] =
## Create new public key from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@ -558,7 +558,7 @@ proc init*(t: typedesc[PublicKey],
ok(res)
proc init*(t: typedesc[Signature],
data: openarray[byte]): CryptoResult[Signature] =
data: openArray[byte]): CryptoResult[Signature] =
## Create new signature from libp2p's protobuf serialized binary form.
var res: t
if not res.init(data):
@ -713,7 +713,7 @@ proc `$`*(sig: Signature): string =
result = ncrutils.toHex(sig.data)
proc sign*(key: PrivateKey,
data: openarray[byte]): CryptoResult[Signature] {.gcsafe.} =
data: openArray[byte]): CryptoResult[Signature] {.gcsafe.} =
## Sign message ``data`` using private key ``key`` and return generated
## signature in raw binary form.
var res: Signature
@ -747,7 +747,7 @@ proc sign*(key: PrivateKey,
else:
err(SchemeError)
proc verify*(sig: Signature, message: openarray[byte], key: PublicKey): bool =
proc verify*(sig: Signature, message: openArray[byte], key: PublicKey): bool =
## Verify signature ``sig`` using message ``message`` and public key ``key``.
## Return ``true`` if message signature is valid.
case key.scheme:
@ -898,8 +898,8 @@ proc ephemeral*(
else:
ephemeral(Secp521r1, rng)
proc getOrder*(remotePubkey, localNonce: openarray[byte],
localPubkey, remoteNonce: openarray[byte]): CryptoResult[int] =
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.
var ctx: sha256
ctx.init()
@ -943,7 +943,7 @@ proc selectBest*(order: int, p1, p2: string): string =
if felement == selement:
return felement
proc createProposal*(nonce, pubkey: openarray[byte],
proc createProposal*(nonce, pubkey: openArray[byte],
exchanges, ciphers, hashes: string): seq[byte] =
## Create SecIO proposal message using random ``nonce``, local public key
## ``pubkey``, comma-delimited list of supported exchange schemes
@ -977,7 +977,7 @@ proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
r5.isOk() and r5.get()
proc createExchange*(epubkey, signature: openarray[byte]): seq[byte] =
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
## signature of proposal blocks ``signature``.
var msg = initProtoBuffer({WithUint32BeLength})


@ -31,9 +31,9 @@ type
Curve25519Error* = enum
Curver25519GenError
proc intoCurve25519Key*(s: openarray[byte]): Curve25519Key =
proc intoCurve25519Key*(s: openArray[byte]): Curve25519Key =
assert s.len == Curve25519KeySize
copyMem(addr result[0], unsafeaddr s[0], Curve25519KeySize)
copyMem(addr result[0], unsafeAddr s[0], Curve25519KeySize)
proc getBytes*(key: Curve25519Key): seq[byte] = @key


@ -94,7 +94,7 @@ proc NEQ(x, y: uint32): uint32 {.inline.} =
proc LT0(x: int32): uint32 {.inline.} =
result = cast[uint32](x) shr 31
proc checkScalar(scalar: openarray[byte], curve: cint): uint32 =
proc checkScalar(scalar: openArray[byte], curve: cint): uint32 =
## Return ``1`` if all of the following hold:
## - len(``scalar``) <= ``orderlen``
## - ``scalar`` != 0
@ -116,7 +116,7 @@ proc checkScalar(scalar: openarray[byte], curve: cint): uint32 =
c = -1
result = NEQ(z, 0'u32) and LT0(c)
proc checkPublic(key: openarray[byte], curve: cint): uint32 =
proc checkPublic(key: openArray[byte], curve: cint): uint32 =
## Return ``1`` if public key ``key`` is on curve.
var ckey = @key
var x = [0x00'u8, 0x01'u8]
@ -315,7 +315,7 @@ proc `$`*(sig: EcSignature): string =
else:
result = ncrutils.toHex(sig.buffer)
proc toRawBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
proc toRawBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC private key ``seckey`` to raw binary form and store it
## to ``data``.
##
@ -331,7 +331,7 @@ proc toRawBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int]
else:
err(EcKeyIncorrectError)
proc toRawBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
proc toRawBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC public key ``pubkey`` to uncompressed form specified in
## section 4.3.6 of ANSI X9.62.
##
@ -347,7 +347,7 @@ proc toRawBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int]
else:
err(EcKeyIncorrectError)
proc toRawBytes*(sig: EcSignature, data: var openarray[byte]): int =
proc toRawBytes*(sig: EcSignature, data: var openArray[byte]): int =
## Serialize EC signature ``sig`` to raw binary form and store it to ``data``.
##
## Returns number of bytes (octets) needed to store EC signature, or `0`
@ -358,7 +358,7 @@ proc toRawBytes*(sig: EcSignature, data: var openarray[byte]): int =
if len(sig.buffer) > 0:
copyMem(addr data[0], unsafeAddr sig.buffer[0], len(sig.buffer))
proc toBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
proc toBytes*(seckey: EcPrivateKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC private key ``seckey`` to ASN.1 DER binary form and store it
## to ``data``.
##
@ -408,7 +408,7 @@ proc toBytes*(seckey: EcPrivateKey, data: var openarray[byte]): EcResult[int] =
err(EcKeyIncorrectError)
proc toBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
proc toBytes*(pubkey: EcPublicKey, data: var openArray[byte]): EcResult[int] =
## Serialize EC public key ``pubkey`` to ASN.1 DER binary form and store it
## to ``data``.
##
@ -445,7 +445,7 @@ proc toBytes*(pubkey: EcPublicKey, data: var openarray[byte]): EcResult[int] =
else:
err(EcKeyIncorrectError)
proc toBytes*(sig: EcSignature, data: var openarray[byte]): EcResult[int] =
proc toBytes*(sig: EcSignature, data: var openArray[byte]): EcResult[int] =
## Serialize EC signature ``sig`` to ASN.1 DER binary form and store it
## to ``data``.
##
@ -592,7 +592,7 @@ proc `==`*(a, b: EcSignature): bool =
else:
CT.isEqual(a.buffer, b.buffer)
proc init*(key: var EcPrivateKey, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize EC `private key` or `signature` ``key`` from ASN.1 DER binary
## representation ``data``.
##
@ -645,7 +645,7 @@ proc init*(key: var EcPrivateKey, data: openarray[byte]): Result[void, Asn1Error
else:
err(Asn1Error.Incorrect)
proc init*(pubkey: var EcPublicKey, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize EC public key ``pubkey`` from ASN.1 DER binary representation
## ``data``.
##
@ -704,7 +704,7 @@ proc init*(pubkey: var EcPublicKey, data: openarray[byte]): Result[void, Asn1Err
else:
err(Asn1Error.Incorrect)
proc init*(sig: var EcSignature, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(sig: var EcSignature, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize EC signature ``sig`` from raw binary representation ``data``.
##
## Procedure returns ``Result[void, Asn1Error]``.
@ -724,7 +724,7 @@ proc init*[T: EcPKI](sospk: var T,
sospk.init(ncrutils.fromHex(data))
proc init*(t: typedesc[EcPrivateKey],
data: openarray[byte]): EcResult[EcPrivateKey] =
data: openArray[byte]): EcResult[EcPrivateKey] =
## Initialize EC private key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPrivateKey
@ -735,7 +735,7 @@ proc init*(t: typedesc[EcPrivateKey],
ok(key)
proc init*(t: typedesc[EcPublicKey],
data: openarray[byte]): EcResult[EcPublicKey] =
data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from ASN.1 DER binary representation ``data`` and
## return constructed object.
var key: EcPublicKey
@ -746,7 +746,7 @@ proc init*(t: typedesc[EcPublicKey],
ok(key)
proc init*(t: typedesc[EcSignature],
data: openarray[byte]): EcResult[EcSignature] =
data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var sig: EcSignature
@ -761,7 +761,7 @@ proc init*[T: EcPKI](t: typedesc[T], data: string): EcResult[T] =
## string representation ``data`` and return constructed object.
t.init(ncrutils.fromHex(data))
proc initRaw*(key: var EcPrivateKey, data: openarray[byte]): bool =
proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool =
## Initialize EC `private key` or `scalar` ``key`` from raw binary
## representation ``data``.
##
@ -790,7 +790,7 @@ proc initRaw*(key: var EcPrivateKey, data: openarray[byte]): bool =
key.key.curve = curve
result = true
proc initRaw*(pubkey: var EcPublicKey, data: openarray[byte]): bool =
proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool =
## Initialize EC public key ``pubkey`` from raw binary representation
## ``data``.
##
@ -821,7 +821,7 @@ proc initRaw*(pubkey: var EcPublicKey, data: openarray[byte]): bool =
pubkey.key.curve = curve
result = true
proc initRaw*(sig: var EcSignature, data: openarray[byte]): bool =
proc initRaw*(sig: var EcSignature, data: openArray[byte]): bool =
## Initialize EC signature ``sig`` from raw binary representation ``data``.
##
## Length of ``data`` array must be ``Sig256Length``, ``Sig384Length``
@ -844,7 +844,7 @@ proc initRaw*[T: EcPKI](sospk: var T, data: string): bool {.inline.} =
result = sospk.initRaw(ncrutils.fromHex(data))
proc initRaw*(t: typedesc[EcPrivateKey],
data: openarray[byte]): EcResult[EcPrivateKey] =
data: openArray[byte]): EcResult[EcPrivateKey] =
## Initialize EC private key from raw binary representation ``data`` and
## return constructed object.
var res: EcPrivateKey
@ -854,7 +854,7 @@ proc initRaw*(t: typedesc[EcPrivateKey],
ok(res)
proc initRaw*(t: typedesc[EcPublicKey],
data: openarray[byte]): EcResult[EcPublicKey] =
data: openArray[byte]): EcResult[EcPublicKey] =
## Initialize EC public key from raw binary representation ``data`` and
## return constructed object.
var res: EcPublicKey
@ -864,7 +864,7 @@ proc initRaw*(t: typedesc[EcPublicKey],
ok(res)
proc initRaw*(t: typedesc[EcSignature],
data: openarray[byte]): EcResult[EcSignature] =
data: openArray[byte]): EcResult[EcSignature] =
## Initialize EC signature from raw binary representation ``data`` and
## return constructed object.
var res: EcSignature
@ -900,7 +900,7 @@ proc scalarMul*(pub: EcPublicKey, sec: EcPrivateKey): EcPublicKey =
result = key
proc toSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey,
data: var openarray[byte]): int =
data: var openArray[byte]): int =
## Calculate ECDHE shared secret using Go's elliptic/curve approach, using
## remote public key ``pubkey`` and local private key ``seckey`` and store
## shared secret to ``data``.
@ -937,7 +937,7 @@ proc getSecret*(pubkey: EcPublicKey, seckey: EcPrivateKey): seq[byte] =
copyMem(addr result[0], addr data[0], res)
proc sign*[T: byte|char](seckey: EcPrivateKey,
message: openarray[T]): EcResult[EcSignature] {.gcsafe.} =
message: openArray[T]): EcResult[EcSignature] {.gcsafe.} =
## Get ECDSA signature of data ``message`` using private key ``seckey``.
if isNil(seckey):
return err(EcKeyIncorrectError)
@ -966,7 +966,7 @@ proc sign*[T: byte|char](seckey: EcPrivateKey,
else:
err(EcKeyIncorrectError)
proc verify*[T: byte|char](sig: EcSignature, message: openarray[T],
proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
pubkey: EcPublicKey): bool {.inline.} =
## Verify ECDSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.


@ -165,30 +165,30 @@ proc feCopy(h: var Fe, f: Fe) =
h[8] = f8
h[9] = f9
proc load3(inp: openarray[byte]): uint64 =
proc load_3(inp: openArray[byte]): uint64 =
result = cast[uint64](inp[0])
result = result or (cast[uint64](inp[1]) shl 8)
result = result or (cast[uint64](inp[2]) shl 16)
proc load4(inp: openarray[byte]): uint64 =
proc load_4(inp: openArray[byte]): uint64 =
result = cast[uint64](inp[0])
result = result or (cast[uint64](inp[1]) shl 8)
result = result or (cast[uint64](inp[2]) shl 16)
result = result or (cast[uint64](inp[3]) shl 24)
proc feFromBytes(h: var Fe, s: openarray[byte]) =
proc feFromBytes(h: var Fe, s: openArray[byte]) =
var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
var h0 = cast[int64](load4(s.toOpenArray(0, 3)))
var h1 = cast[int64](load3(s.toOpenArray(4, 6))) shl 6
var h2 = cast[int64](load3(s.toOpenArray(7, 9))) shl 5
var h3 = cast[int64](load3(s.toOpenArray(10, 12))) shl 3
var h4 = cast[int64](load3(s.toOpenArray(13, 15))) shl 2
var h5 = cast[int64](load4(s.toOpenArray(16, 19)))
var h6 = cast[int64](load3(s.toOpenArray(20, 22))) shl 7
var h7 = cast[int64](load3(s.toOpenArray(23, 25))) shl 5
var h8 = cast[int64](load3(s.toOpenArray(26, 28))) shl 4
var h9 = (cast[int64](load3(s.toOpenArray(29, 31))) and 8388607'i32) shl 2
var h0 = cast[int64](load_4(s.toOpenArray(0, 3)))
var h1 = cast[int64](load_3(s.toOpenArray(4, 6))) shl 6
var h2 = cast[int64](load_3(s.toOpenArray(7, 9))) shl 5
var h3 = cast[int64](load_3(s.toOpenArray(10, 12))) shl 3
var h4 = cast[int64](load_3(s.toOpenArray(13, 15))) shl 2
var h5 = cast[int64](load_4(s.toOpenArray(16, 19)))
var h6 = cast[int64](load_3(s.toOpenArray(20, 22))) shl 7
var h7 = cast[int64](load_3(s.toOpenArray(23, 25))) shl 5
var h8 = cast[int64](load_3(s.toOpenArray(26, 28))) shl 4
var h9 = (cast[int64](load_3(s.toOpenArray(29, 31))) and 8388607'i32) shl 2
c9 = ashr((h9 + (1'i64 shl 24)), 25); h0 = h0 + (c9 * 19); h9 -= (c9 shl 25)
c1 = ashr((h1 + (1'i64 shl 24)), 25); h2 = h2 + c1; h1 -= (c1 shl 25)
@ -213,7 +213,7 @@ proc feFromBytes(h: var Fe, s: openarray[byte]) =
h[8] = cast[int32](h8)
h[9] = cast[int32](h9)
proc feToBytes(s: var openarray[byte], h: Fe) =
proc feToBytes(s: var openArray[byte], h: Fe) =
var h0 = h[0]; var h1 = h[1]; var h2 = h[2]; var h3 = h[3]; var h4 = h[4]
var h5 = h[5]; var h6 = h[6]; var h7 = h[7]; var h8 = h[8]; var h9 = h[9]
var q, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int32
@ -450,7 +450,7 @@ proc feNeg(h: var Fe, f: Fe) =
h[0] = h0; h[1] = h1; h[2] = h2; h[3] = h3; h[4] = h4
h[5] = h5; h[6] = h6; h[7] = h7; h[8] = h8; h[9] = h9
proc verify32(x: openarray[byte], y: openarray[byte]): int32 =
proc verify32(x: openArray[byte], y: openArray[byte]): int32 =
var d = 0'u32
d = d or (x[0] xor y[0])
d = d or (x[1] xor y[1])
@ -800,7 +800,7 @@ proc geAdd(r: var GeP1P1, p: GeP3, q: GeCached) =
feAdd(r.z, t0, r.t)
feSub(r.t, t0, r.t)
proc geFromBytesNegateVartime(h: var GeP3, s: openarray[byte]): int32 =
proc geFromBytesNegateVartime(h: var GeP3, s: openArray[byte]): int32 =
var u, v, v3, vxx, check: Fe
feFromBytes(h.y, s)
@ -876,12 +876,12 @@ proc geSub(r: var GeP1P1, p: GeP3, q: GeCached) =
feSub(r.z, t0, r.t)
feAdd(r.t, t0, r.t)
proc geToBytes(s: var openarray[byte], h: GeP2) =
proc geToBytes(s: var openArray[byte], h: GeP2) =
var recip, x, y: Fe
feInvert(recip, h.z)
feMul(x, h.x, recip)
feMul(y, h.y, recip)
feTobytes(s, y)
feToBytes(s, y)
s[31] = s[31] xor cast[byte](feIsNegative(x) shl 7)
proc geP1P1toP2(r: var GeP2, p: GeP1P1) =
@ -925,10 +925,10 @@ proc geP3toP2(r: var GeP2, p: GeP3) =
proc geP3dbl(r: var GeP1P1, p: GeP3) =
var q: GeP2
geP3ToP2(q, p)
geP3toP2(q, p)
geP2dbl(r, q)
proc geP3ToBytes(s: var openarray[byte], h: GeP3) =
proc geP3ToBytes(s: var openArray[byte], h: GeP3) =
var recip, x, y: Fe
feInvert(recip, h.z);
@ -985,7 +985,7 @@ proc select(t: var GePrecomp, pos: int, b: int8) =
feNeg(minust.xy2d, t.xy2d)
cmov(t, minust, bnegative)
proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
proc geScalarMultBase(h: var GeP3, a: openArray[byte]) =
var e: array[64, int8]
var carry: int8
var r: GeP1P1
@ -1010,8 +1010,8 @@ proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
geMadd(r, h, t)
geP1P1toP3(h, r)
geP3dbl(r, h); geP1P1ToP2(s, r)
geP2dbl(r, s); geP1P1ToP2(s, r)
geP3dbl(r, h); geP1P1toP2(s, r)
geP2dbl(r, s); geP1P1toP2(s, r)
geP2dbl(r, s); geP1P1toP2(s, r)
geP2dbl(r, s); geP1P1toP3(h, r)
@ -1020,7 +1020,7 @@ proc geScalarMultBase(h: var GeP3, a: openarray[byte]) =
geMadd(r, h, t)
geP1P1toP3(h, r)
proc scMulAdd(s: var openarray[byte], a, b, c: openarray[byte]) =
proc scMulAdd(s: var openArray[byte], a, b, c: openArray[byte]) =
var a0 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(0, 2)))
var a1 = 2097151'i64 and cast[int64](load_4(a.toOpenArray(2, 5)) shr 5)
var a2 = 2097151'i64 and cast[int64](load_3(a.toOpenArray(5, 7)) shr 2)
@ -1320,7 +1320,7 @@ proc scMulAdd(s: var openarray[byte], a, b, c: openarray[byte]) =
s[30] = cast[uint8](ashr(s11, 9))
s[31] = cast[uint8](ashr(s11, 17))
proc scReduce(s: var openarray[byte]) =
proc scReduce(s: var openArray[byte]) =
var s0 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(0, 2)));
var s1 = 2097151'i64 and cast[int64](load_4(s.toOpenArray(2, 5)) shr 5)
var s2 = 2097151'i64 and cast[int64](load_3(s.toOpenArray(5, 7)) shr 2)
@ -1546,7 +1546,7 @@ proc scReduce(s: var openarray[byte]) =
s[30] = cast[byte](ashr(s11, 9))
s[31] = cast[byte](ashr(s11, 17))
proc slide(r: var openarray[int8], a: openarray[byte]) =
proc slide(r: var openArray[int8], a: openArray[byte]) =
for i in 0..<256:
r[i] = cast[int8](1'u8 and (a[i shr 3] shr (i and 7)))
for i in 0..<256:
@ -1567,8 +1567,8 @@ proc slide(r: var openarray[int8], a: openarray[byte]) =
break
inc(b)
proc geDoubleScalarMultVartime(r: var GeP2, a: openarray[byte], A: GeP3,
b: openarray[byte]) =
proc geDoubleScalarMultVartime(r: var GeP2, a: openArray[byte], A: GeP3,
b: openArray[byte]) =
var
aslide: array[256, int8]
bslide: array[256, int8]
@ -1632,7 +1632,7 @@ proc NEQ(x, y: uint32): uint32 {.inline.} =
proc LT0(x: int32): uint32 {.inline.} =
result = cast[uint32](x) shr 31
proc checkScalar*(scalar: openarray[byte]): uint32 =
proc checkScalar*(scalar: openArray[byte]): uint32 =
var z = 0'u32
var c = 0'i32
for u in scalar:
@ -1686,7 +1686,7 @@ proc getPublicKey*(key: EdPrivateKey): EdPublicKey =
## Calculate and return ED25519 public key from private key ``key``.
copyMem(addr result.data[0], unsafeAddr key.data[32], 32)
proc toBytes*(key: EdPrivateKey, data: var openarray[byte]): int =
proc toBytes*(key: EdPrivateKey, data: var openArray[byte]): int =
## Serialize ED25519 `private key` ``key`` to raw binary form and store it
## to ``data``.
##
@ -1696,7 +1696,7 @@ proc toBytes*(key: EdPrivateKey, data: var openarray[byte]): int =
if len(data) >= result:
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
proc toBytes*(key: EdPublicKey, data: var openarray[byte]): int =
proc toBytes*(key: EdPublicKey, data: var openArray[byte]): int =
## Serialize ED25519 `public key` ``key`` to raw binary form and store it
## to ``data``.
##
@ -1706,7 +1706,7 @@ proc toBytes*(key: EdPublicKey, data: var openarray[byte]): int =
if len(data) >= result:
copyMem(addr data[0], unsafeAddr key.data[0], len(key.data))
proc toBytes*(sig: EdSignature, data: var openarray[byte]): int =
proc toBytes*(sig: EdSignature, data: var openArray[byte]): int =
## Serialize ED25519 `signature` ``sig`` to raw binary form and store it
## to ``data``.
##
@ -1749,7 +1749,7 @@ proc `$`*(sig: EdSignature): string =
## Return string representation of ED25519 `signature`.
ncrutils.toHex(sig.data)
proc init*(key: var EdPrivateKey, data: openarray[byte]): bool =
proc init*(key: var EdPrivateKey, data: openArray[byte]): bool =
## Initialize ED25519 `private key` ``key`` from raw binary
## representation ``data``.
##
@ -1759,7 +1759,7 @@ proc init*(key: var EdPrivateKey, data: openarray[byte]): bool =
copyMem(addr key.data[0], unsafeAddr data[0], length)
result = true
proc init*(key: var EdPublicKey, data: openarray[byte]): bool =
proc init*(key: var EdPublicKey, data: openArray[byte]): bool =
## Initialize ED25519 `public key` ``key`` from raw binary
## representation ``data``.
##
@ -1769,7 +1769,7 @@ proc init*(key: var EdPublicKey, data: openarray[byte]): bool =
copyMem(addr key.data[0], unsafeAddr data[0], length)
result = true
proc init*(sig: var EdSignature, data: openarray[byte]): bool =
proc init*(sig: var EdSignature, data: openArray[byte]): bool =
## Initialize ED25519 `signature` ``sig`` from raw binary
## representation ``data``.
##
@ -1801,7 +1801,7 @@ proc init*(sig: var EdSignature, data: string): bool =
init(sig, ncrutils.fromHex(data))
proc init*(t: typedesc[EdPrivateKey],
data: openarray[byte]): Result[EdPrivateKey, EdError] =
data: openArray[byte]): Result[EdPrivateKey, EdError] =
## Initialize ED25519 `private key` from raw binary representation ``data``
## and return constructed object.
var res: t
@ -1811,7 +1811,7 @@ proc init*(t: typedesc[EdPrivateKey],
ok(res)
proc init*(t: typedesc[EdPublicKey],
data: openarray[byte]): Result[EdPublicKey, EdError] =
data: openArray[byte]): Result[EdPublicKey, EdError] =
## Initialize ED25519 `public key` from raw binary representation ``data``
## and return constructed object.
var res: t
@ -1821,7 +1821,7 @@ proc init*(t: typedesc[EdPublicKey],
ok(res)
proc init*(t: typedesc[EdSignature],
data: openarray[byte]): Result[EdSignature, EdError] =
data: openArray[byte]): Result[EdSignature, EdError] =
## Initialize ED25519 `signature` from raw binary representation ``data``
## and return constructed object.
var res: t
@ -1878,7 +1878,7 @@ proc clear*(pair: var EdKeyPair) =
burnMem(pair.pubkey.data)
proc sign*[T: byte|char](key: EdPrivateKey,
message: openarray[T]): EdSignature {.gcsafe, noinit.} =
message: openArray[T]): EdSignature {.gcsafe, noinit.} =
## Create ED25519 signature of data ``message`` using private key ``key``.
var ctx: sha512
var r: GeP3
@ -1911,7 +1911,7 @@ proc sign*[T: byte|char](key: EdPrivateKey,
scMulAdd(result.data.toOpenArray(32, 63), hram.data.toOpenArray(0, 31),
hash.data.toOpenArray(0, 31), nonce.data.toOpenArray(0, 31))
proc verify*[T: byte|char](sig: EdSignature, message: openarray[T],
proc verify*[T: byte|char](sig: EdSignature, message: openArray[T],
key: EdPublicKey): bool =
## Verify ED25519 signature ``sig`` using public key ``key`` and data
## ``message``.


@ -23,18 +23,18 @@ proc br_hkdf_inject(ctx: ptr BearHKDFContext; ikm: pointer; len: csize_t) {.impo
proc br_hkdf_flip(ctx: ptr BearHKDFContext) {.importc: "br_hkdf_flip", header: "bearssl_kdf.h", raises: [].}
proc br_hkdf_produce(ctx: ptr BearHKDFContext; info: pointer; infoLen: csize_t; output: pointer; outputLen: csize_t) {.importc: "br_hkdf_produce", header: "bearssl_kdf.h", raises: [].}
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openarray[byte]; outputs: var openarray[HKDFResult[len]]) =
proc hkdf*[T: sha256; len: static int](_: type[T]; salt, ikm, info: openArray[byte]; outputs: var openArray[HKDFResult[len]]) =
var
ctx: BearHKDFContext
br_hkdf_init(
addr ctx, addr sha256Vtable,
if salt.len > 0: unsafeaddr salt[0] else: nil, csize_t(salt.len))
if salt.len > 0: unsafeAddr salt[0] else: nil, csize_t(salt.len))
br_hkdf_inject(
addr ctx, if ikm.len > 0: unsafeaddr ikm[0] else: nil, csize_t(ikm.len))
addr ctx, if ikm.len > 0: unsafeAddr ikm[0] else: nil, csize_t(ikm.len))
br_hkdf_flip(addr ctx)
for i in 0..outputs.high:
br_hkdf_produce(
addr ctx,
if info.len > 0: unsafeaddr info[0]
if info.len > 0: unsafeAddr info[0]
else: nil, csize_t(info.len),
addr outputs[i][0], csize_t(outputs[i].len))
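
A hedged usage sketch of the proc above (it assumes HKDFResult[N] is array[N, byte] and that nimcrypto's sha256 is in scope, as in this module; the input values are arbitrary):

var keys: array[2, HKDFResult[32]]     # two 32-byte outputs
let
  salt = @[byte 0x00, 0x01]
  ikm  = @[byte 0x0b, 0x0b, 0x0b]
  info = newSeq[byte]()
sha256.hkdf(salt, ikm, info, keys)     # keys[0] and keys[1] now hold derived material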


@ -154,7 +154,7 @@ proc code*(tag: Asn1Tag): byte {.inline.} =
of Asn1Tag.Context:
0xA0'u8
proc asn1EncodeLength*(dest: var openarray[byte], length: uint64): int =
proc asn1EncodeLength*(dest: var openArray[byte], length: uint64): int =
## Encode ASN.1 DER length part of TLV triple and return number of bytes
## (octets) used.
##
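
A hedged illustration of the DER length rules asn1EncodeLength implements (the return values follow from the short/long form encoding; the buffer name is made up):

var buf: array[8, byte]
doAssert buf.asn1EncodeLength(44'u64) == 1    # short form: the single octet 0x2C
doAssert buf.asn1EncodeLength(300'u64) == 3   # long form: 0x82 0x01 0x2C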
@ -181,8 +181,8 @@ proc asn1EncodeLength*(dest: var openarray[byte], length: uint64): int =
# then 9, so it is safe to convert it to `int`.
int(res)
proc asn1EncodeInteger*(dest: var openarray[byte],
value: openarray[byte]): int =
proc asn1EncodeInteger*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode big-endian binary representation of integer as ASN.1 DER `INTEGER`
## and return number of bytes (octets) used.
##
@ -228,7 +228,7 @@ proc asn1EncodeInteger*(dest: var openarray[byte],
len(value) - offset)
destlen
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openarray[byte],
proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
## Encode Nim's unsigned integer as ASN.1 DER `INTEGER` and return number of
## bytes (octets) used.
@ -238,7 +238,7 @@ proc asn1EncodeInteger*[T: SomeUnsignedInt](dest: var openarray[byte],
## but number of bytes (octets) required will be returned.
dest.asn1EncodeInteger(value.toBytesBE())
proc asn1EncodeBoolean*(dest: var openarray[byte], value: bool): int =
proc asn1EncodeBoolean*(dest: var openArray[byte], value: bool): int =
## Encode Nim's boolean as ASN.1 DER `BOOLEAN` and return number of bytes
## (octets) used.
##
@ -252,7 +252,7 @@ proc asn1EncodeBoolean*(dest: var openarray[byte], value: bool): int =
dest[2] = if value: 0xFF'u8 else: 0x00'u8
res
proc asn1EncodeNull*(dest: var openarray[byte]): int =
proc asn1EncodeNull*(dest: var openArray[byte]): int =
## Encode ASN.1 DER `NULL` and return number of bytes (octets) used.
##
## If length of ``dest`` is less than the number of required bytes to encode
@ -264,8 +264,8 @@ proc asn1EncodeNull*(dest: var openarray[byte]): int =
dest[1] = 0x00'u8
res
proc asn1EncodeOctetString*(dest: var openarray[byte],
value: openarray[byte]): int =
proc asn1EncodeOctetString*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode array of bytes as ASN.1 DER `OCTET STRING` and return number of
## bytes (octets) used.
##
@ -282,8 +282,8 @@ proc asn1EncodeOctetString*(dest: var openarray[byte],
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeBitString*(dest: var openarray[byte],
value: openarray[byte], bits = 0): int =
proc asn1EncodeBitString*(dest: var openArray[byte],
value: openArray[byte], bits = 0): int =
## Encode array of bytes as ASN.1 DER `BIT STRING` and return number of bytes
## (octets) used.
##
@ -318,7 +318,7 @@ proc asn1EncodeBitString*(dest: var openarray[byte],
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openarray[byte],
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
value: T): int =
var v = value
if value <= cast[T](0x7F):
@ -341,7 +341,7 @@ proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openarray[byte],
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[int]): int =
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[int]): int =
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
## return number of bytes (octets) used.
##
@ -367,7 +367,7 @@ proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[int]): int =
cast[uint64](value[i]))
res
proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[byte]): int =
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
##
@ -386,8 +386,8 @@ proc asn1EncodeOid*(dest: var openarray[byte], value: openarray[byte]): int =
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeSequence*(dest: var openarray[byte],
value: openarray[byte]): int =
proc asn1EncodeSequence*(dest: var openArray[byte],
value: openArray[byte]): int =
## Encode ``value`` as ASN.1 DER `SEQUENCE` and return number of bytes
## (octets) used.
##
@ -403,7 +403,7 @@ proc asn1EncodeSequence*(dest: var openarray[byte],
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc asn1EncodeComposite*(dest: var openarray[byte],
proc asn1EncodeComposite*(dest: var openArray[byte],
value: Asn1Composite): int =
## Encode composite value and return number of bytes (octets) used.
##
@ -420,7 +420,7 @@ proc asn1EncodeComposite*(dest: var openarray[byte],
len(value.buffer))
res
proc asn1EncodeContextTag*(dest: var openarray[byte], value: openarray[byte],
proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
tag: int): int =
## Encode ASN.1 DER `CONTEXT SPECIFIC TAG` ``tag`` for value ``value`` and
## return number of bytes (octets) used.
@ -692,7 +692,7 @@ proc getBuffer*(field: Asn1Field): Asn1Buffer {.inline.} =
## Return ``field`` as Asn1Buffer to enter composite types.
Asn1Buffer(buffer: field.buffer, offset: field.offset, length: field.length)
proc `==`*(field: Asn1Field, data: openarray[byte]): bool =
proc `==`*(field: Asn1Field, data: openArray[byte]): bool =
## Compares field ``field`` data with ``data`` and returns ``true`` if both
## buffers are equal.
let length = len(field.buffer)
@ -710,7 +710,7 @@ proc `==`*(field: Asn1Field, data: openarray[byte]): bool =
else:
false
proc init*(t: typedesc[Asn1Buffer], data: openarray[byte]): Asn1Buffer =
proc init*(t: typedesc[Asn1Buffer], data: openArray[byte]): Asn1Buffer =
## Initialize ``Asn1Buffer`` from array of bytes ``data``.
Asn1Buffer(buffer: @data)
@ -825,7 +825,7 @@ proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, value: bool) =
abc.offset += length
proc write*[T: Asn1Buffer|Asn1Composite](abc: var T, tag: Asn1Tag,
value: openarray[byte], bits = 0) =
value: openArray[byte], bits = 0) =
## Write array ``value`` using ``tag``.
##
## This procedure is used to write ASN.1 `INTEGER`, `OCTET STRING`,


@ -279,7 +279,7 @@ proc clear*[T: RsaPKI|RsaKeyPair](pki: var T) =
burnMem(pki.buffer)
pki.buffer.setLen(0)
proc toBytes*(key: RsaPrivateKey, data: var openarray[byte]): RsaResult[int] =
proc toBytes*(key: RsaPrivateKey, data: var openArray[byte]): RsaResult[int] =
## Serialize RSA private key ``key`` to ASN.1 DER binary form and store it
## to ``data``.
##
@ -316,7 +316,7 @@ proc toBytes*(key: RsaPrivateKey, data: var openarray[byte]): RsaResult[int] =
else:
err(RsaKeyIncorrectError)
proc toBytes*(key: RsaPublicKey, data: var openarray[byte]): RsaResult[int] =
proc toBytes*(key: RsaPublicKey, data: var openArray[byte]): RsaResult[int] =
## Serialize RSA public key ``key`` to ASN.1 DER binary form and store it
## to ``data``.
##
@ -350,7 +350,7 @@ proc toBytes*(key: RsaPublicKey, data: var openarray[byte]): RsaResult[int] =
else:
err(RsaKeyIncorrectError)
proc toBytes*(sig: RsaSignature, data: var openarray[byte]): RSaResult[int] =
proc toBytes*(sig: RsaSignature, data: var openArray[byte]): RsaResult[int] =
## Serialize RSA signature ``sig`` to raw binary form and store it
## to ``data``.
##
@ -402,7 +402,7 @@ proc getBytes*(sig: RsaSignature): RsaResult[seq[byte]] =
else:
err(RsaSignatureError)
proc init*(key: var RsaPrivateKey, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(key: var RsaPrivateKey, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize RSA private key ``key`` from ASN.1 DER binary representation
## ``data``.
##
@ -493,7 +493,7 @@ proc init*(key: var RsaPrivateKey, data: openarray[byte]): Result[void, Asn1Erro
else:
err(Asn1Error.Incorrect)
proc init*(key: var RsaPublicKey, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(key: var RsaPublicKey, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize RSA public key ``key`` from ASN.1 DER binary representation
## ``data``.
##
@ -562,7 +562,7 @@ proc init*(key: var RsaPublicKey, data: openarray[byte]): Result[void, Asn1Error
else:
err(Asn1Error.Incorrect)
proc init*(sig: var RsaSignature, data: openarray[byte]): Result[void, Asn1Error] =
proc init*(sig: var RsaSignature, data: openArray[byte]): Result[void, Asn1Error] =
## Initialize RSA signature ``sig`` from ASN.1 DER binary representation
## ``data``.
##
@ -583,7 +583,7 @@ proc init*[T: RsaPKI](sospk: var T,
sospk.init(ncrutils.fromHex(data))
proc init*(t: typedesc[RsaPrivateKey],
data: openarray[byte]): RsaResult[RsaPrivateKey] =
data: openArray[byte]): RsaResult[RsaPrivateKey] =
## Initialize RSA private key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPrivateKey
@ -593,7 +593,7 @@ proc init*(t: typedesc[RsaPrivateKey],
ok(res)
proc init*(t: typedesc[RsaPublicKey],
data: openarray[byte]): RsaResult[RsaPublicKey] =
data: openArray[byte]): RsaResult[RsaPublicKey] =
## Initialize RSA public key from ASN.1 DER binary representation ``data``
## and return constructed object.
var res: RsaPublicKey
@ -603,7 +603,7 @@ proc init*(t: typedesc[RsaPublicKey],
ok(res)
proc init*(t: typedesc[RsaSignature],
data: openarray[byte]): RsaResult[RsaSignature] =
data: openArray[byte]): RsaResult[RsaSignature] =
## Initialize RSA signature from raw binary representation ``data`` and
## return constructed object.
var res: RsaSignature
@ -743,7 +743,7 @@ proc `==`*(a, b: RsaPublicKey): bool =
(r1 and r2)
proc sign*[T: byte|char](key: RsaPrivateKey,
message: openarray[T]): RsaResult[RsaSignature] {.gcsafe.} =
message: openArray[T]): RsaResult[RsaSignature] {.gcsafe.} =
## Get RSA PKCS1.5 signature of data ``message`` using SHA256 and private
## key ``key``.
if isNil(key):
@ -770,7 +770,7 @@ proc sign*[T: byte|char](key: RsaPrivateKey,
else:
ok(res)
proc verify*[T: byte|char](sig: RsaSignature, message: openarray[T],
proc verify*[T: byte|char](sig: RsaSignature, message: openArray[T],
pubkey: RsaPublicKey): bool {.inline.} =
## Verify RSA signature ``sig`` using public key ``pubkey`` and data
## ``message``.


@ -54,7 +54,7 @@ template seckey*(v: SkKeyPair): SkPrivateKey =
template pubkey*(v: SkKeyPair): SkPublicKey =
SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
proc init*(key: var SkPrivateKey, data: openarray[byte]): SkResult[void] =
proc init*(key: var SkPrivateKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `private key` ``key`` from raw binary
## representation ``data``.
key = SkPrivateKey(? secp256k1.SkSecretKey.fromRaw(data))
@ -66,7 +66,7 @@ proc init*(key: var SkPrivateKey, data: string): SkResult[void] =
key = SkPrivateKey(? secp256k1.SkSecretKey.fromHex(data))
ok()
proc init*(key: var SkPublicKey, data: openarray[byte]): SkResult[void] =
proc init*(key: var SkPublicKey, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `public key` ``key`` from raw binary
## representation ``data``.
key = SkPublicKey(? secp256k1.SkPublicKey.fromRaw(data))
@ -78,7 +78,7 @@ proc init*(key: var SkPublicKey, data: string): SkResult[void] =
key = SkPublicKey(? secp256k1.SkPublicKey.fromHex(data))
ok()
proc init*(sig: var SkSignature, data: openarray[byte]): SkResult[void] =
proc init*(sig: var SkSignature, data: openArray[byte]): SkResult[void] =
## Initialize Secp256k1 `signature` ``sig`` from raw binary
## representation ``data``.
sig = SkSignature(? secp256k1.SkSignature.fromDer(data))
@ -95,7 +95,7 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
return err("secp: Hex to bytes failed")
init(sig, buffer)
proc init*(t: typedesc[SkPrivateKey], data: openarray[byte]): SkResult[SkPrivateKey] =
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
## Initialize Secp256k1 `private key` from raw binary
## representation ``data``.
##
@ -109,7 +109,7 @@ proc init*(t: typedesc[SkPrivateKey], data: string): SkResult[SkPrivateKey] =
## Procedure returns `private key` on success.
SkSecretKey.fromHex(data).mapConvert(SkPrivateKey)
proc init*(t: typedesc[SkPublicKey], data: openarray[byte]): SkResult[SkPublicKey] =
proc init*(t: typedesc[SkPublicKey], data: openArray[byte]): SkResult[SkPublicKey] =
## Initialize Secp256k1 `public key` from raw binary
## representation ``data``.
##
@ -125,7 +125,7 @@ proc init*(t: typedesc[SkPublicKey], data: string): SkResult[SkPublicKey] =
var key: SkPublicKey
key.init(data) and ok(key)
proc init*(t: typedesc[SkSignature], data: openarray[byte]): SkResult[SkSignature] =
proc init*(t: typedesc[SkSignature], data: openArray[byte]): SkResult[SkSignature] =
## Initialize Secp256k1 `signature` from raw binary
## representation ``data``.
##
@ -145,7 +145,7 @@ proc getPublicKey*(key: SkPrivateKey): SkPublicKey =
## Calculate and return Secp256k1 `public key` from `private key` ``key``.
SkPublicKey(SkSecretKey(key).toPublicKey())
proc toBytes*(key: SkPrivateKey, data: var openarray[byte]): SkResult[int] =
proc toBytes*(key: SkPrivateKey, data: var openArray[byte]): SkResult[int] =
## Serialize Secp256k1 `private key` ``key`` to raw binary form and store it
## to ``data``.
##
@ -157,7 +157,7 @@ proc toBytes*(key: SkPrivateKey, data: var openarray[byte]): SkResult[int] =
else:
err("secp: Not enough bytes")
proc toBytes*(key: SkPublicKey, data: var openarray[byte]): SkResult[int] =
proc toBytes*(key: SkPublicKey, data: var openArray[byte]): SkResult[int] =
## Serialize Secp256k1 `public key` ``key`` to raw binary form and store it
## to ``data``.
##
@ -169,7 +169,7 @@ proc toBytes*(key: SkPublicKey, data: var openarray[byte]): SkResult[int] =
else:
err("secp: Not enough bytes")
proc toBytes*(sig: SkSignature, data: var openarray[byte]): int =
proc toBytes*(sig: SkSignature, data: var openArray[byte]): int =
## Serialize Secp256k1 `signature` ``sig`` to raw binary form and store it
## to ``data``.
##
@ -191,12 +191,12 @@ proc getBytes*(sig: SkSignature): seq[byte] {.inline.} =
let length = toBytes(sig, result)
result.setLen(length)
proc sign*[T: byte|char](key: SkPrivateKey, msg: openarray[T]): SkSignature =
proc sign*[T: byte|char](key: SkPrivateKey, msg: openArray[T]): SkSignature =
## Sign message `msg` using private key `key` and return signature object.
let h = sha256.digest(msg)
SkSignature(sign(SkSecretKey(key), SkMessage(h.data)))
proc verify*[T: byte|char](sig: SkSignature, msg: openarray[T],
proc verify*[T: byte|char](sig: SkSignature, msg: openArray[T],
key: SkPublicKey): bool =
let h = sha256.digest(msg)
verify(secp256k1.SkSignature(sig), SkMessage(h.data), secp256k1.SkPublicKey(key))


@ -107,12 +107,12 @@ type
RelayActive, ## Enables active mode for relay.
RelayDiscovery,## Enables passive discovery for relay.
RelayHop, ## Enables hop for relay.
NoInlinePeerID,## Disable inlining of peer ID (not yet in #master).
NoInlinePeerId,## Disable inlining of peer ID (not yet in #master).
NoProcessCtrl ## Process was not spawned.
P2PStream* = ref object
flags*: set[P2PStreamFlags]
peer*: PeerID
peer*: PeerId
raddress*: MultiAddress
protocol*: string
transp*: StreamTransport
@ -133,7 +133,7 @@ type
userData*: RootRef
PeerInfo* = object
peer*: PeerID
peer*: PeerId
addresses*: seq[MultiAddress]
PubsubTicket* = ref object
@ -142,7 +142,7 @@ type
transp*: StreamTransport
PubSubMessage* = object
peer*: PeerID
peer*: PeerId
data*: seq[byte]
seqno*: seq[byte]
topics*: seq[string]
@ -170,8 +170,8 @@ proc requestIdentity(): ProtoBuffer =
result.write(1, cast[uint](RequestType.IDENTIFY))
result.finish()
proc requestConnect(peerid: PeerID,
addresses: openarray[MultiAddress],
proc requestConnect(peerid: PeerId,
addresses: openArray[MultiAddress],
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doConnect(req *pb.Request)`.
@ -186,7 +186,7 @@ proc requestConnect(peerid: PeerID,
result.write(2, msg)
result.finish()
proc requestDisconnect(peerid: PeerID): ProtoBuffer =
proc requestDisconnect(peerid: PeerId): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doDisconnect(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
@ -196,8 +196,8 @@ proc requestDisconnect(peerid: PeerID): ProtoBuffer =
result.write(7, msg)
result.finish()
proc requestStreamOpen(peerid: PeerID,
protocols: openarray[string],
proc requestStreamOpen(peerid: PeerId,
protocols: openArray[string],
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doStreamOpen(req *pb.Request)`.
@ -213,7 +213,7 @@ proc requestStreamOpen(peerid: PeerID,
result.finish()
proc requestStreamHandler(address: MultiAddress,
protocols: openarray[MultiProtocol]): ProtoBuffer =
protocols: openArray[MultiProtocol]): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doStreamHandler(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
@ -232,7 +232,7 @@ proc requestListPeers(): ProtoBuffer =
result.write(1, cast[uint](RequestType.LIST_PEERS))
result.finish()
proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeer(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PEER)
@ -247,7 +247,7 @@ proc requestDHTFindPeer(peer: PeerID, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestDHTFindPeersConnectedToPeer(peer: PeerID,
proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
@ -295,7 +295,7 @@ proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestDHTGetPublicKey(peer: PeerID, timeout = 0): ProtoBuffer =
proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetPublicKey(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
@ -340,7 +340,7 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestDHTPutValue(key: string, value: openarray[byte],
proc requestDHTPutValue(key: string, value: openArray[byte],
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
@ -372,7 +372,7 @@ proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
result.write(5, msg)
result.finish()
proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =
proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L18
let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
result = initProtoBuffer({WithVarintLength})
@ -386,7 +386,7 @@ proc requestCMTagPeer(peer: PeerID, tag: string, weight: int): ProtoBuffer =
result.write(6, msg)
result.finish()
proc requestCMUntagPeer(peer: PeerID, tag: string): ProtoBuffer =
proc requestCMUntagPeer(peer: PeerId, tag: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L33
let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
result = initProtoBuffer({WithVarintLength})
@ -435,7 +435,7 @@ proc requestPSListPeers(topic: string): ProtoBuffer =
result.write(8, msg)
result.finish()
proc requestPSPublish(topic: string, data: openarray[byte]): ProtoBuffer =
proc requestPSPublish(topic: string, data: openArray[byte]): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubPublish(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.PUBLISH)
@ -725,8 +725,8 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
args.add("-relayDiscovery=true")
if RelayHop in api.flags:
args.add("-relayHop=true")
if NoInlinePeerID in api.flags:
args.add("-noInlinePeerID=true")
if NoInlinePeerId in api.flags:
args.add("-noInlinePeerId=true")
if len(bootstrapNodes) > 0:
args.add("-bootstrapPeers=" & bootstrapNodes.join(","))
if len(id) != 0:
@ -853,7 +853,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
finally:
await api.closeConnection(transp)
proc connect*(api: DaemonAPI, peer: PeerID,
proc connect*(api: DaemonAPI, peer: PeerId,
addresses: seq[MultiAddress],
timeout = 0) {.async.} =
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
@ -866,7 +866,7 @@ proc connect*(api: DaemonAPI, peer: PeerID,
except:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerID) {.async.} =
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
## Disconnect from remote peer with id ``peer``.
var transp = await api.newConnection()
try:
@ -876,7 +876,7 @@ proc disconnect*(api: DaemonAPI, peer: PeerID) {.async.} =
finally:
await api.closeConnection(transp)
proc openStream*(api: DaemonAPI, peer: PeerID,
proc openStream*(api: DaemonAPI, peer: PeerId,
protocols: seq[string],
timeout = 0): Future[P2PStream] {.async.} =
## Open new stream to peer ``peer`` using one of the protocols in
@ -961,7 +961,7 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
finally:
await api.closeConnection(transp)
proc cmTagPeer*(api: DaemonAPI, peer: PeerID, tag: string,
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string,
weight: int) {.async.} =
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
var transp = await api.newConnection()
@ -972,7 +972,7 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerID, tag: string,
finally:
await api.closeConnection(transp)
proc cmUntagPeer*(api: DaemonAPI, peer: PeerID, tag: string) {.async.} =
proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
## Remove tag ``tag`` from peer with id ``peer``.
var transp = await api.newConnection()
try:
@ -1011,7 +1011,7 @@ proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePeerID(pb: ProtoBuffer): PeerID
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
{.raises: [Defect, DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
@ -1055,7 +1055,7 @@ proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
else:
raise newException(DaemonLocalError, "Wrong DHT answer type!")
proc dhtFindPeer*(api: DaemonAPI, peer: PeerID,
proc dhtFindPeer*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[PeerInfo] {.async.} =
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
##
@ -1069,7 +1069,7 @@ proc dhtFindPeer*(api: DaemonAPI, peer: PeerID,
finally:
await api.closeConnection(transp)
proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerID,
proc dhtGetPublicKey*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[PublicKey] {.async.} =
## Get peer's public key from peer with id ``peer``.
##
@ -1125,7 +1125,7 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
finally:
await api.closeConnection(transp)
proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerID,
proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerId,
timeout = 0): Future[seq[PeerInfo]] {.async.} =
## Find peers which are connected to peer with id ``peer``.
##
@ -1151,13 +1151,13 @@ proc dhtFindPeersConnectedToPeer*(api: DaemonAPI, peer: PeerID,
await api.closeConnection(transp)
proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
timeout = 0): Future[seq[PeerID]] {.async.} =
timeout = 0): Future[seq[PeerId]] {.async.} =
## Get closest peers for ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
## means no timeout.
var transp = await api.newConnection()
var list = newSeq[PeerID]()
var list = newSeq[PeerId]()
try:
let spb = requestDHTGetClosestPeers(key, timeout)
var pb = await transp.transactMessage(spb)
@ -1170,7 +1170,7 @@ proc dhtGetClosestPeers*(api: DaemonAPI, key: string,
var cpb = initProtoBuffer(message)
if cpb.getDhtMessageType() == DHTResponseType.END:
break
list.add(cpb.dhtGetSinglePeerID())
list.add(cpb.dhtGetSinglePeerId())
result = list
finally:
await api.closeConnection(transp)
@ -1238,14 +1238,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
await api.closeConnection(transp)
proc pubsubListPeers*(api: DaemonAPI,
topic: string): Future[seq[PeerID]] {.async.} =
topic: string): Future[seq[PeerId]] {.async.} =
## Get list of peers we are connected to and which are also subscribed to topic
## ``topic``.
var transp = await api.newConnection()
try:
var pb = await transp.transactMessage(requestPSListPeers(topic))
withMessage(pb) do:
var peer: PeerID
var peer: PeerId
let innerPb = pb.enterPsMessage()
var peers = newSeq[seq[byte]]()
discard innerPb.getRepeatedField(2, peers)
@ -1308,7 +1308,7 @@ proc pubsubSubscribe*(api: DaemonAPI, topic: string,
proc shortLog*(pinfo: PeerInfo): string =
## Get string representation of ``PeerInfo`` object.
result = newStringOfCap(128)
result.add("{PeerID: '")
result.add("{PeerId: '")
result.add($pinfo.peer.shortLog())
result.add("' Addresses: [")
let length = len(pinfo.addresses)
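# Usage sketch (not part of the diff): how the renamed DaemonAPI calls above fit
# together. Assumes the go-libp2p-daemon binary (p2pd) is installed, that the
# module path is libp2p/daemon/daemonapi and that DHTFull is a valid daemon flag;
# the remote peer id is a placeholder to replace with a real base58 id.
import chronos
import stew/results
import libp2p/[peerid, daemon/daemonapi]

proc daemonExample() {.async.} =
  let api = await newDaemonApi({DHTFull})
  let me = await api.identity()
  echo "our peer: ", me.peer

  let remote = PeerId.init("<base58 peer id>").tryGet()   # placeholder
  let found = await api.dhtFindPeer(remote)
  await api.connect(found.peer, found.addresses)
  await api.disconnect(found.peer)
  await api.close()

when isMainModule:
  waitFor daemonExample()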

View File

@ -12,8 +12,8 @@
## option enabled ``nim-libp2p`` will create dumps of unencrypted messages for
## every peer libp2p communicates with.
##
## Every file is created with name "<PeerID>.pbcap". One file represents
## all the communication with the peer identified by ``PeerID``.
## Every file is created with name "<PeerId>.pbcap". One file represents
## all the communication with the peer identified by ``PeerId``.
##
## File can have multiple protobuf encoded messages of this format:
##
@ -170,7 +170,7 @@ iterator messages*(data: seq[byte]): Option[ProtoMessage] =
else:
break
proc dumpHex*(pbytes: openarray[byte], groupBy = 1, ascii = true): string =
proc dumpHex*(pbytes: openArray[byte], groupBy = 1, ascii = true): string =
## Get hexadecimal dump of memory for array ``pbytes``.
var res = ""
var offset = 0
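# Usage sketch (not part of the diff) for dumpHex above; the module path
# libp2p/debugutils is an assumption.
import libp2p/debugutils

let wire = @[0x0A'u8, 0x04, 0x74, 0x65, 0x73, 0x74]
echo dumpHex(wire, groupBy = 2)   # hex groups plus the trailing ASCII column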

View File

@ -18,7 +18,7 @@ type
method connect*(
self: Dial,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]) {.async, base.} =
## connect remote peer without negotiating
## a protocol
@ -28,7 +28,7 @@ method connect*(
method dial*(
self: Dial,
peerId: PeerID,
peerId: PeerId,
protos: seq[string]): Future[Connection] {.async, base.} =
## create a protocol stream over an
## existing connection
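# Usage sketch (not part of the diff): the connect/dial methods above as reached
# through Switch, which implements Dial. writeLp and the /echo/1.0.0 protocol
# name are assumptions; remotePeerId/remoteAddrs stand in for a peer you
# already know about.
import chronos
import libp2p

proc talk(sw: Switch, remotePeerId: PeerId,
          remoteAddrs: seq[MultiAddress]) {.async.} =
  # connect without negotiating a protocol...
  await sw.connect(remotePeerId, remoteAddrs)
  # ...then open a protocol stream over the now existing connection
  let conn = await sw.dial(remotePeerId, @["/echo/1.0.0"])
  await conn.writeLp("hello")
  await conn.close()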
@ -38,7 +38,7 @@ method dial*(
method dial*(
self: Dial,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string]): Future[Connection] {.async, base.} =
## create a protocol stream and establish

View File

@ -40,13 +40,13 @@ type
localPeerId*: PeerId
ms: MultistreamSelect
connManager: ConnManager
dialLock: Table[PeerID, AsyncLock]
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
debug "Dialing peer", peerId
@ -111,7 +111,7 @@ proc dialAndUpgrade(
proc internalConnect(
self: Dialer,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
if self.localPeerId == peerId:
@ -158,7 +158,7 @@ proc internalConnect(
method connect*(
self: Dialer,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]) {.async.} =
## connect remote peer without negotiating
## a protocol
@ -183,7 +183,7 @@ proc negotiateStream(
method dial*(
self: Dialer,
peerId: PeerID,
peerId: PeerId,
protos: seq[string]): Future[Connection] {.async.} =
## create a protocol stream over an
## existing connection
@ -198,7 +198,7 @@ method dial*(
method dial*(
self: Dialer,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string]): Future[Connection] {.async.} =
## create a protocol stream and establish

View File

@ -498,7 +498,7 @@ proc protoName*(ma: MultiAddress): MaResult[string] =
ok($(proto.mcodec))
proc protoArgument*(ma: MultiAddress,
value: var openarray[byte]): MaResult[int] =
value: var openArray[byte]): MaResult[int] =
## Returns MultiAddress ``ma`` protocol argument value.
##
## If the current MultiAddress does not have an argument value, then the result will be
@ -723,7 +723,7 @@ proc validate*(ma: MultiAddress): bool =
proc init*(
mtype: typedesc[MultiAddress], protocol: MultiCodec,
value: openarray[byte] = []): MaResult[MultiAddress] =
value: openArray[byte] = []): MaResult[MultiAddress] =
## Initialize MultiAddress object from protocol id ``protocol`` and array
## of bytes ``value``.
let proto = CodeAddresses.getOrDefault(protocol)
@ -754,7 +754,7 @@ proc init*(
raiseAssert "None checked above"
proc init*(mtype: typedesc[MultiAddress], protocol: MultiCodec,
value: PeerID): MaResult[MultiAddress] {.inline.} =
value: PeerId): MaResult[MultiAddress] {.inline.} =
## Initialize MultiAddress object from protocol id ``protocol`` and peer id
## ``value``.
init(mtype, protocol, value.data)
@ -832,7 +832,7 @@ proc init*(mtype: typedesc[MultiAddress],
ok(res)
proc init*(mtype: typedesc[MultiAddress],
data: openarray[byte]): MaResult[MultiAddress] =
data: openArray[byte]): MaResult[MultiAddress] =
## Initialize MultiAddress with array of bytes ``data``.
if len(data) == 0:
err("multiaddress: Address could not be empty!")
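# Usage sketch (not part of the diff) for the MultiAddress init overloads above;
# module paths assumed as libp2p/multiaddress and libp2p/multicodec.
import stew/results
import libp2p/[multiaddress, multicodec]

let
  fromString = MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()
  # protocol id plus raw value: two big-endian port bytes for tcp
  fromParts = MultiAddress.init(multiCodec("tcp"), [0x0F'u8, 0xC8]).tryGet()
echo fromString   # /ip4/127.0.0.1/tcp/4040
echo fromParts    # /tcp/4040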

View File

@ -19,7 +19,7 @@ import tables
import stew/[base32, base58, base64, results]
type
MultibaseStatus* {.pure.} = enum
MultiBaseStatus* {.pure.} = enum
Error, Success, Overrun, Incorrect, BadCodec, NotSupported
MultiBase* = object
@ -29,169 +29,169 @@ type
MBCodec = object
code: char
name: string
encr: proc(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
decr: proc(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
encr: proc(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
decr: proc(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
encl: MBCodeSize
decl: MBCodeSize
proc idd(inbytes: openarray[char], outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc idd(inbytes: openArray[char], outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
result = MultibaseStatus.Overrun
result = MultiBaseStatus.Overrun
else:
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
outlen = length
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
proc ide(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc ide(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
let length = len(inbytes)
if length > len(outbytes):
outlen = length
result = MultibaseStatus.Overrun
result = MultiBaseStatus.Overrun
else:
copyMem(addr outbytes[0], unsafeAddr inbytes[0], length)
outlen = length
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
proc idel(length: int): int = length
proc iddl(length: int): int = length
proc b16d(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b16d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
discard
proc b16e(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b16e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
discard
proc b16ud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b16ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
discard
proc b16ue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b16ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
discard
proc b16el(length: int): int = length shl 1
proc b16dl(length: int): int = (length + 1) div 2
proc b32ce(r: Base32Status): MultibaseStatus {.inline.} =
result = MultibaseStatus.Error
proc b32ce(r: Base32Status): MultiBaseStatus {.inline.} =
result = MultiBaseStatus.Error
if r == Base32Status.Incorrect:
result = MultibaseStatus.Incorrect
result = MultiBaseStatus.Incorrect
elif r == Base32Status.Overrun:
result = MultibaseStatus.Overrun
result = MultiBaseStatus.Overrun
elif r == Base32Status.Success:
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
proc b58ce(r: Base58Status): MultibaseStatus {.inline.} =
result = MultibaseStatus.Error
proc b58ce(r: Base58Status): MultiBaseStatus {.inline.} =
result = MultiBaseStatus.Error
if r == Base58Status.Incorrect:
result = MultibaseStatus.Incorrect
result = MultiBaseStatus.Incorrect
elif r == Base58Status.Overrun:
result = MultibaseStatus.Overrun
result = MultiBaseStatus.Overrun
elif r == Base58Status.Success:
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
proc b64ce(r: Base64Status): MultibaseStatus {.inline.} =
proc b64ce(r: Base64Status): MultiBaseStatus {.inline.} =
result = MultiBaseStatus.Error
if r == Base64Status.Incorrect:
result = MultibaseStatus.Incorrect
result = MultiBaseStatus.Incorrect
elif r == Base64Status.Overrun:
result = MultiBaseStatus.Overrun
elif r == Base64Status.Success:
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
proc b32hd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32hd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Lower.decode(inbytes, outbytes, outlen))
proc b32he(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32he(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Lower.encode(inbytes, outbytes, outlen))
proc b32hud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32hud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Upper.decode(inbytes, outbytes, outlen))
proc b32hue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32hue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32Upper.encode(inbytes, outbytes, outlen))
proc b32hpd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32hpd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.decode(inbytes, outbytes, outlen))
proc b32hpe(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32hpe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32LowerPad.encode(inbytes, outbytes, outlen))
proc b32hpud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32hpud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.decode(inbytes, outbytes, outlen))
proc b32hpue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32hpue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(HexBase32UpperPad.encode(inbytes, outbytes, outlen))
proc b32d(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Lower.decode(inbytes, outbytes, outlen))
proc b32e(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Lower.encode(inbytes, outbytes, outlen))
proc b32ud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Upper.decode(inbytes, outbytes, outlen))
proc b32ue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32Upper.encode(inbytes, outbytes, outlen))
proc b32pd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32LowerPad.decode(inbytes, outbytes, outlen))
proc b32pe(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32LowerPad.encode(inbytes, outbytes, outlen))
proc b32pud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b32pud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32UpperPad.decode(inbytes, outbytes, outlen))
proc b32pue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b32pue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b32ce(Base32UpperPad.encode(inbytes, outbytes, outlen))
proc b32el(length: int): int = Base32Lower.encodedLength(length)
@ -199,24 +199,24 @@ proc b32dl(length: int): int = Base32Lower.decodedLength(length)
proc b32pel(length: int): int = Base32LowerPad.encodedLength(length)
proc b32pdl(length: int): int = Base32LowerPad.decodedLength(length)
proc b58fd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b58fd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b58ce(FLCBase58.decode(inbytes, outbytes, outlen))
proc b58fe(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b58fe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b58ce(FLCBase58.encode(inbytes, outbytes, outlen))
proc b58bd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b58bd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b58ce(BTCBase58.decode(inbytes, outbytes, outlen))
proc b58be(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b58be(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b58ce(BTCBase58.encode(inbytes, outbytes, outlen))
proc b58el(length: int): int = Base58.encodedLength(length)
@ -227,48 +227,48 @@ proc b64dl(length: int): int = Base64.decodedLength(length)
proc b64pel(length: int): int = Base64Pad.encodedLength(length)
proc b64pdl(length: int): int = Base64Pad.decodedLength(length)
proc b64e(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b64e(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64.encode(inbytes, outbytes, outlen))
proc b64d(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b64d(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64.decode(inbytes, outbytes, outlen))
proc b64pe(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b64pe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Pad.encode(inbytes, outbytes, outlen))
proc b64pd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b64pd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Pad.decode(inbytes, outbytes, outlen))
proc b64ue(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b64ue(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Url.encode(inbytes, outbytes, outlen))
proc b64ud(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b64ud(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64Url.decode(inbytes, outbytes, outlen))
proc b64upe(inbytes: openarray[byte],
outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
proc b64upe(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64UrlPad.encode(inbytes, outbytes, outlen))
proc b64upd(inbytes: openarray[char],
outbytes: var openarray[byte],
outlen: var int): MultibaseStatus =
proc b64upd(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus =
result = b64ce(Base64UrlPad.decode(inbytes, outbytes, outlen))
const
MultibaseCodecs = [
MultiBaseCodecs = [
MBCodec(name: "identity", code: chr(0x00),
decr: idd, encr: ide, decl: iddl, encl: idel
),
@ -328,16 +328,16 @@ const
]
proc initMultiBaseCodeTable(): Table[char, MBCodec] {.compileTime.} =
for item in MultibaseCodecs:
for item in MultiBaseCodecs:
result[item.code] = item
proc initMultiBaseNameTable(): Table[string, MBCodec] {.compileTime.} =
for item in MultibaseCodecs:
for item in MultiBaseCodecs:
result[item.name] = item
const
CodeMultibases = initMultiBaseCodeTable()
NameMultibases = initMultiBaseNameTable()
CodeMultiBases = initMultiBaseCodeTable()
NameMultiBases = initMultiBaseNameTable()
proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
length: int): int =
@ -346,7 +346,7 @@ proc encodedLength*(mbtype: typedesc[MultiBase], encoding: string,
##
## Procedure returns ``-1`` if ``encoding`` scheme is not supported or
## not present.
let mb = NameMultibases.getOrDefault(encoding)
let mb = NameMultiBases.getOrDefault(encoding)
if len(mb.name) == 0 or isNil(mb.encl):
result = -1
else:
@ -359,7 +359,7 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
length: int): int =
## Return estimated size of buffer to store MultiBase decoded value with
## encoding character ``encoding`` of length ``length``.
let mb = CodeMultibases.getOrDefault(encoding)
let mb = CodeMultiBases.getOrDefault(encoding)
if len(mb.name) == 0 or isNil(mb.decl) or length == 0:
result = -1
else:
@ -369,8 +369,8 @@ proc decodedLength*(mbtype: typedesc[MultiBase], encoding: char,
result = mb.decl(length - 1)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openarray[byte], outbytes: var openarray[char],
outlen: var int): MultibaseStatus =
inbytes: openArray[byte], outbytes: var openArray[char],
outlen: var int): MultiBaseStatus =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## store encoded value to ``outbytes``.
##
@ -386,11 +386,11 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
##
## On successful encoding ``MultiBaseStatus.Success`` will be returned and
## ``outlen`` will be set to number of encoded octets (bytes).
let mb = NameMultibases.getOrDefault(encoding)
let mb = NameMultiBases.getOrDefault(encoding)
if len(mb.name) == 0:
return MultibaseStatus.BadCodec
return MultiBaseStatus.BadCodec
if isNil(mb.encr) or isNil(mb.encl):
return MultibaseStatus.NotSupported
return MultiBaseStatus.NotSupported
if len(outbytes) > 1:
result = mb.encr(inbytes, outbytes.toOpenArray(1, outbytes.high),
outlen)
@ -408,8 +408,8 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
result = MultiBaseStatus.Overrun
outlen = mb.encl(len(inbytes)) + 1
proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char],
outbytes: var openarray[byte], outlen: var int): MultibaseStatus =
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char],
outbytes: var openArray[byte], outlen: var int): MultiBaseStatus =
## Decode array ``inbytes`` using MultiBase encoding and store decoded value
## to ``outbytes``.
##
@ -426,24 +426,24 @@ proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char],
## ``outlen`` will be set to number of decoded octets (bytes).
let length = len(inbytes)
if length == 0:
return MultibaseStatus.Incorrect
let mb = CodeMultibases.getOrDefault(inbytes[0])
return MultiBaseStatus.Incorrect
let mb = CodeMultiBases.getOrDefault(inbytes[0])
if len(mb.name) == 0:
return MultibaseStatus.BadCodec
return MultiBaseStatus.BadCodec
if isNil(mb.decr) or isNil(mb.decl):
return MultibaseStatus.NotSupported
return MultiBaseStatus.NotSupported
if length == 1:
outlen = 0
result = MultibaseStatus.Success
result = MultiBaseStatus.Success
else:
result = mb.decr(inbytes.toOpenArray(1, length - 1), outbytes, outlen)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
inbytes: openarray[byte]): Result[string, string] =
inbytes: openArray[byte]): Result[string, string] =
## Encode array ``inbytes`` using MultiBase encoding scheme ``encoding`` and
## return encoded string.
let length = len(inbytes)
let mb = NameMultibases.getOrDefault(encoding)
let mb = NameMultiBases.getOrDefault(encoding)
if len(mb.name) == 0:
return err("multibase: Encoding scheme is incorrect!")
if isNil(mb.encr) or isNil(mb.encl):
@ -462,13 +462,13 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
buffer[0] = mb.code
ok(buffer)
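# Usage sketch (not part of the diff): round trip through the Result-returning
# encode above and the decode overload defined just below. "base58btc" is
# assumed to be among the registered encoding names; module path assumed as
# libp2p/multibase.
import stew/results
import libp2p/multibase

let payload = @[0xDE'u8, 0xAD, 0xBE, 0xEF]
let encoded = MultiBase.encode("base58btc", payload).get()
echo encoded                              # prefixed with the multibase code
let decoded = MultiBase.decode(encoded).get()
assert decoded == payload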
proc decode*(mbtype: typedesc[MultiBase], inbytes: openarray[char]): Result[seq[byte], string] =
proc decode*(mbtype: typedesc[MultiBase], inbytes: openArray[char]): Result[seq[byte], string] =
## Decode MultiBase encoded array ``inbytes`` and return decoded sequence of
## bytes.
let length = len(inbytes)
if length == 0:
return err("multibase: Could not decode zero-length string")
let mb = CodeMultibases.getOrDefault(inbytes[0])
let mb = CodeMultiBases.getOrDefault(inbytes[0])
if len(mb.name) == 0:
return err("multibase: MultiBase scheme is incorrect!")
if isNil(mb.decr) or isNil(mb.decl):

View File

@ -41,8 +41,8 @@ const
ErrParseError = "Parse error fromHex"
type
MHashCoderProc* = proc(data: openarray[byte],
output: var openarray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
MHashCoderProc* = proc(data: openArray[byte],
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
MHash* = object
mcodec*: MultiCodec
size*: int
@ -56,20 +56,20 @@ type
MhResult*[T] = Result[T, cstring]
proc identhash(data: openarray[byte], output: var openarray[byte]) =
proc identhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var length = if len(data) > len(output): len(output)
else: len(data)
copyMem(addr output[0], unsafeAddr data[0], length)
proc sha1hash(data: openarray[byte], output: var openarray[byte]) =
proc sha1hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha1.digest(data)
var length = if sha1.sizeDigest > len(output): len(output)
else: sha1.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc dblsha2_256hash(data: openarray[byte], output: var openarray[byte]) =
proc dblsha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest1 = sha256.digest(data)
var digest2 = sha256.digest(digest1.data)
@ -77,91 +77,91 @@ proc dblsha2_256hash(data: openarray[byte], output: var openarray[byte]) =
else: sha256.sizeDigest
copyMem(addr output[0], addr digest2.data[0], length)
proc blake2Bhash(data: openarray[byte], output: var openarray[byte]) =
proc blake2Bhash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_512.digest(data)
var length = if blake2_512.sizeDigest > len(output): len(output)
else: blake2_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc blake2Shash(data: openarray[byte], output: var openarray[byte]) =
proc blake2Shash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = blake2_256.digest(data)
var length = if blake2_256.sizeDigest > len(output): len(output)
else: blake2_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_256hash(data: openarray[byte], output: var openarray[byte]) =
proc sha2_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha256.digest(data)
var length = if sha256.sizeDigest > len(output): len(output)
else: sha256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha2_512hash(data: openarray[byte], output: var openarray[byte]) =
proc sha2_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha512.digest(data)
var length = if sha512.sizeDigest > len(output): len(output)
else: sha512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_224hash(data: openarray[byte], output: var openarray[byte]) =
proc sha3_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_224.digest(data)
var length = if sha3_224.sizeDigest > len(output): len(output)
else: sha3_224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_256hash(data: openarray[byte], output: var openarray[byte]) =
proc sha3_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_256.digest(data)
var length = if sha3_256.sizeDigest > len(output): len(output)
else: sha3_256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_384hash(data: openarray[byte], output: var openarray[byte]) =
proc sha3_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_384.digest(data)
var length = if sha3_384.sizeDigest > len(output): len(output)
else: sha3_384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc sha3_512hash(data: openarray[byte], output: var openarray[byte]) =
proc sha3_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = sha3_512.digest(data)
var length = if sha3_512.sizeDigest > len(output): len(output)
else: sha3_512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_224hash(data: openarray[byte], output: var openarray[byte]) =
proc keccak_224hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak224.digest(data)
var length = if keccak224.sizeDigest > len(output): len(output)
else: keccak224.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_256hash(data: openarray[byte], output: var openarray[byte]) =
proc keccak_256hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak256.digest(data)
var length = if keccak256.sizeDigest > len(output): len(output)
else: keccak256.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_384hash(data: openarray[byte], output: var openarray[byte]) =
proc keccak_384hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak384.digest(data)
var length = if keccak384.sizeDigest > len(output): len(output)
else: keccak384.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc keccak_512hash(data: openarray[byte], output: var openarray[byte]) =
proc keccak_512hash(data: openArray[byte], output: var openArray[byte]) =
if len(output) > 0:
var digest = keccak512.digest(data)
var length = if keccak512.sizeDigest > len(output): len(output)
else: keccak512.sizeDigest
copyMem(addr output[0], addr digest.data[0], length)
proc shake_128hash(data: openarray[byte], output: var openarray[byte]) =
proc shake_128hash(data: openArray[byte], output: var openArray[byte]) =
var sctx: shake128
if len(output) > 0:
sctx.init()
@ -170,7 +170,7 @@ proc shake_128hash(data: openarray[byte], output: var openarray[byte]) =
discard sctx.output(addr output[0], uint(len(output)))
sctx.clear()
proc shake_256hash(data: openarray[byte], output: var openarray[byte]) =
proc shake_256hash(data: openArray[byte], output: var openArray[byte]) =
var sctx: shake256
if len(output) > 0:
sctx.init()
@ -208,16 +208,16 @@ const
),
MHash(mcodec: multiCodec("shake-128"), size: 32, coder: shake_128hash),
MHash(mcodec: multiCodec("shake-256"), size: 64, coder: shake_256hash),
MHash(mcodec: multiCodec("keccak-224"), size: keccak_224.sizeDigest,
MHash(mcodec: multiCodec("keccak-224"), size: keccak224.sizeDigest,
coder: keccak_224hash
),
MHash(mcodec: multiCodec("keccak-256"), size: keccak_256.sizeDigest,
MHash(mcodec: multiCodec("keccak-256"), size: keccak256.sizeDigest,
coder: keccak_256hash
),
MHash(mcodec: multiCodec("keccak-384"), size: keccak_384.sizeDigest,
MHash(mcodec: multiCodec("keccak-384"), size: keccak384.sizeDigest,
coder: keccak_384hash
),
MHash(mcodec: multiCodec("keccak-512"), size: keccak_512.sizeDigest,
MHash(mcodec: multiCodec("keccak-512"), size: keccak512.sizeDigest,
coder: keccak_512hash
),
MHash(mcodec: multiCodec("blake2b-8"), size: 1, coder: blake2Bhash),
@ -325,7 +325,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} =
const
CodeHashes = initMultiHashCodeTable()
proc digestImplWithHash(hash: MHash, data: openarray[byte]): MultiHash =
proc digestImplWithHash(hash: MHash, data: openArray[byte]): MultiHash =
var buffer: array[MaxHashSize, byte]
result.data = initVBuffer()
result.mcodec = hash.mcodec
@ -343,7 +343,7 @@ proc digestImplWithHash(hash: MHash, data: openarray[byte]): MultiHash =
result.size = hash.size
result.data.finish()
proc digestImplWithoutHash(hash: MHash, data: openarray[byte]): MultiHash =
proc digestImplWithoutHash(hash: MHash, data: openArray[byte]): MultiHash =
result.data = initVBuffer()
result.mcodec = hash.mcodec
result.size = len(data)
@ -354,7 +354,7 @@ proc digestImplWithoutHash(hash: MHash, data: openarray[byte]): MultiHash =
result.data.finish()
proc digest*(mhtype: typedesc[MultiHash], hashname: string,
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with name ``hashname`` on
## data array ``data``.
let mc = MultiCodec.codec(hashname)
@ -368,7 +368,7 @@ proc digest*(mhtype: typedesc[MultiHash], hashname: string,
ok(digestImplWithHash(hash, data))
proc digest*(mhtype: typedesc[MultiHash], hashcode: int,
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Perform digest calculation using hash algorithm with code ``hashcode`` on
## data array ``data``.
let hash = CodeHashes.getOrDefault(hashcode)
@ -406,7 +406,7 @@ proc init*[T](mhtype: typedesc[MultiHash], hashcode: MultiCodec,
ok(digestImplWithoutHash(hash, mdigest.data))
proc init*(mhtype: typedesc[MultiHash], hashname: string,
bdigest: openarray[byte]): MhResult[MultiHash] {.inline.} =
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm name
## ``hashname``.
let mc = MultiCodec.codec(hashname)
@ -422,7 +422,7 @@ proc init*(mhtype: typedesc[MultiHash], hashname: string,
ok(digestImplWithoutHash(hash, bdigest))
proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
bdigest: openarray[byte]): MhResult[MultiHash] {.inline.} =
bdigest: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from array of bytes ``bdigest`` and hash algorithm code
## ``hashcode``.
let hash = CodeHashes.getOrDefault(hashcode)
@ -433,7 +433,7 @@ proc init*(mhtype: typedesc[MultiHash], hashcode: MultiCodec,
else:
ok(digestImplWithoutHash(hash, bdigest))
proc decode*(mhtype: typedesc[MultiHash], data: openarray[byte],
proc decode*(mhtype: typedesc[MultiHash], data: openArray[byte],
mhash: var MultiHash): MhResult[int] =
## Decode MultiHash value from array of bytes ``data``.
##
@ -478,7 +478,7 @@ proc decode*(mhtype: typedesc[MultiHash], data: openarray[byte],
vb.offset + int(size) - 1))
ok(vb.offset + int(size))
proc validate*(mhtype: typedesc[MultiHash], data: openarray[byte]): bool =
proc validate*(mhtype: typedesc[MultiHash], data: openArray[byte]): bool =
## Returns ``true`` if array of bytes ``data`` has correct MultiHash inside.
var code, size: uint64
var res: VarintResult[void]
@ -509,7 +509,7 @@ proc validate*(mhtype: typedesc[MultiHash], data: openarray[byte]): bool =
result = true
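# Usage sketch (not part of the diff) for the digest/validate/decode procs
# above; module paths assumed as libp2p/multihash and libp2p/multicodec.
import stew/results
import libp2p/[multihash, multicodec]

let payload = [0xDE'u8, 0xAD, 0xBE, 0xEF]
let mh = MultiHash.digest("sha2-256", payload).tryGet()
echo "codec: ", mh.mcodec, ", digest size: ", mh.size

# the binary form kept in mh.data.buffer validates and decodes back
assert MultiHash.validate(mh.data.buffer)
var roundTripped: MultiHash
discard MultiHash.decode(mh.data.buffer, roundTripped).tryGet()
assert roundTripped.mcodec == mh.mcodec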
proc init*(mhtype: typedesc[MultiHash],
data: openarray[byte]): MhResult[MultiHash] {.inline.} =
data: openArray[byte]): MhResult[MultiHash] {.inline.} =
## Create MultiHash from bytes array ``data``.
var hash: MultiHash
discard ? MultiHash.decode(data, hash)
@ -530,7 +530,7 @@ proc init58*(mhtype: typedesc[MultiHash],
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format")
proc cmp(a: openarray[byte], b: openarray[byte]): bool {.inline.} =
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
if len(a) != len(b):
return false
var n = len(a)

View File

@ -46,7 +46,7 @@ type
oid*: Oid
maxChannCount: int
func shortLog*(m: MPlex): auto =
func shortLog*(m: Mplex): auto =
shortLog(m.connection)
chronicles.formatIt(Mplex): shortLog(it)

View File

@ -88,9 +88,9 @@ method resolveIp*(
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async.} =
trace "Resolving IP using DNS", address, servers = self.nameservers.mapIt($it), domain
for _ in 0 ..< self.nameservers.len:
let server = self.nameservers[0]
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
var responseFutures: seq[Future[Response]]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
responseFutures.add(getDnsResponse(server, address, A))
@ -122,8 +122,8 @@ method resolveIp*(
break
if resolveFailed:
self.nameservers.add(self.nameservers[0])
self.nameservers.delete(0)
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
trace "Got IPs from DNS server", resolvedAddresses, server = $server
@ -136,9 +136,9 @@ method resolveTxt*(
self: DnsResolver,
address: string): Future[seq[string]] {.async.} =
trace "Resolving TXT using DNS", address, servers = self.nameservers.mapIt($it)
for _ in 0 ..< self.nameservers.len:
let server = self.nameservers[0]
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
try:
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
@ -147,8 +147,8 @@ method resolveTxt*(
raise e
except CatchableError as e:
info "Failed to query DNS", address, error=e.msg
self.nameservers.add(self.nameservers[0])
self.nameservers.delete(0)
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
debug "Failed to resolve TXT, returning empty set"
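# Standalone sketch (not part of the diff) of the retry strategy used by
# resolveIp/resolveTxt above: try the first nameserver and, on failure, rotate
# it to the back so the next attempt starts from a different server.
proc rotateOnFailure[T](servers: var seq[T],
                        trySrv: proc(s: T): bool): bool =
  for _ in 0 ..< servers.len:
    if trySrv(servers[0]):
      return true
    servers.add(servers[0])   # same add/delete dance as in the methods above
    servers.delete(0)
  false

var ns = @["10.0.0.1", "10.0.0.2", "10.0.0.3"]
discard rotateOnFailure(ns, proc(s: string): bool = s == "10.0.0.2")
echo ns   # the failing "10.0.0.1" has been rotated to the back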

View File

@ -25,10 +25,10 @@ const
maxInlineKeyLength* = 42
type
PeerID* = object
PeerId* = object
data*: seq[byte]
func `$`*(pid: PeerID): string =
func `$`*(pid: PeerId): string =
## Return base58 encoded ``pid`` representation.
# This unusual call syntax is used to avoid a strange Nim compilation error
base58.encode(Base58, pid.data)
@ -42,29 +42,29 @@ func shortLog*(pid: PeerId): string =
spid
chronicles.formatIt(PeerID): shortLog(it)
chronicles.formatIt(PeerId): shortLog(it)
func toBytes*(pid: PeerID, data: var openarray[byte]): int =
## Store PeerID ``pid`` to array of bytes ``data``.
func toBytes*(pid: PeerId, data: var openArray[byte]): int =
## Store PeerId ``pid`` to array of bytes ``data``.
##
## Returns number of bytes needed to store ``pid``.
result = len(pid.data)
if len(data) >= result and result > 0:
copyMem(addr data[0], unsafeAddr pid.data[0], result)
template getBytes*(pid: PeerID): seq[byte] =
## Return PeerID ``pid`` as array of bytes.
template getBytes*(pid: PeerId): seq[byte] =
## Return PeerId ``pid`` as array of bytes.
pid.data
func hex*(pid: PeerID): string =
func hex*(pid: PeerId): string =
## Returns hexadecimal string representation of ``pid``.
toHex(pid.data)
template len*(pid: PeerID): int =
template len*(pid: PeerId): int =
## Returns length of ``pid`` binary representation.
len(pid.data)
func cmp*(a, b: PeerID): int =
func cmp*(a, b: PeerId): int =
## Compares two peer ids ``a`` and ``b``.
## Returns:
##
@ -79,29 +79,29 @@ func cmp*(a, b: PeerID): int =
inc(i)
result = len(a.data) - len(b.data)
template `<=`*(a, b: PeerID): bool =
template `<=`*(a, b: PeerId): bool =
(cmp(a, b) <= 0)
template `<`*(a, b: PeerID): bool =
template `<`*(a, b: PeerId): bool =
(cmp(a, b) < 0)
template `>=`*(a, b: PeerID): bool =
template `>=`*(a, b: PeerId): bool =
(cmp(a, b) >= 0)
template `>`*(a, b: PeerID): bool =
template `>`*(a, b: PeerId): bool =
(cmp(a, b) > 0)
template `==`*(a, b: PeerID): bool =
template `==`*(a, b: PeerId): bool =
(cmp(a, b) == 0)
template hash*(pid: PeerID): Hash =
template hash*(pid: PeerId): Hash =
hash(pid.data)
func validate*(pid: PeerID): bool =
func validate*(pid: PeerId): bool =
## Check if ``pid`` is a valid, non-empty peer id.
len(pid.data) > 0 and MultiHash.validate(pid.data)
func hasPublicKey*(pid: PeerID): bool =
func hasPublicKey*(pid: PeerId): bool =
## Returns ``true`` if ``pid`` is small enough to hold public key inside.
if len(pid.data) > 0:
var mh: MultiHash
@ -109,8 +109,8 @@ func hasPublicKey*(pid: PeerID): bool =
if mh.mcodec == multiCodec("identity"):
result = true
func extractPublicKey*(pid: PeerID, pubkey: var PublicKey): bool =
## Returns ``true`` if public key was successfully decoded from PeerID
func extractPublicKey*(pid: PeerId, pubkey: var PublicKey): bool =
## Returns ``true`` if public key was successfully decoded from PeerId
## ``pid`` and stored to ``pubkey``.
##
## Returns ``false`` otherwise.
@ -121,16 +121,16 @@ func extractPublicKey*(pid: PeerID, pubkey: var PublicKey): bool =
let length = len(mh.data.buffer)
result = pubkey.init(mh.data.buffer.toOpenArray(mh.dpos, length - 1))
func init*(pid: var PeerID, data: openarray[byte]): bool =
func init*(pid: var PeerId, data: openArray[byte]): bool =
## Initialize peer id from raw binary representation ``data``.
##
## Returns ``true`` if peer was successfully initialized.
var p = PeerID(data: @data)
var p = PeerId(data: @data)
if p.validate():
pid = p
result = true
func init*(pid: var PeerID, data: string): bool =
func init*(pid: var PeerId, data: string): bool =
## Initialize peer id from base58 encoded string representation.
##
## Returns ``true`` if peer was successfully initialized.
@ -138,29 +138,29 @@ func init*(pid: var PeerID, data: string): bool =
var length = 0
if Base58.decode(data, p, length) == Base58Status.Success:
p.setLen(length)
var opid: PeerID
var opid: PeerId
shallowCopy(opid.data, p)
if opid.validate():
pid = opid
result = true
func init*(t: typedesc[PeerID], data: openarray[byte]): Result[PeerID, cstring] =
func init*(t: typedesc[PeerId], data: openArray[byte]): Result[PeerId, cstring] =
## Create new peer id from raw binary representation ``data``.
var res: PeerID
var res: PeerId
if not init(res, data):
err("peerid: incorrect PeerID binary form")
err("peerid: incorrect PeerId binary form")
else:
ok(res)
func init*(t: typedesc[PeerID], data: string): Result[PeerID, cstring] =
func init*(t: typedesc[PeerId], data: string): Result[PeerId, cstring] =
## Create new peer id from base58 encoded string representation ``data``.
var res: PeerID
var res: PeerId
if not init(res, data):
err("peerid: incorrect PeerID string")
err("peerid: incorrect PeerId string")
else:
ok(res)
func init*(t: typedesc[PeerID], pubkey: PublicKey): Result[PeerID, cstring] =
func init*(t: typedesc[PeerId], pubkey: PublicKey): Result[PeerId, cstring] =
## Create new peer id from public key ``pubkey``.
var pubraw = ? pubkey.getBytes().orError(
cstring("peerid: failed to get bytes from given key"))
@ -169,23 +169,23 @@ func init*(t: typedesc[PeerID], pubkey: PublicKey): Result[PeerID, cstring] =
mh = ? MultiHash.digest("identity", pubraw)
else:
mh = ? MultiHash.digest("sha2-256", pubraw)
ok(PeerID(data: mh.data.buffer))
ok(PeerId(data: mh.data.buffer))
func init*(t: typedesc[PeerID], seckey: PrivateKey): Result[PeerID, cstring] =
func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
## Create new peer id from private key ``seckey``.
PeerID.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
func match*(pid: PeerID, pubkey: PublicKey): bool =
func match*(pid: PeerId, pubkey: PublicKey): bool =
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
let p = PeerID.init(pubkey)
let p = PeerId.init(pubkey)
if p.isErr:
false
else:
pid == p.get()
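# Usage sketch (not part of the diff) for the PeerId API above. newRng,
# PrivateKey.random and Ed25519 are assumed to come from libp2p/crypto/crypto;
# the remaining calls are defined in this file.
import stew/results
import libp2p/[peerid, crypto/crypto]

let rng = newRng()
let seckey = PrivateKey.random(Ed25519, rng[]).tryGet()
let pid = PeerId.init(seckey).tryGet()

assert pid.validate()
assert pid.match(seckey)
echo pid                                    # base58 form via `$`
assert PeerId.init($pid).tryGet() == pid    # string round trip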
func match*(pid: PeerID, seckey: PrivateKey): bool =
func match*(pid: PeerId, seckey: PrivateKey): bool =
## Returns ``true`` if ``pid`` matches private key ``seckey``.
let p = PeerID.init(seckey)
let p = PeerId.init(seckey)
if p.isErr:
false
else:
@ -193,23 +193,23 @@ func match*(pid: PeerID, seckey: PrivateKey): bool =
## Serialization/Deserialization helpers
func write*(vb: var VBuffer, pid: PeerID) =
## Write PeerID value ``peerid`` to buffer ``vb``.
func write*(vb: var VBuffer, pid: PeerId) =
## Write PeerId value ``peerid`` to buffer ``vb``.
vb.writeSeq(pid.data)
func write*(pb: var ProtoBuffer, field: int, pid: PeerID) =
## Write PeerID value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
func write*(pb: var ProtoBuffer, field: int, pid: PeerId) =
## Write PeerId value ``peerid`` to object ``pb`` using ProtoBuf's encoding.
write(pb, field, pid.data)
func getField*(pb: ProtoBuffer, field: int,
pid: var PeerID): ProtoResult[bool] {.inline.} =
## Read ``PeerID`` from ProtoBuf's message and validate it
pid: var PeerId): ProtoResult[bool] {.inline.} =
## Read ``PeerId`` from ProtoBuf's message and validate it
var buffer: seq[byte]
let res = ? pb.getField(field, buffer)
if not(res):
ok(false)
else:
var peerId: PeerID
var peerId: PeerId
if peerId.init(buffer):
pid = peerId
ok(true)

View File

@ -21,7 +21,7 @@ type
PeerInfoError* = LPError
PeerInfo* = ref object
peerId*: PeerID
peerId*: PeerId
addrs*: seq[MultiAddress]
protocols*: seq[string]
protoVersion*: string
@ -42,8 +42,8 @@ chronicles.formatIt(PeerInfo): shortLog(it)
proc new*(
p: typedesc[PeerInfo],
key: PrivateKey,
addrs: openarray[MultiAddress] = [],
protocols: openarray[string] = [],
addrs: openArray[MultiAddress] = [],
protocols: openArray[string] = [],
protoVersion: string = "",
agentVersion: string = ""): PeerInfo
{.raises: [Defect, PeerInfoError].} =
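# Usage sketch (not part of the diff) for PeerInfo.new above; the key and rng
# helpers are assumed to come from libp2p/crypto/crypto and the address is
# illustrative.
import stew/results
import libp2p/[peerinfo, multiaddress, crypto/crypto]

let rng = newRng()
let key = PrivateKey.random(Ed25519, rng[]).tryGet()
let info = PeerInfo.new(
  key,
  addrs = [MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()],
  protocols = ["/my/proto/1.0.0"],
  agentVersion = "example/0.1")
echo info.peerId, " ", info.addrs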
@ -54,7 +54,7 @@ proc new*(
raise newException(PeerInfoError, "invalid private key")
let peerInfo = PeerInfo(
peerId: PeerID.init(key).tryGet(),
peerId: PeerId.init(key).tryGet(),
publicKey: pubkey,
privateKey: key,
protoVersion: protoVersion,

View File

@ -21,7 +21,7 @@ type
# Handler types #
#################
PeerBookChangeHandler*[T] = proc(peerId: PeerID, entry: T)
PeerBookChangeHandler*[T] = proc(peerId: PeerId, entry: T)
AddrChangeHandler* = PeerBookChangeHandler[HashSet[MultiAddress]]
ProtoChangeHandler* = PeerBookChangeHandler[HashSet[string]]
@ -33,7 +33,7 @@ type
# Each book contains a book (map) and event handler(s)
PeerBook*[T] = object of RootObj
book*: Table[PeerID, T]
book*: Table[PeerId, T]
changeHandlers: seq[PeerBookChangeHandler[T]]
SetPeerBook*[T] = object of PeerBook[HashSet[T]]
@ -65,13 +65,13 @@ proc new*(T: type PeerStore): PeerStore =
#########################
proc get*[T](peerBook: PeerBook[T],
peerId: PeerID): T =
peerId: PeerId): T =
## Get all the known metadata of a provided peer.
peerBook.book.getOrDefault(peerId)
proc set*[T](peerBook: var PeerBook[T],
peerId: PeerID,
peerId: PeerId,
entry: T) =
## Set metadata for a given peerId. This will replace any
## previously stored metadata.
@ -83,7 +83,7 @@ proc set*[T](peerBook: var PeerBook[T],
handler(peerId, peerBook.get(peerId))
proc delete*[T](peerBook: var PeerBook[T],
peerId: PeerID): bool =
peerId: PeerId): bool =
## Delete the provided peer from the book.
if not peerBook.book.hasKey(peerId):
@ -92,7 +92,7 @@ proc delete*[T](peerBook: var PeerBook[T],
peerBook.book.del(peerId)
return true
proc contains*[T](peerBook: PeerBook[T], peerId: PeerID): bool =
proc contains*[T](peerBook: PeerBook[T], peerId: PeerId): bool =
peerId in peerBook.book
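# Usage sketch (not part of the diff) for the generic PeerBook API above; the
# peer id is derived from a random key (crypto helpers assumed) and the module
# path is assumed to be libp2p/peerstore.
import stew/results
import libp2p/[peerstore, peerid, crypto/crypto]

var protoBook: PeerBook[seq[string]]
let rng = newRng()
let pid = PeerId.init(PrivateKey.random(Ed25519, rng[]).tryGet()).tryGet()

protoBook.set(pid, @["/chat/1.0.0"])
assert pid in protoBook            # contains*
echo protoBook.get(pid)            # @["/chat/1.0.0"]
discard protoBook.delete(pid)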
################
@ -101,7 +101,7 @@ proc contains*[T](peerBook: PeerBook[T], peerId: PeerID): bool =
proc add*[T](
peerBook: var SetPeerBook[T],
peerId: PeerID,
peerId: PeerId,
entry: T) =
## Add entry to a given peer. If the peer is not known,
## it will be set with the provided entry.
@ -116,7 +116,7 @@ proc add*[T](
# Helper for seq
proc set*[T](
peerBook: var SetPeerBook[T],
peerId: PeerID,
peerId: PeerId,
entry: seq[T]) =
## Add entry to a given peer. If the peer is not known,
## it will be set with the provided entry.
@ -138,7 +138,7 @@ proc addHandlers*(peerStore: PeerStore,
peerStore.keyBook.changeHandlers.add(keyChangeHandler)
proc delete*(peerStore: PeerStore,
peerId: PeerID): bool =
peerId: PeerId): bool =
## Delete the provided peer from every book.
peerStore.addressBook.delete(peerId) and

View File

@ -123,7 +123,7 @@ proc initProtoBuffer*(data: seq[byte], offset = 0,
result.offset = offset
result.options = options
proc initProtoBuffer*(data: openarray[byte], offset = 0,
proc initProtoBuffer*(data: openArray[byte], offset = 0,
options: set[ProtoFlags] = {}): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
@ -191,7 +191,7 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer,
pb.offset += sizeof(T)
proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
value: openarray[T]) =
value: openArray[T]) =
checkFieldNumber(field)
var length = 0
let dlength =
@ -239,7 +239,7 @@ proc writePacked*[T: ProtoScalar](pb: var ProtoBuffer, field: int,
pb.offset += sizeof(T)
proc write*[T: byte|char](pb: var ProtoBuffer, field: int,
value: openarray[T]) =
value: openArray[T]) =
checkFieldNumber(field)
var length = 0
let flength = vsizeof(getProtoHeader(field, ProtoFieldKind.Length)) +
@ -385,7 +385,7 @@ proc getValue[T: ProtoScalar](data: var ProtoBuffer,
err(ProtoError.MessageIncomplete)
proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
outBytes: var openarray[T],
outBytes: var openArray[T],
outLength: var int): ProtoResult[void] =
doAssert(header.wire == ProtoFieldKind.Length)
var length = 0
@ -478,7 +478,7 @@ proc getField*[T: ProtoScalar](data: ProtoBuffer, field: int,
ok(false)
proc getField*[T: byte|char](data: ProtoBuffer, field: int,
output: var openarray[T],
output: var openArray[T],
outlen: var int): ProtoResult[bool] =
checkFieldNumber(field)
var pb = data
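# Round-trip sketch (not part of the diff) for the ProtoBuffer helpers above;
# module path assumed as libp2p/protobuf/minprotobuf, string.fromBytes from
# stew/byteutils.
import stew/[results, byteutils]
import libp2p/protobuf/minprotobuf

var enc = initProtoBuffer()
enc.write(1, "hello")        # field 1, length-delimited
enc.write(2, 42'u64)         # field 2, varint
enc.finish()

var dec = initProtoBuffer(enc.buffer)
var name: seq[byte]
var number: uint64
if dec.getField(1, name).get(false) and dec.getField(2, number).get(false):
  echo string.fromBytes(name), " ", number   # hello 42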

View File

@ -37,7 +37,7 @@ type
IdentifyNoPubKeyError* = object of IdentifyError
IdentifyInfo* = object
pubKey*: Option[PublicKey]
pubkey*: Option[PublicKey]
peerId*: PeerId
addrs*: seq[MultiAddress]
observedAddr*: Option[MultiAddress]
@ -57,7 +57,7 @@ type
IdentifyPush* = ref object of LPProtocol
identifyHandler: IdentifyPushHandler
proc encodeMsg*(peerInfo: PeerInfo, observedAddr: Multiaddress): ProtoBuffer
proc encodeMsg*(peerInfo: PeerInfo, observedAddr: MultiAddress): ProtoBuffer
{.raises: [Defect, IdentifyNoPubKeyError].} =
result = initProtoBuffer()
@ -81,14 +81,14 @@ proc encodeMsg*(peerInfo: PeerInfo, observedAddr: Multiaddress): ProtoBuffer
proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
var
iinfo: IdentifyInfo
pubKey: PublicKey
pubkey: PublicKey
oaddr: MultiAddress
protoVersion: string
agentVersion: string
var pb = initProtoBuffer(buf)
let r1 = pb.getField(1, pubKey)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getRepeatedField(2, iinfo.addrs)
let r3 = pb.getRepeatedField(3, iinfo.protos)
let r4 = pb.getField(4, oaddr)
@ -100,14 +100,14 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
if res:
if r1.get():
iinfo.pubKey = some(pubKey)
iinfo.pubkey = some(pubkey)
if r4.get():
iinfo.observedAddr = some(oaddr)
if r5.get():
iinfo.protoVersion = some(protoVersion)
if r6.get():
iinfo.agentVersion = some(agentVersion)
debug "decodeMsg: decoded message", pubkey = ($pubKey).shortLog,
debug "decodeMsg: decoded message", pubkey = ($pubkey).shortLog,
addresses = $iinfo.addrs, protocols = $iinfo.protos,
observable_address = $iinfo.observedAddr,
proto_version = $iinfo.protoVersion,
@ -153,8 +153,8 @@ proc identify*(p: Identify,
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
result = infoOpt.get()
if result.pubKey.isSome:
let peer = PeerID.init(result.pubKey.get())
if result.pubkey.isSome:
let peer = PeerId.init(result.pubkey.get())
if peer.isErr:
raise newException(IdentityInvalidMsgError, $peer.error)
else:
@ -185,8 +185,8 @@ proc init*(p: IdentifyPush) =
var indentInfo = infoOpt.get()
if indentInfo.pubKey.isSome:
let receivedPeerId = PeerID.init(indentInfo.pubKey.get()).tryGet()
if indentInfo.pubkey.isSome:
let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
if receivedPeerId != conn.peerId:
raise newException(IdentityNoMatchError, "Peer ids don't match")
indentInfo.peerId = receivedPeerId
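# Sketch (not part of the diff): decodeMsg above accepts a raw identify payload;
# here a minimal one is built by hand with minprotobuf, using the field numbers
# from the getField calls above. Module paths are assumptions.
import options
import libp2p/protobuf/minprotobuf
import libp2p/protocols/identify

var msg = initProtoBuffer()
msg.write(5, "ipfs/0.1.0")          # protoVersion
msg.write(6, "nim-libp2p/0.0.1")    # agentVersion
msg.finish()

let info = decodeMsg(msg.buffer)
if info.isSome:
  echo info.get().protoVersion.get(""), " ", info.get().agentVersion.get("")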

View File

@ -75,7 +75,7 @@ proc handleSubscribe*(f: FloodSub,
# unsubscribe the peer from the topic
peers[].excl(peer)
method unsubscribePeer*(f: FloodSub, peer: PeerID) =
method unsubscribePeer*(f: FloodSub, peer: PeerId) =
## handle peer disconnects
##
trace "unsubscribing floodsub peer", peer

View File

@ -169,7 +169,7 @@ method onPubSubPeerEvent*(p: GossipSub, peer: PubsubPeer, event: PubSubPeerEvent
procCall FloodSub(p).onPubSubPeerEvent(peer, event)
method unsubscribePeer*(g: GossipSub, peer: PeerID) =
method unsubscribePeer*(g: GossipSub, peer: PeerId) =
## handle peer disconnects
##

View File

@ -51,7 +51,7 @@ proc pruned*(g: GossipSub,
backoffMoment = Moment.fromNow(backoffDuration)
g.backingOff
.mgetOrPut(topic, initTable[PeerID, Moment]())[p.peerId] = backoffMoment
.mgetOrPut(topic, initTable[PeerId, Moment]())[p.peerId] = backoffMoment
g.peerStats.withValue(p.peerId, stats):
stats.topicInfos.withValue(topic, info):
@ -71,7 +71,7 @@ proc pruned*(g: GossipSub,
proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
let now = Moment.now()
var expired = toSeq(t.getOrDefault(topic).pairs())
expired.keepIf do (pair: tuple[peer: PeerID, expire: Moment]) -> bool:
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
now >= pair.expire
for (peer, _) in expired:
t.withValue(topic, v):
@ -84,7 +84,7 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises:
# by spec, larger than Dhi, but let's put some hard caps
peers.setLen(min(peers.len, g.parameters.dHigh * 2))
peers.map do (x: PubSubPeer) -> PeerInfoMsg:
PeerInfoMsg(peerID: x.peerId.getBytes())
PeerInfoMsg(peerId: x.peerId.getBytes())
proc handleGraft*(g: GossipSub,
peer: PubSubPeer,
@ -107,7 +107,7 @@ proc handleGraft*(g: GossipSub,
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
peer.behaviourPenalty += 0.1
@ -129,7 +129,7 @@ proc handleGraft*(g: GossipSub,
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
peer.behaviourPenalty += 0.1
@ -184,7 +184,7 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
if backoff > current:
g.backingOff
.mgetOrPut(topic, initTable[PeerID, Moment]())[peer.peerId] = backoff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
trace "pruning rpc received peer", peer, score = peer.score
g.pruned(peer, topic, setBackoff = false)

View File

@ -73,7 +73,7 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
else:
let
address = peer.address.get()
g.peersInIP.mgetOrPut(address, initHashSet[PeerID]()).incl(peer.peerId)
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
@ -109,7 +109,7 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updating scores", peers = g.peers.len
let now = Moment.now()
var evicting: seq[PeerID]
var evicting: seq[PeerId]
for peerId, stats in g.peerStats.mpairs:
let peer = g.peers.getOrDefault(peerId)

View File

@ -139,7 +139,7 @@ type
disconnectBadPeers*: bool
BackoffTable* = Table[string, Table[PeerID, Moment]]
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[MessageID, HashSet[PubSubPeer]]
GossipSub* = ref object of FloodSub
@ -156,11 +156,11 @@ type
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
heartbeatRunning*: bool
peerStats*: Table[PeerID, PeerStats]
peerStats*: Table[PeerId, PeerStats]
parameters*: GossipSubParams
topicParams*: Table[string, TopicParams]
directPeersLoop*: Future[void]
peersInIP*: Table[MultiAddress, HashSet[PeerID]]
peersInIP*: Table[MultiAddress, HashSet[PeerId]]
heartbeatEvents*: seq[AsyncEvent]

View File

@ -15,7 +15,7 @@ import ./pubsubpeer, ../../peerid
type
PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map
proc hasPeerID*(t: PeerTable, topic: string, peerId: PeerID): bool =
proc hasPeerId*(t: PeerTable, topic: string, peerId: PeerId): bool =
if topic in t:
try:
for peer in t[topic]:

View File

@ -94,7 +94,7 @@ type
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
topics*: Table[string, seq[TopicHandler]] # the topics that _we_ are interested in
peers*: Table[PeerID, PubSubPeer] ##\
peers*: Table[PeerId, PubSubPeer] ##\
## Peers that we are interested in gossiping with (but not necessarily
## yet connected to)
triggerSelf*: bool # trigger own local handler on publish
@ -119,7 +119,7 @@ type
knownTopics*: HashSet[string]
method unsubscribePeer*(p: PubSub, peerId: PeerID) {.base.} =
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
## handle peer disconnects
##
@ -273,7 +273,7 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubsubPeer, event: PubsubPeerEvent) {
proc getOrCreatePeer*(
p: PubSub,
peerId: PeerID,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
p.peers.withValue(peerId, peer):
return peer[]
@ -374,7 +374,7 @@ method handleConn*(p: PubSub,
finally:
await conn.closeWithEOF()
method subscribePeer*(p: PubSub, peer: PeerID) {.base.} =
method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
## subscribe to remote peer to receive/send pubsub
## messages
##

View File

@ -53,7 +53,7 @@ type
codec*: string # the protocol that this peer joined from
sendConn*: Connection # cached send connection
address*: Option[MultiAddress]
peerId*: PeerID
peerId*: PeerId
handler*: RPCHandler
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
@ -281,7 +281,7 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
proc new*(
T: typedesc[PubSubPeer],
peerId: PeerID,
peerId: PeerId,
getConn: GetConn,
dropConn: DropConn,
onEvent: OnEvent,

View File

@ -17,7 +17,7 @@ export options
type
PeerInfoMsg* = object
peerID*: seq[byte]
peerId*: seq[byte]
signedPeerRecord*: seq[byte]
SubOpts* = object

View File

@ -39,7 +39,7 @@ proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =
proc write*(pb: var ProtoBuffer, field: int, infoMsg: PeerInfoMsg) =
var ipb = initProtoBuffer()
ipb.write(1, infoMsg.peerID)
ipb.write(1, infoMsg.peerId)
ipb.write(2, infoMsg.signedPeerRecord)
ipb.finish()
pb.write(field, ipb)
@ -142,10 +142,10 @@ proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.
inline.} =
trace "decodePeerInfoMsg: decoding message"
var pi = PeerInfoMsg()
if ? pb.getField(1, pi.peerID):
trace "decodePeerInfoMsg: read peerID", peerID = pi.peerID
if ? pb.getField(1, pi.peerId):
trace "decodePeerInfoMsg: read peerId", peerId = pi.peerId
else:
trace "decodePeerInfoMsg: peerID is missing"
trace "decodePeerInfoMsg: peerId is missing"
if ? pb.getField(2, pi.signedPeerRecord):
trace "decodePeerInfoMsg: read signedPeerRecord", signedPeerRecord = pi.signedPeerRecord
else:

View File

@ -146,7 +146,7 @@ proc encrypt(
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseNonceMaxError].} =
result = newSeqOfCap[byte](data.len + sizeof(ChachaPolyTag))
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
result.add(data)
let tag = encrypt(state, result, ad)
@ -217,7 +217,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
# according to the spec, if the key is empty, the data is left as plaintext
if ss.cs.hasKey:
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
result = ss.cs.decryptWithAd(ss.h.data, data)
else:
result = @data
@ -368,7 +368,7 @@ proc handshakeXXOutbound(
dh_se()
# the last payload must be sent encrypted
msg.add hs.ss.encryptAndHash(p2psecret)
msg.add hs.ss.encryptAndHash(p2pSecret)
await conn.sendHSMessage(msg.data)
@ -408,7 +408,7 @@ proc handshakeXXInbound(
write_s()
dh_es()
msg.add hs.ss.encryptAndHash(p2psecret)
msg.add hs.ss.encryptAndHash(p2pSecret)
await conn.sendHSMessage(msg.data)
msg.clear()
@ -431,7 +431,7 @@ method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
while true: # Discard 0-length payloads
let frame = await sconn.stream.readFrame()
sconn.activity = true
if frame.len > 0:
if frame.len > ChaChaPolyTag.len:
let res = sconn.readCs.decryptWithAd([], frame)
if res.len > 0:
when defined(libp2p_dump):
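
For context on the two ChaChaPolyTag.len guards introduced above (a note outside the diff): every encrypted Noise frame ends with a ChaCha20-Poly1305 authentication tag, so a frame that is not strictly longer than the tag cannot carry any plaintext. A tiny sketch of the size relationship, assuming the standard 16-byte tag:

const chaChaPolyTagLen = 16                           # size of a ChaCha20-Poly1305 tag
let plaintextLen = 5
let ciphertextLen = plaintextLen + chaChaPolyTagLen   # encryptWithAd reserves data.len + tag
doAssert ciphertextLen > chaChaPolyTagLen             # only such frames hold payload to decrypt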
@ -554,7 +554,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
trace "Remote signature verified", conn
if initiator:
let pid = PeerID.init(remotePubKey)
let pid = PeerId.init(remotePubKey)
if not conn.peerId.validate():
raise newException(NoiseHandshakeError, "Failed to validate peerId.")
if pid.isErr or pid.get() != conn.peerId:
@ -567,7 +567,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
received_key = $remotePubKey
raise newException(NoiseHandshakeError, "Noise handshake, peer infos don't match! " & $pid & " != " & $conn.peerId)
else:
let pid = PeerID.init(remotePubKey)
let pid = PeerId.init(remotePubKey)
if pid.isErr:
raise newException(NoiseHandshakeError, "Invalid remote peer id")
conn.peerId = pid.get()

View File

@ -83,7 +83,7 @@ func shortLog*(conn: SecioConn): auto =
chronicles.formatIt(SecioConn): shortLog(it)
proc init(mac: var SecureMac, hash: string, key: openarray[byte]) =
proc init(mac: var SecureMac, hash: string, key: openArray[byte]) =
if hash == "SHA256":
mac = SecureMac(kind: SecureMacType.Sha256)
mac.ctxsha256.init(key)
@ -94,7 +94,7 @@ proc init(mac: var SecureMac, hash: string, key: openarray[byte]) =
mac = SecureMac(kind: SecureMacType.Sha1)
mac.ctxsha1.init(key)
proc update(mac: var SecureMac, data: openarray[byte]) =
proc update(mac: var SecureMac, data: openArray[byte]) =
case mac.kind
of SecureMacType.Sha256:
update(mac.ctxsha256, data)
@ -112,7 +112,7 @@ proc sizeDigest(mac: SecureMac): int {.inline.} =
of SecureMacType.Sha1:
result = int(mac.ctxsha1.sizeDigest())
proc finish(mac: var SecureMac, data: var openarray[byte]) =
proc finish(mac: var SecureMac, data: var openArray[byte]) =
case mac.kind
of SecureMacType.Sha256:
discard finish(mac.ctxsha256, data)
@ -130,8 +130,8 @@ proc reset(mac: var SecureMac) =
of SecureMacType.Sha1:
reset(mac.ctxsha1)
proc init(sc: var SecureCipher, cipher: string, key: openarray[byte],
iv: openarray[byte]) {.inline.} =
proc init(sc: var SecureCipher, cipher: string, key: openArray[byte],
iv: openArray[byte]) {.inline.} =
if cipher == "AES-128":
sc = SecureCipher(kind: SecureCipherType.Aes128)
sc.ctxaes128.init(key, iv)
@ -142,8 +142,8 @@ proc init(sc: var SecureCipher, cipher: string, key: openarray[byte],
sc = SecureCipher(kind: SecureCipherType.Twofish)
sc.ctxtwofish256.init(key, iv)
proc encrypt(cipher: var SecureCipher, input: openarray[byte],
output: var openarray[byte]) {.inline.} =
proc encrypt(cipher: var SecureCipher, input: openArray[byte],
output: var openArray[byte]) {.inline.} =
case cipher.kind
of SecureCipherType.Aes128:
cipher.ctxaes128.encrypt(input, output)
@ -152,8 +152,8 @@ proc encrypt(cipher: var SecureCipher, input: openarray[byte],
of SecureCipherType.Twofish:
cipher.ctxtwofish256.encrypt(input, output)
proc decrypt(cipher: var SecureCipher, input: openarray[byte],
output: var openarray[byte]) {.inline.} =
proc decrypt(cipher: var SecureCipher, input: openArray[byte],
output: var openArray[byte]) {.inline.} =
case cipher.kind
of SecureCipherType.Aes128:
cipher.ctxaes128.decrypt(input, output)
@ -300,8 +300,8 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
remoteExchanges: string
remoteCiphers: string
remoteHashes: string
remotePeerId: PeerID
localPeerId: PeerID
remotePeerId: PeerId
localPeerId: PeerId
localBytesPubkey = s.localPublicKey.getBytes().tryGet()
brHmacDrbgGenerate(s.rng[], localNonce)
@ -312,7 +312,7 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
SecioCiphers,
SecioHashes)
localPeerId = PeerID.init(s.localPublicKey).tryGet()
localPeerId = PeerId.init(s.localPublicKey).tryGet()
trace "Local proposal", schemes = SecioExchanges,
ciphers = SecioCiphers,
@ -336,9 +336,9 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
pubkey = remoteBytesPubkey.shortLog
raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")
remotePeerId = PeerID.init(remotePubkey).tryGet()
remotePeerId = PeerId.init(remotePubkey).tryGet()
# TODO: PeerID check against supplied PeerID
# TODO: PeerId check against supplied PeerId
if not initiator:
conn.peerId = remotePeerId
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,

View File

@ -45,7 +45,7 @@ chronicles.formatIt(SecureConn): shortLog(it)
proc new*(T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Multiaddress,
observedAddr: MultiAddress,
timeout: Duration = DefaultConnectionTimeout): T =
result = T(stream: conn,
peerId: peerId,

View File

@ -34,7 +34,7 @@ type
timerTaskFut: Future[void] # the current timer instance
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Multiaddress
observedAddr*: MultiAddress
upgraded*: Future[void]
tag*: string # debug tag for metrics (generally ms protocol)
transportDir*: Direction # The bottom level transport (generally the socket) direction

View File

@ -86,43 +86,43 @@ proc removePeerEventHandler*(s: Switch,
kind: PeerEventKind) =
s.connManager.removePeerEventHandler(handler, kind)
proc isConnected*(s: Switch, peerId: PeerID): bool =
proc isConnected*(s: Switch, peerId: PeerId): bool =
## returns true if the peer has one or more
## associated connections (sockets)
##
peerId in s.connManager
proc disconnect*(s: Switch, peerId: PeerID): Future[void] {.gcsafe.} =
proc disconnect*(s: Switch, peerId: PeerId): Future[void] {.gcsafe.} =
s.connManager.dropPeer(peerId)
method connect*(
s: Switch,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress]): Future[void] =
s.dialer.connect(peerId, addrs)
method dial*(
s: Switch,
peerId: PeerID,
peerId: PeerId,
protos: seq[string]): Future[Connection] =
s.dialer.dial(peerId, protos)
proc dial*(s: Switch,
peerId: PeerID,
peerId: PeerId,
proto: string): Future[Connection] =
dial(s, peerId, @[proto])
method dial*(
s: Switch,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress],
protos: seq[string]): Future[Connection] =
s.dialer.dial(peerId, addrs, protos)
proc dial*(
s: Switch,
peerId: PeerID,
peerId: PeerId,
addrs: seq[MultiAddress],
proto: string): Future[Connection] =
dial(s, peerId, addrs, @[proto])
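
A quick usage sketch for the PeerId-taking Switch API above, outside the diff; the wrapping proc, the already-started switch, and the protocol string are assumptions:

import chronos
import libp2p

proc example(switch: Switch, remote: PeerId, addrs: seq[MultiAddress]) {.async.} =
  # connect only establishes a connection; dial additionally negotiates a protocol
  await switch.connect(remote, addrs)
  let conn = await switch.dial(remote, addrs, "/test/proto/1.0.0")  # hypothetical protocol id
  doAssert switch.isConnected(remote)
  await conn.close()
  await switch.disconnect(remote)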
@ -212,9 +212,9 @@ proc stop*(s: Switch) {.async.} =
# close and cleanup all connections
await s.connManager.close()
for t in s.transports:
for transp in s.transports:
try:
await t.stop()
await transp.stop()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@ -269,7 +269,7 @@ proc newSwitch*(peerInfo: PeerInfo,
transports: seq[Transport],
identity: Identify,
muxers: Table[string, MuxerProvider],
secureManagers: openarray[Secure] = [],
secureManagers: openArray[Secure] = [],
connManager: ConnManager,
ms: MultistreamSelect,
nameResolver: NameResolver = nil): Switch

View File

@ -26,7 +26,7 @@ type
TransportClosedError* = object of TransportError
Transport* = ref object of RootObj
addrs*: seq[Multiaddress]
addrs*: seq[MultiAddress]
running*: bool
upgrader*: Upgrade

View File

@ -174,7 +174,7 @@ method stop*(self: WsTransport) {.async, gcsafe.} =
trace "Error shutting down ws transport", exc = exc.msg
proc connHandler(self: WsTransport,
stream: WsSession,
stream: WSSession,
secure: bool,
dir: Direction): Future[Connection] {.async.} =
let observedAddr =

View File

@ -198,7 +198,7 @@ proc new*(
T: type MuxedUpgrade,
identity: Identify,
muxers: Table[string, MuxerProvider],
secureManagers: openarray[Secure] = [],
secureManagers: openArray[Secure] = [],
connManager: ConnManager,
ms: MultistreamSelect): T =

View File

@ -76,7 +76,7 @@ proc identify*(
info = await self.identity.identify(conn, conn.peerId)
peerStore = self.connManager.peerStore
if info.pubKey.isNone and isNil(conn):
if info.pubkey.isNone and isNil(conn):
raise newException(UpgradeFailedError,
"no public key provided and no existing peer identity found")

View File

@ -14,7 +14,7 @@ import stew/byteutils
const
ShortDumpMax = 12
func shortLog*(item: openarray[byte]): string =
func shortLog*(item: openArray[byte]): string =
if item.len <= ShortDumpMax:
result = item.toHex()
else:

View File

@ -103,7 +103,7 @@ proc vsizeof*(x: SomeVarint): int {.inline.} =
Leb128.len(toUleb(x))
proc getUVarint*[T: PB|LP](vtype: typedesc[T],
pbytes: openarray[byte],
pbytes: openArray[byte],
outlen: var int,
outval: var SomeUVarint): VarintResult[void] =
## Decode `unsigned varint` from buffer ``pbytes`` and store it to ``outval``.
@ -149,7 +149,7 @@ proc getUVarint*[T: PB|LP](vtype: typedesc[T],
ok()
proc putUVarint*[T: PB|LP](vtype: typedesc[T],
pbytes: var openarray[byte],
pbytes: var openArray[byte],
outlen: var int,
outval: SomeUVarint): VarintResult[void] =
## Encode `unsigned varint` ``outval`` and store it to array ``pbytes``.
@ -180,7 +180,7 @@ proc putUVarint*[T: PB|LP](vtype: typedesc[T],
else:
err(VarintError.Overrun)
proc getSVarint*(pbytes: openarray[byte], outsize: var int,
proc getSVarint*(pbytes: openArray[byte], outsize: var int,
outval: var (PBZigVarint | PBSomeSVarint)): VarintResult[void] {.inline.} =
## Decode signed integer (``int32`` or ``int64``) from buffer ``pbytes``
## and store it to ``outval``.
@ -210,7 +210,7 @@ proc getSVarint*(pbytes: openarray[byte], outsize: var int,
outval = fromUleb(value, type(outval))
res
proc putSVarint*(pbytes: var openarray[byte], outsize: var int,
proc putSVarint*(pbytes: var openArray[byte], outsize: var int,
outval: (PBZigVarint | PBSomeSVarint)): VarintResult[void] {.inline.} =
## Encode signed integer ``outval`` using ProtoBuffer's zigzag encoding
## (``sint32`` or ``sint64``) and store it to array ``pbytes``.
@ -230,7 +230,7 @@ template varintFatal(msg) =
const m = msg
{.fatal: m.}
proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openarray[byte],
proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openArray[byte],
nbytes: var int, value: SomeVarint): VarintResult[void] {.inline.} =
when vtype is PB:
when (type(value) is PBSomeSVarint) or (type(value) is PBZigVarint):
@ -247,7 +247,7 @@ proc putVarint*[T: PB|LP](vtype: typedesc[T], pbytes: var openarray[byte],
varintFatal("LibP2P's varint do not support type [" &
typetraits.name(type(value)) & "]")
proc getVarint*[T: PB|LP](vtype: typedesc[T], pbytes: openarray[byte],
proc getVarint*[T: PB|LP](vtype: typedesc[T], pbytes: openArray[byte],
nbytes: var int,
value: var SomeVarint): VarintResult[void] {.inline.} =
when vtype is PB:
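
A round-trip sketch for the varint helpers touched above (outside the diff), mirroring how the test suite later in this commit calls them:

import libp2p/varint

var
  buffer = newSeq[byte](10)
  length = 0
  value: uint64

# encode 300 as a protobuf-style unsigned varint, then decode it back
doAssert PB.putUVarint(buffer, length, 300'u64).isOk()
doAssert length == 2                                  # 300 needs two varint bytes
doAssert PB.getUVarint(buffer, length, value).isOk()
doAssert value == 300'u64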

View File

@ -53,7 +53,7 @@ proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
shallowCopy(result.buffer, data)
result.offset = offset
proc initVBuffer*(data: openarray[byte], offset = 0): VBuffer =
proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =
## Initialize VBuffer with copy of ``data``.
result.buffer = newSeq[byte](len(data))
if len(data) > 0:
@ -88,7 +88,7 @@ proc writeLPVarint*(vb: var VBuffer, value: LPSomeUVarint) =
proc writeVarint*(vb: var VBuffer, value: LPSomeUVarint) =
writeLPVarint(vb, value)
proc writeSeq*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
proc writeSeq*[T: byte|char](vb: var VBuffer, value: openArray[T]) =
## Write array ``value`` to buffer ``vb``, value will be prefixed with
## varint length of the array.
var length = 0
@ -101,7 +101,7 @@ proc writeSeq*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
copyMem(addr vb.buffer[vb.offset], unsafeAddr value[0], len(value))
vb.offset += len(value)
proc writeArray*[T: byte|char](vb: var VBuffer, value: openarray[T]) =
proc writeArray*[T: byte|char](vb: var VBuffer, value: openArray[T]) =
## Write array ``value`` to buffer ``vb``, value will NOT be prefixed with
## varint length of the array.
if len(value) > 0:
@ -151,7 +151,7 @@ proc peekSeq*[T: string|seq[byte]](vb: var VBuffer, value: var T): int =
vb.offset -= length
proc peekArray*[T: char|byte](vb: var VBuffer,
value: var openarray[T]): int =
value: var openArray[T]): int =
## Peek array from buffer ``vb`` and store result to ``value``.
##
## This procedure will not adjust internal offset.
@ -183,7 +183,7 @@ proc readSeq*[T: string|seq[byte]](vb: var VBuffer,
vb.offset += result
proc readArray*[T: char|byte](vb: var VBuffer,
value: var openarray[T]): int {.inline.} =
value: var openArray[T]): int {.inline.} =
## Read array from buffer ``vb`` and store result to ``value``.
##
## Returns number of bytes consumed from ``vb`` or ``-1`` on error.
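
A short sketch for the VBuffer helpers above (outside the diff): writeSeq length-prefixes its payload with a varint, and readSeq reports how many bytes it consumed. The reader below is fed the exact bytes writeSeq would produce for "hi":

import libp2p/vbuffer

# writing: appends a varint length prefix (0x02) followed by the bytes of "hi"
var wb = initVBuffer(newSeq[byte]())
wb.writeSeq("hi")

# reading: a length-prefixed "hi" is 0x02 'h' 'i'
var rb = initVBuffer(@[2'u8, byte('h'), byte('i')])
var payload = ""
doAssert rb.readSeq(payload) == 3                     # 1 length byte + 2 payload bytes consumed
doAssert payload == "hi"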

View File

@ -19,14 +19,14 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
checkTrackers()
asyncTest "can handle local address":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
check transport1.handles(transport1.addrs[0])
await transport1.stop()
asyncTest "e2e: handle observedAddr":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -54,7 +54,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # under normal conditions this will not take that long!
asyncTest "e2e: handle write":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -82,7 +82,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # under normal conditions this will not take that long!
asyncTest "e2e: handle read":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -108,7 +108,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle dial cancellation":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -125,7 +125,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle accept cancellation":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -186,7 +186,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await transport1.stop()
asyncTest "e2e: stopping transport kills connections":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)
@ -206,7 +206,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
check conn.closed()
asyncTest "read or write on closed connection":
let ma = @[Multiaddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
await transport1.start(ma)

View File

@ -29,7 +29,7 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
var ceil = 15
let fsub = cast[FloodSub](sender)
while not fsub.floodsub.hasKey(key) or
not fsub.floodsub.hasPeerID(key, receiver.peerInfo.peerId):
not fsub.floodsub.hasPeerId(key, receiver.peerInfo.peerId):
await sleepAsync(100.millis)
dec ceil
doAssert(ceil > 0, "waitSub timeout!")

View File

@ -18,7 +18,7 @@ type
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
proc getPubSubPeer(p: TestGossipSub, peerId: PeerID): PubSubPeer =
proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
proc getConn(): Future[Connection] =
p.switch.dial(peerId, GossipSubCodec)
@ -317,8 +317,8 @@ suite "GossipSub internal":
let peers = gossipSub.getGossipPeers()
check peers.len == gossipSub.parameters.d
for p in peers.keys:
check not gossipSub.fanout.hasPeerID(topic, p.peerId)
check not gossipSub.mesh.hasPeerID(topic, p.peerId)
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
@ -552,7 +552,7 @@ suite "GossipSub internal":
peer.sendConn = conn
gossipSub.gossipsub[topic].incl(peer)
gossipSub.backingOff
.mgetOrPut(topic, initTable[PeerID, Moment]())
.mgetOrPut(topic, initTable[PeerId, Moment]())
.add(peerId, Moment.now() + 1.hours)
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
# there must be a control prune due to violation of backoff

View File

@ -44,11 +44,11 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
ev.clear()
while (not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerID(key, receiver.peerInfo.peerId)) and
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerID(key, receiver.peerInfo.peerId)) and
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerID(key , receiver.peerInfo.peerId)):
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."
# await more heartbeats
@ -417,7 +417,7 @@ suite "GossipSub":
check:
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
gossip1.gossipsub.hasPeerID("foobar", gossip2.peerInfo.peerId)
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
await allFuturesThrowing(
nodes[0].switch.stop(),
@ -475,11 +475,11 @@ suite "GossipSub":
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub
gossip1.gossipsub.hasPeerID("foobar", gossip2.peerInfo.peerId) or
gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
gossip2.gossipsub.hasPeerID("foobar", gossip1.peerInfo.peerId) or
gossip2.mesh.hasPeerID("foobar", gossip1.peerInfo.peerId)
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
await allFuturesThrowing(
nodes[0].switch.stop(),
@ -541,8 +541,8 @@ suite "GossipSub":
check:
"foobar" in gossip1.gossipsub
gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
not gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
await passed.wait(2.seconds)
@ -604,10 +604,10 @@ suite "GossipSub":
check:
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub
gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
not gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
gossip2.mesh.hasPeerID("foobar", gossip1.peerInfo.peerId)
not gossip2.fanout.hasPeerID("foobar", gossip1.peerInfo.peerId)
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
await allFuturesThrowing(
nodes[0].switch.stop(),
@ -746,8 +746,8 @@ suite "GossipSub":
check:
"foobar" in gossip1.gossipsub
"foobar" notin gossip2.gossipsub
not gossip1.mesh.hasPeerID("foobar", gossip2.peerInfo.peerId)
not gossip1.fanout.hasPeerID("foobar", gossip2.peerInfo.peerId)
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
not gossip1.fanout.hasPeerId("foobar", gossip2.peerInfo.peerId)
await allFuturesThrowing(
nodes[0].switch.stop(),

View File

@ -41,11 +41,11 @@ proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
ev.clear()
while (not fsub.gossipsub.hasKey(key) or
not fsub.gossipsub.hasPeerID(key, receiver.peerInfo.peerId)) and
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.mesh.hasKey(key) or
not fsub.mesh.hasPeerID(key, receiver.peerInfo.peerId)) and
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
(not fsub.fanout.hasKey(key) or
not fsub.fanout.hasPeerID(key , receiver.peerInfo.peerId)):
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
trace "waitSub sleeping..."
# await more heartbeats

View File

@ -10,13 +10,13 @@ import ../../libp2p/[peerid,
var rng = newRng()
proc randomPeerID(): PeerID =
PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
proc randomPeerId(): PeerId =
PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
suite "MCache":
test "put/get":
var mCache = MCache.init(3, 5)
var msg = Message(fromPeer: randomPeerID(), seqno: "12345".toBytes())
var msg = Message(fromPeer: randomPeerId(), seqno: "12345".toBytes())
let msgId = defaultMsgIdProvider(msg)
mCache.put(msgId, msg)
check mCache.get(msgId).isSome and mCache.get(msgId).get() == msg
@ -25,13 +25,13 @@ suite "MCache":
var mCache = MCache.init(3, 5)
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)
for i in 0..<5:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)
@ -46,7 +46,7 @@ suite "MCache":
var mCache = MCache.init(1, 5)
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)
@ -55,7 +55,7 @@ suite "MCache":
check mCache.window("foo").len == 0
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)
@ -64,7 +64,7 @@ suite "MCache":
check mCache.window("bar").len == 0
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
mCache.put(defaultMsgIdProvider(msg), msg)
@ -76,19 +76,19 @@ suite "MCache":
var mCache = MCache.init(1, 5)
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["foo"])
mCache.put(defaultMsgIdProvider(msg), msg)
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["bar"])
mCache.put(defaultMsgIdProvider(msg), msg)
for i in 0..<3:
var msg = Message(fromPeer: randomPeerID(),
var msg = Message(fromPeer: randomPeerId(),
seqno: "12345".toBytes(),
topicIDs: @["baz"])
mCache.put(defaultMsgIdProvider(msg), msg)

View File

@ -18,7 +18,7 @@ randomize()
proc generateNodes*(
num: Natural,
secureManagers: openarray[SecureProtocol] = [
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise
],
msgIdProvider: MsgIdProvider = nil,
@ -79,7 +79,7 @@ proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
var dialed: seq[PeerID]
var dialed: seq[PeerId]
while dialed.len < nodes.len - 1:
let node = sample(nodes)
if node.peerInfo.peerId notin dialed:

View File

@ -335,7 +335,7 @@ const
"8613E8F86D2DD1CF3CEDC52AD91423F2F31E0003",
]
proc cmp(a, b: openarray[byte]): bool =
proc cmp(a, b: openArray[byte]): bool =
result = (@a == @b)
proc testStretcher(s, e: int, cs: string, ds: string): bool =

View File

@ -36,7 +36,7 @@ suite "Identify":
conn {.threadvar.}: Connection
asyncSetup:
ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
remoteSecKey = PrivateKey.random(ECDSA, rng[]).get()
remotePeerInfo = PeerInfo.new(
remoteSecKey,
@ -72,7 +72,7 @@ suite "Identify":
discard await msDial.select(conn, IdentifyCodec)
let id = await identifyProto2.identify(conn, remotePeerInfo.peerId)
check id.pubKey.get() == remoteSecKey.getPublicKey().get()
check id.pubkey.get() == remoteSecKey.getPublicKey().get()
check id.addrs == ma
check id.protoVersion.get() == ProtoVersion
check id.agentVersion.get() == AgentVersion
@ -95,7 +95,7 @@ suite "Identify":
discard await msDial.select(conn, IdentifyCodec)
let id = await identifyProto2.identify(conn, remotePeerInfo.peerId)
check id.pubKey.get() == remoteSecKey.getPublicKey().get()
check id.pubkey.get() == remoteSecKey.getPublicKey().get()
check id.addrs == ma
check id.protoVersion.get() == ProtoVersion
check id.agentVersion.get() == customAgentVersion

View File

@ -96,7 +96,7 @@ suite "Minimal ASN.1 encode/decode suite":
ncrutils.fromHex(Asn1EdgeExpects[i]) == value
test "ASN.1 DER INTEGER encoding/decoding of native unsigned values test":
proc decodeBuffer(data: openarray[byte]): uint64 =
proc decodeBuffer(data: openArray[byte]): uint64 =
var ab = Asn1Buffer.init(data)
let fres = ab.read()
doAssert(fres.isOk() and fres.get().kind == Asn1Tag.Integer)

View File

@ -84,7 +84,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer
proc getVarintDecodedValue(data: openarray[byte]): uint64 =
proc getVarintDecodedValue(data: openArray[byte]): uint64 =
var value: uint64
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)
@ -97,7 +97,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer
proc getFixed32DecodedValue(data: openarray[byte]): uint32 =
proc getFixed32DecodedValue(data: openArray[byte]): uint32 =
var value: float32
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)
@ -110,7 +110,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer
proc getFixed64DecodedValue(data: openarray[byte]): uint64 =
proc getFixed64DecodedValue(data: openArray[byte]): uint64 =
var value: float64
var pb = initProtoBuffer(data)
let res = pb.getField(1, value)
@ -129,7 +129,7 @@ suite "MinProtobuf test suite":
pb.finish()
return pb.buffer
proc getLengthDecodedValue(data: openarray[byte]): string =
proc getLengthDecodedValue(data: openArray[byte]): string =
var value = newString(len(data))
var valueLen = 0
var pb = initProtoBuffer(data)
@ -138,13 +138,13 @@ suite "MinProtobuf test suite":
value.setLen(valueLen)
value
proc isFullZero[T: byte|char](data: openarray[T]): bool =
proc isFullZero[T: byte|char](data: openArray[T]): bool =
for ch in data:
if int(ch) != 0:
return false
return true
proc corruptHeader(data: var openarray[byte], index: int) =
proc corruptHeader(data: var openArray[byte], index: int) =
var values = [3, 4, 6]
data[0] = data[0] and 0xF8'u8
data[0] = data[0] or byte(values[index mod len(values)])

View File

@ -378,7 +378,7 @@ suite "Mplex":
suite "mplex e2e":
asyncTest "read/write receiver":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -415,7 +415,7 @@ suite "Mplex":
await listenFut
asyncTest "read/write receiver lazy":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -454,7 +454,7 @@ suite "Mplex":
asyncTest "write fragmented":
let
ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
listenJob = newFuture[void]()
var bigseq = newSeqOfCap[uint8](MaxMsgSize * 2)
@ -506,7 +506,7 @@ suite "Mplex":
await listenFut
asyncTest "read/write initiator":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -542,7 +542,7 @@ suite "Mplex":
await listenFut
asyncTest "multiple streams":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -586,7 +586,7 @@ suite "Mplex":
await listenFut
asyncTest "multiple read/write streams":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -633,7 +633,7 @@ suite "Mplex":
await listenFut
asyncTest "channel closes listener with EOF":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
@ -681,7 +681,7 @@ suite "Mplex":
await acceptFut
asyncTest "channel closes dialer with EOF":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var count = 0
@ -746,7 +746,7 @@ suite "Mplex":
await acceptFut
asyncTest "dialing mplex closes both ends":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
@ -788,7 +788,7 @@ suite "Mplex":
await acceptFut
asyncTest "listening mplex closes both ends":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var mplexListen: Mplex
@ -833,7 +833,7 @@ suite "Mplex":
await acceptFut
asyncTest "canceling mplex handler closes both ends":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var mplexHandle: Future[void]
@ -878,7 +878,7 @@ suite "Mplex":
transport2.stop())
asyncTest "closing dialing connection should close both ends":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
@ -923,7 +923,7 @@ suite "Mplex":
await acceptFut
asyncTest "canceling listening connection should close both ends":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenConn: Connection
@ -969,7 +969,7 @@ suite "Mplex":
suite "jitter":
asyncTest "channel should be able to handle erratic read/writes":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
@ -1041,7 +1041,7 @@ suite "Mplex":
await listenFut
asyncTest "channel should handle 1 byte read/write":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)

View File

@ -234,7 +234,7 @@ suite "Multistream select":
await ms.handle(conn)
asyncTest "e2e - handle":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
@ -274,7 +274,7 @@ suite "Multistream select":
await handlerWait.wait(30.seconds)
asyncTest "e2e - ls":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let
handlerWait = newFuture[void]()
@ -326,7 +326,7 @@ suite "Multistream select":
await listenFut.wait(5.seconds)
asyncTest "e2e - select one from a list with unsupported protos":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
@ -364,7 +364,7 @@ suite "Multistream select":
await transport1.stop()
asyncTest "e2e - select one with both valid":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,

View File

@ -58,8 +58,8 @@ suite "Name resolving":
suite "Generic Resolving":
var resolver {.threadvar.}: MockResolver
proc testOne(input: string, output: seq[Multiaddress]): bool =
let resolved = waitFor resolver.resolveMAddress(Multiaddress.init(input).tryGet())
proc testOne(input: string, output: seq[MultiAddress]): bool =
let resolved = waitFor resolver.resolveMAddress(MultiAddress.init(input).tryGet())
if resolved != output:
echo "Expected ", output
echo "Got ", resolved
@ -67,10 +67,10 @@ suite "Name resolving":
return true
proc testOne(input: string, output: seq[string]): bool =
testOne(input, output.mapIt(Multiaddress.init(it).tryGet()))
testOne(input, output.mapIt(MultiAddress.init(it).tryGet()))
proc testOne(input, output: string): bool =
testOne(input, @[Multiaddress.init(output).tryGet()])
testOne(input, @[MultiAddress.init(output).tryGet()])
asyncSetup:
resolver = MockResolver.new()

View File

@ -88,7 +88,7 @@ suite "Noise":
asyncTest "e2e: handle write + noise":
let
server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
@ -129,7 +129,7 @@ suite "Noise":
asyncTest "e2e: handle write + noise (wrong prologue)":
let
server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
@ -169,7 +169,7 @@ suite "Noise":
asyncTest "e2e: handle read + noise":
let
server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
@ -208,7 +208,7 @@ suite "Noise":
asyncTest "e2e: handle read + noise fragmented":
let
server = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
server = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
serverPrivKey = PrivateKey.random(ECDSA, rng[]).get()
serverInfo = PeerInfo.new(serverPrivKey, server)
serverNoise = Noise.new(rng, serverPrivKey, outgoing = false)
@ -252,8 +252,8 @@ suite "Noise":
await listenFut
asyncTest "e2e use switch dial proto string":
let ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
var peerInfo1, peerInfo2: PeerInfo
var switch1, switch2: Switch
@ -278,8 +278,8 @@ suite "Noise":
switch2.stop())
asyncTest "e2e test wrong secure negotiation":
let ma1 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma2 = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma1 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let ma2 = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
var peerInfo1, peerInfo2: PeerInfo
var switch1, switch2: Switch

View File

@ -164,7 +164,7 @@ const
"08021220B333BE3E843339E0E2CE9E083ABC119BE05C7B65B8665ADE19E172D47BF91305"
]
PeerIDs = [
PeerIds = [
"QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvWs",
"QmeasUkAi1BhVUmopWzYJ5G1PGys9T5MZ2sPn87XTyaUAM",
"Qmc3PxhMhQja8N4t7mRDyGm2vHkvcxe5Kabp2iAig1DXHb",
@ -180,15 +180,15 @@ const
]
suite "Peer testing suite":
test "Go PeerID test vectors":
test "Go PeerId test vectors":
for i in 0..<len(PrivateKeys):
var seckey = PrivateKey.init(stripSpaces(PrivateKeys[i])).get()
var pubkey = seckey.getPublicKey().get()
var p1 = PeerID.init(seckey).get()
var p2 = PeerID.init(pubkey).get()
var p3 = PeerID.init(PeerIDs[i]).get()
var b1 = Base58.decode(PeerIDs[i])
var p4 = PeerID.init(b1).get()
var p1 = PeerId.init(seckey).get()
var p2 = PeerId.init(pubkey).get()
var p3 = PeerId.init(PeerIds[i]).get()
var b1 = Base58.decode(PeerIds[i])
var p4 = PeerId.init(b1).get()
var buf1 = newSeq[byte](len(p1))
var buf2 = newSeq[byte](len(p2))
var buf3 = newSeq[byte](len(p3))
@ -200,10 +200,10 @@ suite "Peer testing suite":
p1 == p2
p1 == p4
p2 == p4
$p1 == PeerIDs[i]
$p2 == PeerIDs[i]
$p3 == PeerIDs[i]
$p4 == PeerIDs[i]
$p1 == PeerIds[i]
$p2 == PeerIds[i]
$p3 == PeerIds[i]
$p4 == PeerIds[i]
p1.match(seckey) == true
p1.match(pubkey) == true
p1.getBytes() == p2.getBytes()

View File

@ -12,7 +12,7 @@ suite "PeerInfo":
test "Should init with private key":
let seckey = PrivateKey.random(ECDSA, rng[]).get()
var peerInfo = PeerInfo.new(seckey)
var peerId = PeerID.init(seckey).get()
var peerId = PeerId.init(seckey).get()
check peerId == peerInfo.peerId
check seckey.getPublicKey().get() == peerInfo.publicKey

View File

@ -12,13 +12,13 @@ suite "PeerStore":
let
# Peer 1
keyPair1 = KeyPair.random(ECDSA, rng[]).get()
peerId1 = PeerID.init(keyPair1.secKey).get()
peerId1 = PeerId.init(keyPair1.seckey).get()
multiaddrStr1 = "/ip4/127.0.0.1/udp/1234/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"
multiaddr1 = MultiAddress.init(multiaddrStr1).get()
testcodec1 = "/nim/libp2p/test/0.0.1-beta1"
# Peer 2
keyPair2 = KeyPair.random(ECDSA, rng[]).get()
peerId2 = PeerID.init(keyPair2.secKey).get()
peerId2 = PeerId.init(keyPair2.seckey).get()
multiaddrStr2 = "/ip4/0.0.0.0/tcp/1234/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC"
multiaddr2 = MultiAddress.init(multiaddrStr2).get()
testcodec2 = "/nim/libp2p/test/0.0.2-beta1"
@ -32,8 +32,8 @@ suite "PeerStore":
peerStore.addressBook.add(peerId2, multiaddr2)
peerStore.protoBook.add(peerId1, testcodec1)
peerStore.protoBook.add(peerId2, testcodec2)
peerStore.keyBook.set(peerId1, keyPair1.pubKey)
peerStore.keyBook.set(peerId2, keyPair2.pubKey)
peerStore.keyBook.set(peerId1, keyPair1.pubkey)
peerStore.keyBook.set(peerId2, keyPair2.pubkey)
# Test PeerStore::delete
check:
@ -52,13 +52,13 @@ suite "PeerStore":
protoChanged = false
keyChanged = false
proc addrChange(peerId: PeerID, addrs: HashSet[MultiAddress]) =
proc addrChange(peerId: PeerId, addrs: HashSet[MultiAddress]) =
addrChanged = true
proc protoChange(peerId: PeerID, protos: HashSet[string]) =
proc protoChange(peerId: PeerId, protos: HashSet[string]) =
protoChanged = true
proc keyChange(peerId: PeerID, publicKey: PublicKey) =
proc keyChange(peerId: PeerId, publicKey: PublicKey) =
keyChanged = true
peerStore.addHandlers(addrChangeHandler = addrChange,

View File

@ -31,7 +31,7 @@ suite "Ping":
pingReceivedCount {.threadvar.}: int
asyncSetup:
ma = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
ma = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())

View File

@ -586,7 +586,7 @@ suite "Switch":
# for most of the steps in the upgrade flow -
# this is just a basic test for dials
asyncTest "e2e canceling dial should not leak":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
@ -604,7 +604,7 @@ suite "Switch":
await switch.start()
var peerId = PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
let connectFut = switch.connect(peerId, transport.addrs)
await sleepAsync(500.millis)
connectFut.cancel()
@ -619,7 +619,7 @@ suite "Switch":
switch.stop())
asyncTest "e2e closing remote conn should not leak":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
@ -633,7 +633,7 @@ suite "Switch":
await switch.start()
var peerId = PeerID.init(PrivateKey.random(ECDSA, rng[]).get()).get()
var peerId = PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).get()
expect LPStreamClosedError, LPStreamEOFError:
await switch.connect(peerId, transport.addrs)
@ -673,7 +673,7 @@ suite "Switch":
await allFuturesThrowing(readers)
await switch2.stop() #Otherwise this leaks
check await checkExpiring(not switch1.isConnected(switch2.peerInfo.peerID))
check await checkExpiring(not switch1.isConnected(switch2.peerInfo.peerId))
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@ -686,7 +686,7 @@ suite "Switch":
await switch2.start()
let someAddr = MultiAddress.init("/ip4/127.128.0.99").get()
let seckey = PrivateKey.random(ECDSA, rng[]).get()
let somePeer = PeerInfo.new(secKey, [someAddr])
let somePeer = PeerInfo.new(seckey, [someAddr])
expect(DialFailedError):
discard await switch2.dial(somePeer.peerId, somePeer.addrs, TestCodec)
await switch2.stop()

View File

@ -17,7 +17,7 @@ suite "TCP transport":
checkTrackers()
asyncTest "test listener: handle write":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
@ -38,7 +38,7 @@ suite "TCP transport":
check string.fromBytes(msg) == "Hello!"
asyncTest "test listener: handle read":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)

View File

@ -171,7 +171,7 @@ proc hexChar*(c: byte, lowercase: bool = false): string =
of 0..9: result[1] = chr(t0 + ord('0'))
else: result[1] = chr(t0 - 10 + alpha)
proc toHex*(a: openarray[byte], lowercase: bool = false): string =
proc toHex*(a: openArray[byte], lowercase: bool = false): string =
result = ""
for i in a:
result = result & hexChar(i, lowercase)
@ -263,7 +263,7 @@ suite "Variable integer test suite":
buffer.setLen(PBedgeSizes[i])
check:
PB.putUVarint(buffer, length, PBedgeValues[i]).isOk()
buffer.setlen(buffer.high)
buffer.setLen(buffer.high)
check:
PB.getUVarint(buffer, length, value).error() == VarintError.Incomplete
@ -339,7 +339,7 @@ suite "Variable integer test suite":
buffer.setLen(LPedgeSizes[i])
check:
LP.putUVarint(buffer, length, LPedgeValues[i]).isOk()
buffer.setlen(buffer.high)
buffer.setLen(buffer.high)
check:
LP.getUVarint(buffer, length, value).error() == VarintError.Incomplete

View File

@ -71,7 +71,7 @@ suite "WebSocket transport":
"/ip4/0.0.0.0/tcp/0/wss")
asyncTest "Hostname verification":
let ma = @[Multiaddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()]
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0/wss").tryGet()]
let transport1 = WsTransport.new(Upgrade(), TLSPrivateKey.init(SecureKey), TLSCertificate.init(SecureCert), {TLSFlags.NoVerifyHost})
await transport1.start(ma)