Repo limits (#319)
* initial implementation of repo store
* allow isManifest on multicodec
* rework with new blockstore
* add raw codec
* rework listBlocks
* remove fsstore
* reworking with repostore
* bump datastore
* fix listBlocks iterator
* adding store's common tests
* run common store tests
* remove fsstore backend tests
* bump datastore
* add `listBlocks` tests
* listBlocks filter based on block type
* disabling tests in need of rewriting
* allow passing block type
* move BlockNotFoundError definition
* fix tests
* increase default advertise loop sleep to 10 mins
* use `self`
* add cache quota functionality
* pass meta store and start repo
* add `CacheQuotaNamespace`
* pass meta store
* bump datastore to latest master
* don't use os `/` as key separator
* Added quota limits support
* tests for quota limits
* add block expiration key
* remove unnecessary space
* use idleAsync in listBlocks
* proper test name
* re-add ctrl-c try/except
* add storage quota and block ttl config options
* clarify comments
* change expires key format
* check for block presence before storing
* bump datastore
* use dht with fixed datastore `has`
* bump datastore to latest master
* bump dht to latest master
parent 9143a25fa1
commit 0beeefd760
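This change replaces the old FSStore-based local storage with a quota- and TTL-aware RepoStore backed by two datastores. For orientation only, a minimal Nim sketch of how the new store is wired up and used — module paths, file-system paths and the Block constructor are assumptions for illustration, not taken from this diff:

import pkg/chronos
import pkg/datastore
import pkg/questionable/results
import pkg/stew/byteutils
import codex/stores                      # assumed path; exports RepoStore, BlockType
import codex/blocktype as bt

proc repoStoreSketch() {.async.} =
  let
    # Two datastores: one for block data, one for metadata (quota counters,
    # block TTL keys), mirroring the construction in codex.nim below.
    repoDs = Datastore(FSDatastore.new("/tmp/codex-data", depth = 5)
      .expect("Should create repo data store!"))
    metaDs = SQLiteDatastore.new("/tmp/codex-meta")
      .expect("Should create meta data store!")
    repo = RepoStore.new(
      repoDs = repoDs,
      metaDs = metaDs,
      quotaMaxBytes = 1'u shl 33,          # same as DefaultQuotaBytes (~8GB)
      blockTtl = 24.hours)

  await repo.start()                       # restores persisted quota counters

  let blk = bt.Block.new("hello".toBytes).tryGet()  # block constructor assumed
  (await repo.putBlock(blk)).tryGet()      # charged against the quota, expires after blockTtl

  await repo.stop()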
@@ -13,6 +13,7 @@ import pkg/chronos
 import pkg/chronicles
 import pkg/libp2p
 import pkg/metrics
+import pkg/questionable
 import pkg/questionable/results

 import ../protobuf/presence
@@ -37,7 +38,7 @@ const
   DefaultDiscoveryTimeout = 1.minutes
   DefaultMinPeersPerBlock = 3
   DefaultDiscoveryLoopSleep = 3.seconds
-  DefaultAdvertiseLoopSleep = 3.seconds
+  DefaultAdvertiseLoopSleep = 30.minutes

 type
   DiscoveryEngine* = ref object of RootObj
@@ -60,6 +61,7 @@ type
     advertiseLoopSleep: Duration # Advertise loop sleep
     inFlightDiscReqs*: Table[Cid, Future[seq[SignedPeerRecord]]] # Inflight discovery requests
     inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests
+    advertiseType*: BlockType # Advertice blocks, manifests or both

 proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
   while b.discEngineRunning:
@@ -77,19 +79,12 @@ proc discoveryQueueLoop(b: DiscoveryEngine) {.async.} =
     await sleepAsync(b.discoveryLoopSleep)

 proc advertiseQueueLoop*(b: DiscoveryEngine) {.async.} =
-  proc onBlock(cid: Cid) {.async.} =
-    try:
-      trace "Listed block", cid
-      await b.advertiseQueue.put(cid)
-      await sleepAsync(50.millis) # TODO: temp workaround because we're announcing all CIDs
-    except CancelledError as exc:
-      trace "Cancelling block listing"
-      raise exc
-    except CatchableError as exc:
-      trace "Exception listing blocks", exc = exc.msg
-
   while b.discEngineRunning:
-    discard await b.localStore.listBlocks(onBlock)
+    if cids =? await b.localStore.listBlocks(blockType = b.advertiseType):
+      for c in cids:
+        if cid =? await c:
+          await b.advertiseQueue.put(cid)
+          await sleepAsync(50.millis)

     trace "About to sleep advertise loop", sleep = b.advertiseLoopSleep
     await sleepAsync(b.advertiseLoopSleep)
@@ -257,7 +252,8 @@ proc new*(
   concurrentDiscReqs = DefaultConcurrentDiscRequests,
   discoveryLoopSleep = DefaultDiscoveryLoopSleep,
   advertiseLoopSleep = DefaultAdvertiseLoopSleep,
-  minPeersPerBlock = DefaultMinPeersPerBlock,): DiscoveryEngine =
+  minPeersPerBlock = DefaultMinPeersPerBlock,
+  advertiseType = BlockType.Both): DiscoveryEngine =
   T(
     localStore: localStore,
     peers: peers,
@@ -272,4 +268,5 @@ proc new*(
     inFlightAdvReqs: initTable[Cid, Future[void]](),
     discoveryLoopSleep: discoveryLoopSleep,
     advertiseLoopSleep: advertiseLoopSleep,
-    minPeersPerBlock: minPeersPerBlock)
+    minPeersPerBlock: minPeersPerBlock,
+    advertiseType: advertiseType)
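Downstream callers can now choose what the advertise loop announces. A hypothetical construction, in the style of the call in codex.nim — the store, peer store, network, discovery and pending-blocks instances are assumed to exist already:

let blockDiscovery = DiscoveryEngine.new(
  repoStore,
  peerStore,
  network,
  discovery,
  pendingBlocks,
  advertiseLoopSleep = 30.minutes,        # matches the new default above
  advertiseType = BlockType.Manifest)     # announce only manifest CIDs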
@@ -13,7 +13,6 @@ import pkg/upraises

 push: {.upraises: [].}

-import pkg/questionable
 import pkg/chronicles
 import pkg/chronos
 import pkg/libp2p
@@ -35,8 +35,6 @@ type
     cid*: Cid
     data*: seq[byte]

-  BlockNotFoundError* = object of CodexError
-
 template EmptyCid*: untyped =
   var
     emptyCid {.global, threadvar.}:
@@ -10,6 +10,7 @@
 import std/sequtils
 import std/os
 import std/sugar
+import std/tables

 import pkg/chronicles
 import pkg/chronos
@@ -20,6 +21,7 @@ import pkg/confutils/defs
 import pkg/nitro
 import pkg/stew/io2
 import pkg/stew/shims/net as stewnet
+import pkg/datastore

 import ./node
 import ./conf
@@ -31,8 +33,8 @@ import ./utils/fileutils
 import ./erasure
 import ./discovery
 import ./contracts
-import ./utils/keyutils
 import ./utils/addrutils
+import ./namespaces

 logScope:
   topics = "codex node"
@@ -43,15 +45,19 @@ type
     config: CodexConf
     restServer: RestServerRef
     codexNode: CodexNodeRef
+    repoStore: RepoStore

   CodexPrivateKey* = libp2p.PrivateKey # alias

 proc start*(s: CodexServer) {.async.} =
+  notice "Starting codex node"
+
+  await s.repoStore.start()
   s.restServer.start()
   await s.codexNode.start()

   let
-    # TODO: Can't define this as constants, pity
+    # TODO: Can't define these as constants, pity
     natIpPart = MultiAddress.init("/ip4/" & $s.config.nat & "/")
       .expect("Should create multiaddress")
     anyAddrIp = MultiAddress.init("/ip4/0.0.0.0/")
@@ -79,8 +85,12 @@ proc start*(s: CodexServer) {.async.} =
   await s.runHandle

 proc stop*(s: CodexServer) {.async.} =
+  notice "Stopping codex node"
+
   await allFuturesThrowing(
-    s.restServer.stop(), s.codexNode.stop())
+    s.restServer.stop(),
+    s.codexNode.stop(),
+    s.repoStore.start())

   s.runHandle.complete()
@@ -122,9 +132,17 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):
     cache = CacheStore.new(cacheSize = config.cacheSize * MiB)

   let
-    discoveryStore = Datastore(SQLiteDatastore.new(
-      config.dataDir / "dht")
-      .expect("Should not fail!"))
+    discoveryDir = config.dataDir / CodexDhtNamespace
+
+  if io2.createPath(discoveryDir).isErr:
+    trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir
+    raise (ref Defect)(
+      msg: "Unable to create discovery directory for block store: " & discoveryDir)
+
+  let
+    discoveryStore = Datastore(
+      SQLiteDatastore.new(config.dataDir / CodexDhtProvidersNamespace)
+        .expect("Should create discovery datastore!"))

     discovery = Discovery.new(
       switch.peerInfo.privateKey,
@@ -136,20 +154,20 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):

     wallet = WalletRef.new(EthPrivateKey.random())
     network = BlockExcNetwork.new(switch)
-    repoDir = config.dataDir / "repo"

-  if io2.createPath(repoDir).isErr:
-    trace "Unable to create data directory for block store", dataDir = repoDir
-    raise (ref Defect)(
-      msg: "Unable to create data directory for block store: " & repoDir)
-
-  let
-    localStore = FSStore.new(repoDir, cache = cache)
+    repoStore = RepoStore.new(
+      repoDs = Datastore(FSDatastore.new($config.dataDir, depth = 5)
+        .expect("Should create repo data store!")),
+      metaDs = SQLiteDatastore.new(config.dataDir / CodexMetaNamespace)
+        .expect("Should create meta data store!"),
+      quotaMaxBytes = config.storageQuota.uint,
+      blockTtl = config.blockTtl.seconds)
+
     peerStore = PeerCtxStore.new()
     pendingBlocks = PendingBlocksManager.new()
-    blockDiscovery = DiscoveryEngine.new(localStore, peerStore, network, discovery, pendingBlocks)
-    engine = BlockExcEngine.new(localStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
-    store = NetworkStore.new(engine, localStore)
+    blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks)
+    engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, peerStore, pendingBlocks)
+    store = NetworkStore.new(engine, repoStore)
     erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider)
     contracts = ContractInteractions.new(config)
     codexNode = CodexNodeRef.new(switch, store, engine, erasure, discovery, contracts)
@@ -164,4 +182,5 @@ proc new*(T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey):
   T(
     config: config,
     codexNode: codexNode,
-    restServer: restServer)
+    restServer: restServer,
+    repoStore: repoStore)
@@ -29,9 +29,9 @@ import pkg/libp2p
 import pkg/ethers

 import ./discovery
-import ./stores/cachestore
+import ./stores

-export DefaultCacheSizeMiB, net
+export DefaultCacheSizeMiB, net, DefaultQuotaBytes, DefaultBlockTtl

 type
   StartUpCommand* {.pure.} = enum
@@ -95,8 +95,8 @@ type
      abbr: "i"
      name: "listen-addrs" }: seq[MultiAddress]

+    # TODO: change this once we integrate nat support
    nat* {.
-      # TODO: change this once we integrate nat support
      desc: "IP Addresses to announce behind a NAT"
      defaultValue: ValidIpAddress.init("127.0.0.1")
      defaultValueDesc: "127.0.0.1"
@@ -144,6 +144,20 @@ type
      name: "api-port"
      abbr: "p" }: int

+    storageQuota* {.
+      desc: "The size of the total storage quota dedicated to the node"
+      defaultValue: DefaultQuotaBytes
+      defaultValueDesc: $DefaultQuotaBytes
+      name: "storage-quota"
+      abbr: "q" }: Natural
+
+    blockTtl* {.
+      desc: "Default block timeout in seconds - 0 disables the ttl"
+      defaultValue: DefaultBlockTtl.secs
+      defaultValueDesc: $DefaultBlockTtl
+      name: "block-ttl"
+      abbr: "t" }: Natural
+
    cacheSize* {.
      desc: "The size in MiB of the block cache, 0 disables the cache - might help on slow hardrives"
      defaultValue: 0
@@ -11,7 +11,6 @@ import pkg/upraises

 push: {.upraises: [].}

-import ../manifest
 import ../stores

 type
@@ -18,7 +18,6 @@ import pkg/chronicles

 import ../manifest
 import ../stores
-import ../errors
 import ../blocktype as bt

 import ./backend
@@ -187,7 +187,12 @@ func decode*(
   decoder.decode(data)

 func decode*(_: type Manifest, blk: Block): ?!Manifest =
-  without contentType =? blk.cid.contentType() and
-          containerType =? ManifestContainers.?[$contentType]:
-    return failure "CID has invalid content type for manifest"
-  Manifest.decode(blk.data, containerType)
+  ## Decode a manifest using `decoder`
+  ##
+
+  if not ? blk.cid.isManifest:
+    return failure "Cid not a manifest codec"
+
+  Manifest.decode(
+    blk.data,
+    ? ManifestContainers[$(?blk.cid.contentType().mapFailure)].catch)
@@ -23,7 +23,6 @@ import ../errors
 import ../utils
 import ../blocktype
 import ./types
-import ./coders

 ############################################################
 # Operations on block list
@@ -46,6 +45,12 @@ func `[]=`*(self: Manifest, i: BackwardsIndex, item: Cid) =
   self.rootHash = Cid.none
   self.blocks[self.len - i.int] = item

+func isManifest*(cid: Cid): ?!bool =
+  ($(?cid.contentType().mapFailure) in ManifestContainers).success
+
+func isManifest*(mc: MultiCodec): ?!bool =
+  ($mc in ManifestContainers).success
+
 proc add*(self: Manifest, cid: Cid) =
   assert not self.protected # we expect that protected manifests are created with properly-sized self.blocks
   self.rootHash = Cid.none
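A small Nim sketch of the new helpers in use; the codex/manifest import path is assumed and the values are illustrative only:

import pkg/libp2p
import pkg/questionable
import pkg/questionable/results
import codex/manifest                     # assumed path

proc describe(cid: Cid) =
  # isManifest is fallible because contentType() can fail for some CIDs.
  without manifest =? cid.isManifest, err:
    echo "cannot inspect cid: ", err.msg
    return

  echo $cid, (if manifest: " is a manifest" else: " is a data block")

# The MultiCodec overload answers the same question for a bare codec value:
doAssert multiCodec("dag-pb").isManifest.tryGet()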
@@ -14,6 +14,7 @@ import pkg/libp2p
 import pkg/questionable

 const
+  BlockCodec* = multiCodec("raw")
   DagPBCodec* = multiCodec("dag-pb")

 type
@@ -7,13 +7,15 @@
 ## This file may not be copied, modified, or distributed except according to
 ## those terms.

-import std/os
-
 const
-  CodexRepoNamespace* = "/repo" # repository namespace, blocks and manifests are subkeys
-  CodexBlocksNamespace* = CodexRepoNamespace / "blocks" # blocks namespace
-  CodexManifestNamespace* = CodexRepoNamespace / "manifests" # manifest namespace
-  CodexBlocksPersistNamespace* = # Cid's of persisted blocks goes here
-    CodexMetaNamespace / "blocks" / "persist"
+  # Namespaces
+  CodexMetaNamespace* = "meta" # meta info stored here
+  CodexRepoNamespace* = "repo" # repository namespace, blocks and manifests are subkeys
+  CodexBlocksNamespace* = CodexRepoNamespace & "/blocks" # blocks namespace
+  CodexManifestNamespace* = CodexRepoNamespace & "/manifests" # manifest namespace
   CodexBlocksTtlNamespace* = # Cid TTL
-    CodexMetaNamespace / "blocks" / "ttl"
+    CodexMetaNamespace & "/ttl"
+  CodexDhtNamespace* = "dht" # Dht namespace
+  CodexDhtProvidersNamespace* = # Dht providers namespace
+    CodexDhtNamespace & "/providers"
+  CodexQuotaNamespace* = CodexMetaNamespace & "/quota" # quota's namespace
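Since the namespaces are now joined with plain `&` rather than the OS path separator, the resulting key prefixes are flat, slash-separated strings. For illustration, assuming the codex/namespaces module path:

import codex/namespaces                   # assumed path

doAssert CodexBlocksNamespace == "repo/blocks"
doAssert CodexManifestNamespace == "repo/manifests"
doAssert CodexBlocksTtlNamespace == "meta/ttl"
doAssert CodexQuotaNamespace == "meta/quota"
doAssert CodexDhtProvidersNamespace == "dht/providers"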
@@ -69,18 +69,20 @@ proc fetchManifest*(
   ## Fetch and decode a manifest block
   ##

-  without contentType =? cid.contentType() and
-          containerType =? ManifestContainers.?[$contentType]:
-    return failure "CID has invalid content type for manifest"
+  if err =? cid.isManifest.errorOption:
+    return failure "CID has invalid content type for manifest {$cid}"

-  trace "Received retrieval request", cid
+  trace "Received manifest retrieval request", cid

-  without blk =? await node.blockStore.getBlock(cid), error:
-    return failure error
+  without blk =? await node.blockStore.getBlock(cid), err:
+    trace "Error retriving manifest block", cid, err = err.msg
+    return failure err

-  without manifest =? Manifest.decode(blk):
-    return failure(
-      newException(CodexError, "Unable to decode as manifest"))
+  without manifest =? Manifest.decode(blk), err:
+    trace "Unable to decode as manifest", err = err.msg
+    return failure("Unable to decode as manifest")
+
+  trace "Decoded manifest", cid

   return manifest.success
@@ -120,6 +122,7 @@ proc retrieve*(
   ##

   if manifest =? (await node.fetchManifest(cid)):
+    trace "Retrieving blocks from manifest", cid
     if manifest.protected:
       # Retrieve, decode and save to the local store all EC groups
       proc erasureJob(): Future[void] {.async.} =
|
||||||
# asyncSpawn prefetchBlocks() - temporarily commented out
|
# asyncSpawn prefetchBlocks() - temporarily commented out
|
||||||
#
|
#
|
||||||
# Retrieve all blocks of the dataset sequentially from the local store or network
|
# Retrieve all blocks of the dataset sequentially from the local store or network
|
||||||
|
trace "Creating store stream for manifest", cid
|
||||||
return LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success
|
return LPStream(StoreStream.new(node.blockStore, manifest, pad = false)).success
|
||||||
|
|
||||||
let
|
let
|
||||||
|
@ -189,8 +193,8 @@ proc store*(
|
||||||
return failure("Unable to init block from chunk!")
|
return failure("Unable to init block from chunk!")
|
||||||
|
|
||||||
blockManifest.add(blk.cid)
|
blockManifest.add(blk.cid)
|
||||||
if isErr (await self.blockStore.putBlock(blk)):
|
if err =? (await self.blockStore.putBlock(blk)).errorOption:
|
||||||
# trace "Unable to store block", cid = blk.cid
|
trace "Unable to store block", cid = blk.cid, err = err.msg
|
||||||
return failure(&"Unable to store block {blk.cid}")
|
return failure(&"Unable to store block {blk.cid}")
|
||||||
|
|
||||||
except CancelledError as exc:
|
except CancelledError as exc:
|
||||||
|
|
|
@@ -13,7 +13,6 @@ push: {.upraises: [].}


 import std/sequtils
-import std/sugar

 import pkg/questionable
 import pkg/questionable/results
@@ -12,7 +12,6 @@ import std/sequtils
 import pkg/chronos
 import pkg/libp2p
 import pkg/chronicles
-import pkg/questionable
 import pkg/questionable/results
 import pkg/contractabi/address as ca

@@ -10,8 +10,6 @@
 import pkg/questionable/results
 import pkg/libp2p/protobuf/minprotobuf

-import ../../errors
-
 type
   Tag* = object
     idx*: int64
@@ -1,6 +1,6 @@
 import ./stores/cachestore
 import ./stores/blockstore
 import ./stores/networkstore
-import ./stores/fsstore
+import ./stores/repostore

-export cachestore, blockstore, networkstore, fsstore
+export cachestore, blockstore, networkstore, repostore
@@ -13,23 +13,44 @@ push: {.upraises: [].}

 import pkg/chronos
 import pkg/libp2p
+import pkg/questionable
 import pkg/questionable/results

 import ../blocktype

 export blocktype, libp2p

+const
+  DefaultBlockTtl = 24.hours
+
 type
-  OnBlock* = proc(cid: Cid): Future[void] {.upraises: [], gcsafe.}
+  BlockNotFoundError* = object of CodexError
+
+  BlockType* {.pure.} = enum
+    Manifest, Block, Both
+
+  GetNext* = proc(): Future[?Cid] {.upraises: [], gcsafe, closure.}
+
+  BlocksIter* = ref object
+    finished*: bool
+    next*: GetNext
+
   BlockStore* = ref object of RootObj

+iterator items*(self: BlocksIter): Future[?Cid] =
+  while not self.finished:
+    yield self.next()
+
 method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base.} =
   ## Get a block from the blockstore
   ##

   raiseAssert("Not implemented!")

-method putBlock*(self: BlockStore, blk: Block): Future[?!void] {.base.} =
+method putBlock*(
+  self: BlockStore,
+  blk: Block,
+  ttl = Duration.none): Future[?!void] {.base.} =
   ## Put a block to the blockstore
   ##
@@ -47,7 +68,9 @@ method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base.} =

   raiseAssert("Not implemented!")

-method listBlocks*(self: BlockStore, onBlock: OnBlock): Future[?!void] {.base.} =
+method listBlocks*(
+  self: BlockStore,
+  blockType = BlockType.Manifest): Future[?!BlocksIter] {.base.} =
   ## Get the list of blocks in the BlockStore. This is an intensive operation
   ##
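For reference, a sketch of how a caller drains the new iterator-based listBlocks; imports and the concrete store are assumptions, and the advertise loop in discovery.nim follows the same pattern:

import pkg/chronos
import pkg/chronicles
import pkg/questionable
import codex/stores                       # assumed path; exports BlockStore, BlockType, BlocksIter

proc dumpManifests(store: BlockStore) {.async.} =
  # listBlocks now returns ?!BlocksIter instead of taking a callback;
  # every item is a Future[?Cid] that resolves to Cid.none once iteration ends.
  without iter =? await store.listBlocks(blockType = BlockType.Manifest), err:
    trace "Unable to list blocks", err = err.msg
    return

  for futCid in iter:
    if cid =? await futCid:
      trace "Found manifest", cid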
@@ -24,6 +24,7 @@ import pkg/questionable/results
 import ./blockstore
 import ../chunker
 import ../errors
+import ../manifest

 export blockstore

@@ -73,14 +74,62 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} =

   return (cid in self.cache).success

-method listBlocks*(s: CacheStore, onBlock: OnBlock): Future[?!void] {.async.} =
+func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) =
+  return iterator(): Cid =
+    for cid in self.cache.keys:
+      yield cid
+
+method listBlocks*(
+  self: CacheStore,
+  blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
   ## Get the list of blocks in the BlockStore. This is an intensive operation
   ##

-  for cid in toSeq(s.cache.keys):
-    await onBlock(cid)
+  var
+    iter = BlocksIter()

-  return success()
+  let
+    cids = self.cids()
+
+  proc next(): Future[?Cid] {.async.} =
+    await idleAsync()
+
+    var cid: Cid
+    while true:
+      if iter.finished:
+        return Cid.none
+
+      cid = cids()
+
+      if finished(cids):
+        iter.finished = true
+        return Cid.none
+
+      without isManifest =? cid.isManifest, err:
+        trace "Error checking if cid is a manifest", err = err.msg
+        return Cid.none
+
+      case blockType:
+      of BlockType.Manifest:
+        if not isManifest:
+          trace "Cid is not manifest, skipping", cid
+          continue
+
+        break
+      of BlockType.Block:
+        if isManifest:
+          trace "Cid is a manifest, skipping", cid
+          continue
+
+        break
+      of BlockType.Both:
+        break
+
+    return cid.some
+
+  iter.next = next
+
+  return success iter

 func putBlockSync(self: CacheStore, blk: Block): bool =
@@ -104,7 +153,10 @@ func putBlockSync(self: CacheStore, blk: Block): bool =
   self.currentSize += blkSize
   return true

-method putBlock*(self: CacheStore, blk: Block): Future[?!void] {.async.} =
+method putBlock*(
+  self: CacheStore,
+  blk: Block,
+  ttl = Duration.none): Future[?!void] {.async.} =
   ## Put a block to the blockstore
   ##
@@ -1,207 +0,0 @@
-## Nim-Codex
-## Copyright (c) 2021 Status Research & Development GmbH
-## Licensed under either of
-##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
-##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
-## at your option.
-## This file may not be copied, modified, or distributed except according to
-## those terms.
-
-import pkg/upraises
-
-push: {.upraises: [].}
-
-import std/os
-
-import pkg/chronos
-import pkg/chronicles
-import pkg/libp2p
-import pkg/questionable
-import pkg/questionable/results
-import pkg/stew/io2
-
-import ./cachestore
-import ./blockstore
-
-export blockstore
-
-logScope:
-  topics = "codex fsstore"
-
-type
-  FSStore* = ref object of BlockStore
-    cache: BlockStore
-    repoDir: string
-    postfixLen*: int
-
-template blockPath*(self: FSStore, cid: Cid): string =
-  self.repoDir / ($cid)[^self.postfixLen..^1] / $cid
-
-method getBlock*(self: FSStore, cid: Cid): Future[?!Block] {.async.} =
-  ## Get a block from the cache or filestore.
-  ## Save a copy to the cache if present in the filestore but not in the cache
-  ##
-
-  if not self.cache.isNil:
-    trace "Getting block from cache or filestore", cid
-  else:
-    trace "Getting block from filestore", cid
-
-  if cid.isEmpty:
-    trace "Empty block, ignoring"
-    return success cid.emptyBlock
-
-  if not self.cache.isNil:
-    let
-      cachedBlockRes = await self.cache.getBlock(cid)
-
-    if not cachedBlockRes.isErr:
-      return success cachedBlockRes.get
-    else:
-      trace "Unable to read block from cache", cid, error = cachedBlockRes.error.msg
-
-  # Read file contents
-  var
-    data: seq[byte]
-
-  let
-    path = self.blockPath(cid)
-    res = io2.readFile(path, data)
-
-  if res.isErr:
-    if not isFile(path): # May be, check instead that "res.error == ERROR_FILE_NOT_FOUND" ?
-      return failure (ref BlockNotFoundError)(msg: "Block not in filestore")
-    else:
-      let
-        error = io2.ioErrorMsg(res.error)
-
-      trace "Error requesting block from filestore", path, error
-      return failure "Error requesting block from filestore: " & error
-
-  without blk =? Block.new(cid, data), error:
-    trace "Unable to construct block from data", cid, error = error.msg
-    return failure error
-
-  if not self.cache.isNil:
-    let
-      putCachedRes = await self.cache.putBlock(blk)
-
-    if putCachedRes.isErr:
-      trace "Unable to store block in cache", cid, error = putCachedRes.error.msg
-
-  return success blk
-
-method putBlock*(self: FSStore, blk: Block): Future[?!void] {.async.} =
-  ## Write a block's contents to a file with name based on blk.cid.
-  ## Save a copy to the cache
-  ##
-
-  if not self.cache.isNil:
-    trace "Putting block into filestore and cache", cid = blk.cid
-  else:
-    trace "Putting block into filestore", cid = blk.cid
-
-  if blk.isEmpty:
-    trace "Empty block, ignoring"
-    return success()
-
-  let path = self.blockPath(blk.cid)
-  if isFile(path):
-    return success()
-
-  # If directory exists createPath wont fail
-  let dir = path.parentDir
-  if io2.createPath(dir).isErr:
-    trace "Unable to create block prefix dir", dir
-    return failure("Unable to create block prefix dir")
-
-  let res = io2.writeFile(path, blk.data)
-  if res.isErr:
-    let error = io2.ioErrorMsg(res.error)
-    trace "Unable to store block", path, cid = blk.cid, error
-    return failure("Unable to store block")
-
-  if not self.cache.isNil:
-    let
-      putCachedRes = await self.cache.putBlock(blk)
-
-    if putCachedRes.isErr:
-      trace "Unable to store block in cache", cid = blk.cid, error = putCachedRes.error.msg
-
-  return success()
-
-method delBlock*(self: FSStore, cid: Cid): Future[?!void] {.async.} =
-  ## Delete a block from the cache and filestore
-  ##
-
-  if not self.cache.isNil:
-    trace "Deleting block from cache and filestore", cid
-  else:
-    trace "Deleting block from filestore", cid
-
-  if cid.isEmpty:
-    trace "Empty block, ignoring"
-    return success()
-
-  if not self.cache.isNil:
-    let
-      delCachedRes = await self.cache.delBlock(cid)
-
-    if delCachedRes.isErr:
-      trace "Unable to delete block from cache", cid, error = delCachedRes.error.msg
-
-  let
-    path = self.blockPath(cid)
-    res = io2.removeFile(path)
-
-  if res.isErr:
-    let error = io2.ioErrorMsg(res.error)
-    trace "Unable to delete block", path, cid, error
-    return error.failure
-
-  return success()
-
-method hasBlock*(self: FSStore, cid: Cid): Future[?!bool] {.async.} =
-  ## Check if a block exists in the filestore
-  ##
-
-  trace "Checking filestore for block existence", cid
-  if cid.isEmpty:
-    trace "Empty block, ignoring"
-    return true.success
-
-  return self.blockPath(cid).isFile().success
-
-method listBlocks*(self: FSStore, onBlock: OnBlock): Future[?!void] {.async.} =
-  ## Process list of all blocks in the filestore via callback.
-  ## This is an intensive operation
-  ##
-
-  trace "Listing all blocks in filestore"
-  for (pkind, folderPath) in self.repoDir.walkDir():
-    if pkind != pcDir: continue
-    if len(folderPath.basename) != self.postfixLen: continue
-
-    for (fkind, filename) in folderPath.walkDir(relative = true):
-      if fkind != pcFile: continue
-      let cid = Cid.init(filename)
-      if cid.isOk:
-        await onBlock(cid.get())
-
-  return success()
-
-method close*(self: FSStore): Future[void] {.async.} =
-  ## Close the underlying cache
-  ##
-
-  if not self.cache.isNil: await self.cache.close
-
-proc new*(
-  T: type FSStore,
-  repoDir: string,
-  postfixLen = 2,
-  cache: BlockStore = nil): T =
-  T(
-    postfixLen: postfixLen,
-    repoDir: repoDir,
-    cache: cache)
@@ -1,118 +0,0 @@
-## Nim-Codex
-## Copyright (c) 2022 Status Research & Development GmbH
-## Licensed under either of
-##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
-##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
-## at your option.
-## This file may not be copied, modified, or distributed except according to
-## those terms.
-
-import std/os
-
-import pkg/upraises
-
-push: {.upraises: [].}
-
-import pkg/chronos
-import pkg/libp2p
-import pkg/questionable
-import pkg/questionable/results
-import pkg/datastore
-
-import ./blockstore
-import ../blocktype
-import ../namespaces
-import ../manifest
-
-export blocktype, libp2p
-
-const
-  CacheBytesKey* = CodexMetaNamespace / "bytes" / "cache"
-  CachePersistentKey* = CodexMetaNamespace / "bytes" / "persistent"
-
-type
-  LocalStore* = ref object of BlockStore
-    ds*: Datastore
-    blocksRepo*: BlockStore # TODO: Should be a Datastore
-    manifestRepo*: BlockStore # TODO: Should be a Datastore
-    cacheBytes*: uint
-    persistBytes*: uint
-
-method getBlock*(self: LocalStore, cid: Cid): Future[?!Block] =
-  ## Get a block from the blockstore
-  ##
-
-  if cid.isManifest:
-    self.manifestRepo.getBlock(cid)
-  else:
-    self.blocksRepo.getBlock(cid)
-
-method putBlock*(self: LocalStore, blk: Block): Future[?!void] =
-  ## Put a block to the blockstore
-  ##
-
-  if blk.cid.isManifest:
-    self.manifestRepo.putBlock(blk)
-  else:
-    self.blocksRepo.putBlock(blk)
-
-method delBlock*(self: LocalStore, cid: Cid): Future[?!void] =
-  ## Delete a block from the blockstore
-  ##
-
-  if cid.isManifest:
-    self.manifestRepo.delBlock(cid)
-  else:
-    self.blocksRepo.delBlock(cid)
-
-method hasBlock*(self: LocalStore, cid: Cid): Future[?!bool] =
-  ## Check if the block exists in the blockstore
-  ##
-
-  if cid.isManifest:
-    self.manifestRepo.hasBlock(cid)
-  else:
-    self.blocksRepo.hasBlock(cid)
-
-method listBlocks*(
-  self: LocalStore,
-  blkType: MultiCodec,
-  batch = 100,
-  onBlock: OnBlock): Future[?!void] =
-  ## Get the list of blocks in the LocalStore.
-  ## This is an intensive operation
-  ##
-
-  if $blkType in ManifestContainers:
-    self.manifestRepo.listBlocks(blkType, batch, onBlock)
-  else:
-    self.blocksRepo.listBlocks(onBlock)
-
-method close*(self: LocalStore) {.async.} =
-  ## Close the blockstore, cleaning up resources managed by it.
-  ## For some implementations this may be a no-op
-  ##
-
-  await self.manifestRepo.close()
-  await self.blocksRepo.close()
-
-proc contains*(self: LocalStore, blk: Cid): Future[bool] {.async.} =
-  ## Check if the block exists in the blockstore.
-  ## Return false if error encountered
-  ##
-
-  return (await self.hasBlock(blk)) |? false
-
-func new*(
-  T: type LocalStore,
-  datastore: Datastore,
-  blocksRepo: BlockStore,
-  manifestRepo: BlockStore,
-  cacheBytes: uint,
-  persistBytes: uint): T =
-  T(
-    datastore: datastore,
-    blocksRepo: blocksRepo,
-    manifestRepo: manifestRepo,
-    cacheBytes: cacheBytes,
-    persistBytes: persistBytes)
@@ -46,13 +46,16 @@ method getBlock*(self: NetworkStore, cid: Cid): Future[?!bt.Block] {.async.} =

     return success blk

-method putBlock*(self: NetworkStore, blk: bt.Block): Future[?!void] {.async.} =
+method putBlock*(
+  self: NetworkStore,
+  blk: bt.Block,
+  ttl = Duration.none): Future[?!void] {.async.} =
   ## Store block locally and notify the network
   ##

   trace "Puting block into network store", cid = blk.cid

-  let res = await self.localStore.putBlock(blk)
+  let res = await self.localStore.putBlock(blk, ttl)
   if res.isErr:
     return res

@@ -79,15 +82,17 @@ method close*(self: NetworkStore): Future[void] {.async.} =
   ## Close the underlying local blockstore
   ##

-  if not self.localStore.isNil: await self.localStore.close
+  if not self.localStore.isNil:
+    await self.localStore.close

 proc new*(
   T: type NetworkStore,
   engine: BlockExcEngine,
   localStore: BlockStore): T =

-  let b = NetworkStore(
-    localStore: localStore,
-    engine: engine)
+  let
+    self = NetworkStore(
+      localStore: localStore,
+      engine: engine)

-  return b
+  return self
@@ -0,0 +1,357 @@
+## Nim-Codex
+## Copyright (c) 2022 Status Research & Development GmbH
+## Licensed under either of
+##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
+##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
+## at your option.
+## This file may not be copied, modified, or distributed except according to
+## those terms.
+
+import pkg/upraises
+
+push: {.upraises: [].}
+
+import pkg/chronos
+import pkg/chronicles
+import pkg/libp2p
+import pkg/questionable
+import pkg/questionable/results
+import pkg/datastore
+import pkg/stew/endians2
+
+import ./blockstore
+import ../blocktype
+import ../namespaces
+import ../manifest
+
+export blocktype, libp2p
+
+logScope:
+  topics = "codex repostore"
+
+const
+  CodexMetaKey* = Key.init(CodexMetaNamespace).tryGet
+  CodexRepoKey* = Key.init(CodexRepoNamespace).tryGet
+  CodexBlocksKey* = Key.init(CodexBlocksNamespace).tryGet
+  CodexManifestKey* = Key.init(CodexManifestNamespace).tryGet
+
+  QuotaKey* = Key.init(CodexQuotaNamespace).tryGet
+  QuotaUsedKey* = (QuotaKey / "used").tryGet
+  QuotaReservedKey* = (QuotaKey / "reserved").tryGet
+
+  BlocksTtlKey* = Key.init(CodexBlocksTtlNamespace).tryGet
+
+  DefaultBlockTtl* = 24.hours
+  DefaultQuotaBytes* = 1'u shl 33'u # ~8GB
+
+  ZeroMoment = Moment.init(0, Nanosecond) # used for converting between Duration and Moment
+
+type
+  QuotaUsedError* = object of CodexError
+  QuotaNotEnoughError* = object of CodexError
+
+  RepoStore* = ref object of BlockStore
+    postFixLen*: int
+    repoDs*: Datastore
+    metaDs*: Datastore
+    quotaMaxBytes*: uint
+    quotaUsedBytes*: uint
+    quotaReservedBytes*: uint
+    blockTtl*: Duration
+    started*: bool
+
+func makePrefixKey*(self: RepoStore, cid: Cid): ?!Key =
+  let
+    cidKey = ? Key.init(($cid)[^self.postFixLen..^1] & "/" & $cid)
+
+  if ? cid.isManifest:
+    success CodexManifestKey / cidKey
+  else:
+    success CodexBlocksKey / cidKey
+
+func makeExpiresKey(expires: Duration, cid: Cid): ?!Key =
+  BlocksTtlKey / $cid / $expires.seconds
+
+func totalUsed*(self: RepoStore): uint =
+  (self.quotaUsedBytes + self.quotaReservedBytes)
+
+method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} =
+  ## Get a block from the blockstore
+  ##
+
+  without key =? self.makePrefixKey(cid), err:
+    trace "Error getting key from provider", err = err.msg
+    return failure(err)
+
+  without data =? await self.repoDs.get(key), err:
+    if not (err of DatastoreKeyNotFound):
+      trace "Error getting block from datastore", err = err.msg, key
+      return failure(err)
+
+    return failure(newException(BlockNotFoundError, err.msg))
+
+  trace "Got block for cid", cid
+  return Block.new(cid, data)
+
+method putBlock*(
+  self: RepoStore,
+  blk: Block,
+  ttl = Duration.none): Future[?!void] {.async.} =
+  ## Put a block to the blockstore
+  ##
+
+  without key =? self.makePrefixKey(blk.cid), err:
+    trace "Error getting key from provider", err = err.msg
+    return failure(err)
+
+  if await key in self.repoDs:
+    trace "Block already in store", cid = blk.cid
+    return success()
+
+  if (self.totalUsed + blk.data.len.uint) > self.quotaMaxBytes:
+    error "Cannot store block, quota used!", used = self.totalUsed
+    return failure(
+      newException(QuotaUsedError, "Cannot store block, quota used!"))
+
+  trace "Storing block with key", key
+
+  without var expires =? ttl:
+    expires = Moment.fromNow(self.blockTtl) - ZeroMoment
+
+  var
+    batch: seq[BatchEntry]
+
+  let
+    used = self.quotaUsedBytes + blk.data.len.uint
+
+  if err =? (await self.repoDs.put(key, blk.data)).errorOption:
+    trace "Error storing block", err = err.msg
+    return failure(err)
+
+  trace "Updating quota", used
+  batch.add((QuotaUsedKey, @(used.uint64.toBytesBE)))
+
+  without expiresKey =? makeExpiresKey(expires, blk.cid), err:
+    trace "Unable make block ttl key",
+      err = err.msg, cid = blk.cid, expires, expiresKey
+
+    return failure(err)
+
+  trace "Adding expires key", expiresKey, expires
+  batch.add((expiresKey, @[]))
+
+  if err =? (await self.metaDs.put(batch)).errorOption:
+    trace "Error updating quota bytes", err = err.msg
+
+    if err =? (await self.repoDs.delete(key)).errorOption:
+      trace "Error deleting block after failed quota update", err = err.msg
+      return failure(err)
+
+    return failure(err)
+
+  self.quotaUsedBytes = used
+  return success()
+
+method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} =
+  ## Delete a block from the blockstore
+  ##
+
+  trace "Deleting block", cid
+
+  if blk =? (await self.getBlock(cid)):
+    if key =? self.makePrefixKey(cid) and
+      err =? (await self.repoDs.delete(key)).errorOption:
+      trace "Error deleting block!", err = err.msg
+      return failure(err)
+
+    let
+      used = self.quotaUsedBytes - blk.data.len.uint
+
+    if err =? (await self.metaDs.put(
+        QuotaUsedKey,
+        @(used.uint64.toBytesBE))).errorOption:
+      trace "Error updating quota key!", err = err.msg
+      return failure(err)
+
+    self.quotaUsedBytes = used
+
+    trace "Deleted block", cid, totalUsed = self.totalUsed
+
+  return success()
+
+method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
+  ## Check if the block exists in the blockstore
+  ##
+
+  without key =? self.makePrefixKey(cid), err:
+    trace "Error getting key from provider", err = err.msg
+    return failure(err)
+
+  return await self.repoDs.has(key)
+
+method listBlocks*(
+  self: RepoStore,
+  blockType = BlockType.Manifest): Future[?!BlocksIter] {.async.} =
+  ## Get the list of blocks in the RepoStore.
+  ## This is an intensive operation
+  ##
+
+  var
+    iter = BlocksIter()
+
+  let key =
+    case blockType:
+    of BlockType.Manifest: CodexManifestKey
+    of BlockType.Block: CodexBlocksKey
+    of BlockType.Both: CodexRepoKey
+
+  without queryIter =? (await self.repoDs.query(Query.init(key))), err:
+    trace "Error querying cids in repo", blockType, err = err.msg
+    return failure(err)
+
+  proc next(): Future[?Cid] {.async.} =
+    await idleAsync()
+    iter.finished = queryIter.finished
+    if not queryIter.finished:
+      if pair =? (await queryIter.next()) and cid =? pair.key:
+        trace "Retrieved record from repo", cid
+        return Cid.init(cid.value).option
+
+    return Cid.none
+
+  iter.next = next
+  return success iter
+
+method close*(self: RepoStore): Future[void] {.async.} =
+  ## Close the blockstore, cleaning up resources managed by it.
+  ## For some implementations this may be a no-op
+  ##
+
+  (await self.repoDs.close()).expect("Should close datastore")
+
+proc hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} =
+  ## Check if the block exists in the blockstore.
+  ## Return false if error encountered
+  ##
+
+  without key =? self.makePrefixKey(cid), err:
+    trace "Error getting key from provider", err = err.msg
+    return failure(err.msg)
+
+  return await self.repoDs.has(key)
+
+proc reserve*(self: RepoStore, bytes: uint): Future[?!void] {.async.} =
+  ## Reserve bytes
+  ##
+
+  trace "Reserving bytes", reserved = self.quotaReservedBytes, bytes
+
+  if (self.totalUsed + bytes) > self.quotaMaxBytes:
+    trace "Not enough storage quota to reserver", reserve = self.totalUsed + bytes
+    return failure(
+      newException(QuotaNotEnoughError, "Not enough storage quota to reserver"))
+
+  self.quotaReservedBytes += bytes
+  if err =? (await self.metaDs.put(
+      QuotaReservedKey,
+      @(toBytesBE(self.quotaReservedBytes.uint64)))).errorOption:
+
+    trace "Error reserving bytes", err = err.msg
+
+    self.quotaReservedBytes += bytes
+    return failure(err)
+
+  return success()
+
+proc release*(self: RepoStore, bytes: uint): Future[?!void] {.async.} =
+  ## Release bytes
+  ##
+
+  trace "Releasing bytes", reserved = self.quotaReservedBytes, bytes
+
+  if (self.quotaReservedBytes.int - bytes.int) < 0:
+    trace "Cannot release this many bytes",
+      quotaReservedBytes = self.quotaReservedBytes, bytes
+
+    return failure("Cannot release this many bytes")
+
+  self.quotaReservedBytes -= bytes
+  if err =? (await self.metaDs.put(
+      QuotaReservedKey,
+      @(toBytesBE(self.quotaReservedBytes.uint64)))).errorOption:
+
+    trace "Error releasing bytes", err = err.msg
+
+    self.quotaReservedBytes -= bytes
+
+    return failure(err)
+
+  trace "Released bytes", bytes
+  return success()
+
+proc start*(self: RepoStore): Future[void] {.async.} =
+  ## Start repo
+  ##
+
+  if self.started:
+    trace "Repo already started"
+    return
+
+  trace "Starting repo"
+
+  ## load current persist and cache bytes from meta ds
+  without quotaUsedBytes =? await self.metaDs.get(QuotaUsedKey), err:
+    if not (err of DatastoreKeyNotFound):
+      error "Error getting cache bytes from datastore",
+        err = err.msg, key = $QuotaUsedKey
+
+      raise newException(Defect, err.msg)
+
+  if quotaUsedBytes.len > 0:
+    self.quotaUsedBytes = uint64.fromBytesBE(quotaUsedBytes).uint
+
+  notice "Current bytes used for cache quota", bytes = self.quotaUsedBytes
+
+  without quotaReservedBytes =? await self.metaDs.get(QuotaReservedKey), err:
+    if not (err of DatastoreKeyNotFound):
+      error "Error getting persist bytes from datastore",
+        err = err.msg, key = $QuotaReservedKey
+
+      raise newException(Defect, err.msg)
+
+  if quotaReservedBytes.len > 0:
+    self.quotaReservedBytes = uint64.fromBytesBE(quotaReservedBytes).uint
+
+  if self.quotaUsedBytes > self.quotaMaxBytes:
+    raiseAssert "All storage quota used, increase storage quota!"
+
+  notice "Current bytes used for persist quota", bytes = self.quotaReservedBytes
+
+  self.started = true
+
+proc stop*(self: RepoStore): Future[void] {.async.} =
+  ## Stop repo
+  ##
+
+  if self.started:
+    trace "Repo is not started"
+    return
+
+  trace "Stopping repo"
+  (await self.repoDs.close()).expect("Should close repo store!")
+  (await self.metaDs.close()).expect("Should close meta store!")
+
+func new*(
+  T: type RepoStore,
+  repoDs: Datastore,
+  metaDs: Datastore,
+  postFixLen = 2,
+  quotaMaxBytes = DefaultQuotaBytes,
+  blockTtl = DefaultBlockTtl): T =
+
+  T(
+    repoDs: repoDs,
+    metaDs: metaDs,
+    postFixLen: postFixLen,
+    quotaMaxBytes: quotaMaxBytes,
+    blockTtl: blockTtl)
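A sketch of the quota accounting introduced above, assuming a started RepoStore instance and a Block constructor like the one used elsewhere in the codebase; note that DefaultQuotaBytes = 1 shl 33 = 8,589,934,592 bytes, i.e. 8 GiB:

import pkg/chronos
import pkg/questionable
import pkg/questionable/results
import pkg/stew/byteutils
import codex/stores                       # assumed path
import codex/blocktype as bt

proc quotaSketch(repo: RepoStore) {.async.} =
  # reserve/release only move the persisted "reserved" counter;
  # putBlock charges quotaUsedBytes and records a TTL key in metaDs.
  (await repo.reserve(256)).tryGet()
  echo "used + reserved bytes: ", repo.totalUsed()

  let blk = bt.Block.new("quota demo".toBytes).tryGet()   # constructor assumed
  if err =? (await repo.putBlock(blk)).errorOption:
    echo "rejected (e.g. QuotaUsedError): ", err.msg

  (await repo.release(256)).tryGet()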
@@ -1,4 +1,3 @@
-
 ## Nim-Codex
 ## Copyright (c) 2022 Status Research & Development GmbH
 ## Licensed under either of
@@ -11,14 +10,11 @@
 import pkg/upraises
 push: {.upraises: [].}

-import std/os
-
 import pkg/chronicles
 import pkg/questionable/results
 import pkg/libp2p

 import ./fileutils
-import ../conf
 import ../errors
 import ../rng

@@ -12,6 +12,7 @@ import pkg/codex/rng
 import pkg/codex/stores
 import pkg/codex/blockexchange
 import pkg/codex/chunker
+import pkg/codex/manifest
 import pkg/codex/blocktype as bt

 import ../../helpers/mockdiscovery
@@ -24,6 +25,8 @@ suite "Block Advertising and Discovery":

   var
     blocks: seq[bt.Block]
+    manifest: Manifest
+    manifestBlock: bt.Block
     switch: Switch
     peerStore: PeerCtxStore
     blockDiscovery: MockDiscovery
@@ -50,6 +53,12 @@ suite "Block Advertising and Discovery":
     peerStore = PeerCtxStore.new()
     pendingBlocks = PendingBlocksManager.new()

+    manifest = Manifest.new( blocks.mapIt( it.cid ) ).tryGet()
+    manifestBlock = bt.Block.new(
+      manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
+
+    (await localStore.putBlock(manifestBlock)).tryGet()
+
     discovery = DiscoveryEngine.new(
       localStore,
       peerStore,
@ -89,19 +98,50 @@ suite "Block Advertising and Discovery":
|
||||||
|
|
||||||
await engine.stop()
|
await engine.stop()
|
||||||
|
|
||||||
test "Should advertise have blocks":
|
test "Should advertise both manifests and blocks":
|
||||||
|
let
|
||||||
|
advertised = initTable.collect:
|
||||||
|
for b in (blocks & manifestBlock): {b.cid: newFuture[void]()}
|
||||||
|
|
||||||
|
blockDiscovery
|
||||||
|
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
|
||||||
|
if cid in advertised and not advertised[cid].finished():
|
||||||
|
advertised[cid].complete()
|
||||||
|
|
||||||
|
discovery.advertiseType = BlockType.Both
|
||||||
|
await engine.start() # fire up advertise loop
|
||||||
|
await allFuturesThrowing(
|
||||||
|
allFinished(toSeq(advertised.values)))
|
||||||
|
await engine.stop()
|
||||||
|
|
||||||
|
test "Should advertise local manifests":
|
||||||
|
let
|
||||||
|
advertised = newFuture[Cid]()
|
||||||
|
|
||||||
|
blockDiscovery
|
||||||
|
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
|
||||||
|
check manifestBlock.cid == cid
|
||||||
|
advertised.complete(cid)
|
||||||
|
|
||||||
|
discovery.advertiseType = BlockType.Manifest
|
||||||
|
await engine.start() # fire up advertise loop
|
||||||
|
check (await advertised.wait(10.millis)) == manifestBlock.cid
|
||||||
|
await engine.stop()
|
||||||
|
|
||||||
|
test "Should advertise local blocks":
|
||||||
let
|
let
|
||||||
advertised = initTable.collect:
|
advertised = initTable.collect:
|
||||||
for b in blocks: {b.cid: newFuture[void]()}
|
for b in blocks: {b.cid: newFuture[void]()}
|
||||||
|
|
||||||
blockDiscovery.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
|
blockDiscovery
|
||||||
if cid in advertised and not advertised[cid].finished():
|
.publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} =
|
||||||
advertised[cid].complete()
|
if cid in advertised and not advertised[cid].finished():
|
||||||
|
advertised[cid].complete()
|
||||||
|
|
||||||
|
discovery.advertiseType = BlockType.Block
|
||||||
await engine.start() # fire up advertise loop
|
await engine.start() # fire up advertise loop
|
||||||
await allFuturesThrowing(
|
await allFuturesThrowing(
|
||||||
allFinished(toSeq(advertised.values)))
|
allFinished(toSeq(advertised.values)))
|
||||||
|
|
||||||
await engine.stop()
|
await engine.stop()
|
||||||
|
|
||||||
test "Should not launch discovery if remote peer has block":
|
test "Should not launch discovery if remote peer has block":
|
||||||
|
|
|
@@ -0,0 +1,149 @@
import std/sequtils
import std/strutils
import std/options

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils
import pkg/questionable/results

import pkg/codex/stores/cachestore
import pkg/codex/chunker
import pkg/codex/manifest

import ../helpers

type
  StoreProvider* = proc(): BlockStore {.gcsafe.}
  Before* = proc(): Future[void] {.gcsafe.}
  After* = proc(): Future[void] {.gcsafe.}

proc commonBlockStoreTests*(
  name: string,
  provider: StoreProvider,
  before: Before = nil,
  after: After = nil) =

  suite name & " Store Common":
    var
      newBlock, newBlock1, newBlock2, newBlock3: Block
      store: BlockStore

    setup:
      newBlock = Block.new("New Kids on the Block".toBytes()).tryGet()
      newBlock1 = Block.new("1".repeat(100).toBytes()).tryGet()
      newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet()
      newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet()

      if not isNil(before):
        await before()

      store = provider()

    teardown:
      await store.close()

      if not isNil(after):
        await after()

    test "putBlock":
      (await store.putBlock(newBlock1)).tryGet()
      check (await store.hasBlock(newBlock1.cid)).tryGet()

    test "getBlock":
      (await store.putBlock(newBlock)).tryGet()
      let blk = await store.getBlock(newBlock.cid)
      check blk.tryGet() == newBlock

    test "fail getBlock":
      expect BlockNotFoundError:
        discard (await store.getBlock(newBlock.cid)).tryGet()

    test "hasBlock":
      (await store.putBlock(newBlock)).tryGet()

      check:
        (await store.hasBlock(newBlock.cid)).tryGet()
        await newBlock.cid in store

    test "fail hasBlock":
      check:
        not (await store.hasBlock(newBlock.cid)).tryGet()
        not (await newBlock.cid in store)

    test "delBlock":
      (await store.putBlock(newBlock1)).tryGet()
      check (await store.hasBlock(newBlock1.cid)).tryGet()

      (await store.delBlock(newBlock1.cid)).tryGet()

      check not (await store.hasBlock(newBlock1.cid)).tryGet()

    test "listBlocks Blocks":
      let
        blocks = @[newBlock1, newBlock2, newBlock3]

        putHandles = await allFinished(
          blocks.mapIt( store.putBlock( it ) ))

      for handle in putHandles:
        check not handle.failed
        check handle.read.isOK

      let
        cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet()

      var count = 0
      for c in cids:
        if cid =? (await c):
          check (await store.hasBlock(cid)).tryGet()
          count.inc

      check count == 3

    test "listBlocks Manifest":
      let
        blocks = @[newBlock1, newBlock2, newBlock3]
        manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
        manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
        putHandles = await allFinished(
          (manifestBlock & blocks).mapIt( store.putBlock( it ) ))

      for handle in putHandles:
        check not handle.failed
        check handle.read.isOK

      let
        cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet()

      var count = 0
      for c in cids:
        if cid =? (await c):
          check manifestBlock.cid == cid
          check (await store.hasBlock(cid)).tryGet()
          count.inc

      check count == 1

    test "listBlocks Both":
      let
        blocks = @[newBlock1, newBlock2, newBlock3]
        manifest = Manifest.new(blocks = blocks.mapIt( it.cid )).tryGet()
        manifestBlock = Block.new(manifest.encode().tryGet(), codec = DagPBCodec).tryGet()
        putHandles = await allFinished(
          (manifestBlock & blocks).mapIt( store.putBlock( it ) ))

      for handle in putHandles:
        check not handle.failed
        check handle.read.isOK

      let
        cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet()

      var count = 0
      for c in cids:
        if cid =? (await c):
          check (await store.hasBlock(cid)).tryGet()
          count.inc

      check count == 4
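The helper above is meant to be instantiated once per backend under test. As a usage note, a minimal hypothetical invocation would look like the following; the suite name and CacheStore parameters here are assumptions for illustration, and the cache and repo store test files further down in this diff do the same for the real backends:

# Hypothetical example: run the shared block store suite against a CacheStore.
commonBlockStoreTests(
  "Cache (example)", proc(): BlockStore =
    BlockStore(CacheStore.new(cacheSize = 500, chunkSize = 1)))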
@@ -9,6 +9,8 @@ import pkg/questionable/results
 import pkg/codex/stores/cachestore
 import pkg/codex/chunker

+import ./commonstoretests
+
 import ../helpers

 suite "Cache Store":
@@ -30,6 +32,7 @@ suite "Cache Store":

     store = CacheStore.new(cacheSize = 100, chunkSize = 1)
     check store.currentSize == 0

     store = CacheStore.new(@[newBlock1, newBlock2, newBlock3])
     check store.currentSize == 300
@@ -48,7 +51,6 @@ suite "Cache Store":
       chunkSize = 100)

   test "putBlock":
     (await store.putBlock(newBlock1)).tryGet()
     check (await store.hasBlock(newBlock1.cid)).tryGet()
@@ -68,60 +70,6 @@ suite "Cache Store":
       (await store.hasBlock(newBlock2.cid)).tryGet()
       store.currentSize == newBlock2.data.len + newBlock3.data.len # 200

-  test "getBlock":
-    store = CacheStore.new(@[newBlock])
-    let blk = await store.getBlock(newBlock.cid)
-    check blk.tryGet() == newBlock
-
-  test "fail getBlock":
-    let blk = await store.getBlock(newBlock.cid)
-    check:
-      blk.isErr
-      blk.error of BlockNotFoundError
-
-  test "hasBlock":
-    let store = CacheStore.new(@[newBlock])
-    check:
-      (await store.hasBlock(newBlock.cid)).tryGet()
-      await newBlock.cid in store
-
-  test "fail hasBlock":
-    check:
-      not (await store.hasBlock(newBlock.cid)).tryGet()
-      not (await newBlock.cid in store)
-
-  test "delBlock":
-    # empty cache
-    (await store.delBlock(newBlock1.cid)).tryGet()
-    check not (await store.hasBlock(newBlock1.cid)).tryGet()
-
-    (await store.putBlock(newBlock1)).tryGet()
-    check (await store.hasBlock(newBlock1.cid)).tryGet()
-
-    # successfully deleted
-    (await store.delBlock(newBlock1.cid)).tryGet()
-    check not (await store.hasBlock(newBlock1.cid)).tryGet()
-
-    # deletes item should decrement size
-    store = CacheStore.new(@[newBlock1, newBlock2, newBlock3])
-    check:
-      store.currentSize == 300
-
-    (await store.delBlock(newBlock2.cid)).tryGet()
-
-    check:
-      store.currentSize == 200
-      not (await store.hasBlock(newBlock2.cid)).tryGet()
-
-  test "listBlocks":
-    (await store.putBlock(newBlock1)).tryGet()
-
-    var listed = false
-    (await store.listBlocks(
-      proc(cid: Cid) {.gcsafe, async.} =
-        check (await store.hasBlock(cid)).tryGet()
-        listed = true
-    )).tryGet()
-
-    check listed
+  commonBlockStoreTests(
+    "Cache", proc: BlockStore =
+      BlockStore(CacheStore.new(cacheSize = 500, chunkSize = 1)))
@@ -1,88 +0,0 @@
import std/os
import std/options

import pkg/questionable
import pkg/questionable/results

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils

import pkg/codex/stores/cachestore
import pkg/codex/chunker
import pkg/codex/stores
import pkg/codex/blocktype as bt

import ../helpers

proc runSuite(cache: bool) =
  suite "FS Store " & (if cache: "(cache enabled)" else: "(cache disabled)"):
    var
      store: FSStore
      repoDir: string
      newBlock = bt.Block.new("New Block".toBytes()).tryGet()

    setup:
      repoDir = getAppDir() / "repo"
      createDir(repoDir)

      if cache:
        store = FSStore.new(repoDir)
      else:
        store = FSStore.new(repoDir, postfixLen = 2, cache = nil)

    teardown:
      removeDir(repoDir)

    test "putBlock":
      (await store.putBlock(newBlock)).tryGet()
      check:
        fileExists(store.blockPath(newBlock.cid))
        (await store.hasBlock(newBlock.cid)).tryGet()
        await newBlock.cid in store

    test "getBlock":
      createDir(store.blockPath(newBlock.cid).parentDir)
      writeFile(store.blockPath(newBlock.cid), newBlock.data)
      let blk = await store.getBlock(newBlock.cid)
      check blk.tryGet() == newBlock

    test "fail getBlock":
      let blk = await store.getBlock(newBlock.cid)
      check:
        blk.isErr
        blk.error of BlockNotFoundError

    test "hasBlock":
      createDir(store.blockPath(newBlock.cid).parentDir)
      writeFile(store.blockPath(newBlock.cid), newBlock.data)

      check:
        (await store.hasBlock(newBlock.cid)).tryGet()
        await newBlock.cid in store

    test "fail hasBlock":
      check:
        not (await store.hasBlock(newBlock.cid)).tryGet()
        not (await newBlock.cid in store)

    test "listBlocks":
      createDir(store.blockPath(newBlock.cid).parentDir)
      writeFile(store.blockPath(newBlock.cid), newBlock.data)

      (await store.listBlocks(
        proc(cid: Cid) {.gcsafe, async.} =
          check cid == newBlock.cid
      )).tryGet()

    test "delBlock":
      createDir(store.blockPath(newBlock.cid).parentDir)
      writeFile(store.blockPath(newBlock.cid), newBlock.data)

      (await store.delBlock(newBlock.cid)).tryGet()

      check not fileExists(store.blockPath(newBlock.cid))

runSuite(cache = true)
runSuite(cache = false)
@@ -0,0 +1,170 @@
import std/os
import std/options
import std/strutils

import pkg/questionable
import pkg/questionable/results

import pkg/chronos
import pkg/asynctest
import pkg/libp2p
import pkg/stew/byteutils
import pkg/stew/endians2
import pkg/datastore

import pkg/codex/stores/cachestore
import pkg/codex/chunker
import pkg/codex/stores
import pkg/codex/blocktype as bt

import ../helpers
import ./commonstoretests

suite "Test RepoStore Quota":

  var
    repoDs: Datastore
    metaDs: Datastore

  setup:
    repoDs = SQLiteDatastore.new(Memory).tryGet()
    metaDs = SQLiteDatastore.new(Memory).tryGet()

  teardown:
    (await repoDs.close()).tryGet
    (await metaDs.close()).tryGet

  test "Should update current used bytes on block put":
    let
      blk = bt.Block.new('a'.repeat(100).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 100)

    check repo.quotaUsedBytes == 0
    (await repo.putBlock(blk)).tryGet

    check:
      repo.quotaUsedBytes == 100
      uint64.fromBytesBE((await metaDs.get(QuotaUsedKey)).tryGet) == 100'u

  test "Should update current used bytes on block delete":
    let
      blk = bt.Block.new('a'.repeat(100).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 100)

    check repo.quotaUsedBytes == 0
    (await repo.putBlock(blk)).tryGet
    check repo.quotaUsedBytes == 100

    (await repo.delBlock(blk.cid)).tryGet

    check:
      repo.quotaUsedBytes == 0
      uint64.fromBytesBE((await metaDs.get(QuotaUsedKey)).tryGet) == 0'u

  test "Should fail storing past the quota":
    let
      blk = bt.Block.new('a'.repeat(200).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 100)

    check repo.totalUsed == 0
    expect QuotaUsedError:
      (await repo.putBlock(blk)).tryGet

  test "Should reserve bytes":
    let
      blk = bt.Block.new('a'.repeat(100).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200)

    check repo.totalUsed == 0
    (await repo.putBlock(blk)).tryGet
    check repo.totalUsed == 100

    (await repo.reserve(100)).tryGet

    check:
      repo.totalUsed == 200
      repo.quotaUsedBytes == 100
      repo.quotaReservedBytes == 100
      uint64.fromBytesBE((await metaDs.get(QuotaReservedKey)).tryGet) == 100'u

  test "Should not reserve bytes over max quota":
    let
      blk = bt.Block.new('a'.repeat(100).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200)

    check repo.totalUsed == 0
    (await repo.putBlock(blk)).tryGet
    check repo.totalUsed == 100

    expect QuotaNotEnoughError:
      (await repo.reserve(101)).tryGet

    check:
      repo.totalUsed == 100
      repo.quotaUsedBytes == 100
      repo.quotaReservedBytes == 0

    expect DatastoreKeyNotFound:
      discard (await metaDs.get(QuotaReservedKey)).tryGet

  test "Should release bytes":
    let
      blk = bt.Block.new('a'.repeat(100).toBytes).tryGet()
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200)

    check repo.totalUsed == 0
    (await repo.reserve(100)).tryGet
    check repo.totalUsed == 100

    (await repo.release(100)).tryGet

    check:
      repo.totalUsed == 0
      repo.quotaUsedBytes == 0
      repo.quotaReservedBytes == 0
      uint64.fromBytesBE((await metaDs.get(QuotaReservedKey)).tryGet) == 0'u

  test "Should not release bytes less than quota":
    let
      repo = RepoStore.new(repoDs, metaDs, quotaMaxBytes = 200)

    check repo.totalUsed == 0
    (await repo.reserve(100)).tryGet
    check repo.totalUsed == 100

    expect CatchableError:
      (await repo.release(101)).tryGet

    check:
      repo.totalUsed == 100
      repo.quotaUsedBytes == 0
      repo.quotaReservedBytes == 100
      uint64.fromBytesBE((await metaDs.get(QuotaReservedKey)).tryGet) == 100'u

commonBlockStoreTests(
  "RepoStore Sql backend", proc: BlockStore =
    BlockStore(
      RepoStore.new(
        SQLiteDatastore.new(Memory).tryGet(),
        SQLiteDatastore.new(Memory).tryGet())))

const
  path = currentSourcePath().parentDir / "test"

proc before() {.async.} =
  createDir(path)

proc after() {.async.} =
  removeDir(path)

let
  depth = path.split(DirSep).len

commonBlockStoreTests(
  "RepoStore FS backend", proc: BlockStore =
    BlockStore(
      RepoStore.new(
        FSDatastore.new(path, depth).tryGet(),
        SQLiteDatastore.new(Memory).tryGet())),
  before = before,
  after = after)
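Read together, the reserve and release tests above pin down the accounting the store is expected to maintain: the running total is the bytes held by stored blocks plus the bytes explicitly reserved. A one-line sketch of that invariant, assuming a `repo: RepoStore` set up as in the tests:

# Sketch only: the relation the quota checks above exercise.
check repo.totalUsed == repo.quotaUsedBytes + repo.quotaReservedBytes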
@@ -1,4 +1,4 @@
 import ./stores/testcachestore
-import ./stores/testfsstore
+import ./stores/testrepostore

 {.warning[UnusedImport]: off.}
@@ -1 +1 @@
-Subproject commit 6c06a3b095d1935aaf5eec66295862c9c3b4bac5
+Subproject commit 44c198b96a2d687f94c9971f4a3ece02b330347b
@@ -1 +1 @@
-Subproject commit e4e7a3e11fe635de3f15e37164b3ace96f588993
+Subproject commit 4375b9229815c332a3b1a9d0091d5cf5a74adb2e