Mirror of https://github.com/status-im/nim-dagger.git, synced 2025-02-22 11:28:22 +00:00
* use PeerInfo in event handlers
* use CidV1 and raw multicodec as default
* add block stream abstraction
* raises defect
* adding dataset abstraction
* move blockstream into own dir
* reorg files and fix tests
* rename dataset to blockset
* wip
* wip
* adding basic test for treehash algo
* run blockset tests along with with the rest
* remove obsolete contents
* fix chunker tests
* rename bitswap and move to stores
* rename bitwsap to blockexc and move to stores
* moare project structure reorg
41 lines · 1.2 KiB · Nim
import std/unittest

import pkg/stew/byteutils

import pkg/dagger/chunker

suite "Chunking":
  test "should return proper size chunks":
    # reader callback that serves bytes from a fixed in-memory buffer
    proc reader(data: var openArray[byte], offset: Natural = 0): int
      {.gcsafe, closure, raises: [Defect].} =
      let contents = "1234567890".toBytes
      copyMem(addr data[0], unsafeAddr contents[offset], data.len)
      return data.len

    let chunker = Chunker.new(
      reader = reader,
      size = 10,
      chunkSize = 2)

    check chunker.getBytes() == "12".toBytes
    check chunker.getBytes() == "34".toBytes
    check chunker.getBytes() == "56".toBytes
    check chunker.getBytes() == "78".toBytes
    check chunker.getBytes() == "90".toBytes
    check chunker.getBytes() == "".toBytes # exhausted source yields an empty chunk

  test "should chunk file":
    let (fileName, _, _) = instantiationInfo() # get this file's name
    let path = "tests/dagger/" & fileName
    let file = open(path)
    let fileChunker = newFileChunker(file = file)

    var data: seq[byte]
    while true:
      let buff = fileChunker.getBytes()
      if buff.len <= 0:
        break

      check buff.len <= fileChunker.chunkSize
      data.add(buff)

    # reassembling the chunks should reproduce the original file contents
    check string.fromBytes(data) == readFile(path)
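For readers coming to these tests cold, here is a minimal sketch of the Chunker shape the assertions assume: a reader callback that fills a caller-supplied buffer, a fixed total size, and a chunkSize cap on every getBytes call. SketchChunker, memReader, and all field names below are illustrative assumptions made for clarity, not the actual pkg/dagger/chunker implementation.

# Hypothetical sketch only; the real Chunker in pkg/dagger/chunker may differ.
type
  # Callback contract implied by the first test: fill `data`, starting at
  # `offset` in the underlying source, and return the number of bytes written.
  Reader = proc(data: var openArray[byte], offset: Natural): int {.gcsafe, closure, raises: [Defect].}

  SketchChunker = ref object
    reader: Reader  # pulls bytes into the supplied buffer
    size: int       # total number of bytes the source holds
    chunkSize: int  # upper bound on bytes returned per getBytes call
    offset: int     # read position, advanced on every call

proc getBytes(c: SketchChunker): seq[byte] =
  # Request at most chunkSize bytes from the reader; an empty seq marks the end,
  # matching the final `check` of the first test.
  if c.offset >= c.size:
    return @[]
  var buff = newSeq[byte](min(c.chunkSize, c.size - c.offset))
  let read = c.reader(buff, c.offset)
  buff.setLen(read)
  c.offset += read
  return buff

when isMainModule:
  # In-memory reader mirroring the first test above (hypothetical helper).
  proc memReader(data: var openArray[byte], offset: Natural): int
    {.gcsafe, closure, raises: [Defect].} =
    let contents = [byte 1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    copyMem(addr data[0], unsafeAddr contents[offset], data.len)
    return data.len

  let c = SketchChunker(reader: memReader, size: 10, chunkSize: 3)
  doAssert c.getBytes() == @[byte 1, 2, 3]
  doAssert c.getBytes() == @[byte 4, 5, 6]
  doAssert c.getBytes() == @[byte 7, 8, 9]
  doAssert c.getBytes() == @[byte 0]  # final partial chunk
  doAssert c.getBytes().len == 0      # source exhausted

Under this shape, the trailing empty check in the first test falls out naturally: once offset reaches size, getBytes stops calling the reader and returns an empty sequence.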