## Nim-Dagger
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

{.push raises: [Defect].}

import std/sequtils

import pkg/questionable
import pkg/questionable/results
import pkg/chronicles
import pkg/chronos
import pkg/presto
import pkg/libp2p

import pkg/libp2p/routing_record

import ../node
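
# Pattern validator handed to `RestRouter.init` below; it accepts every
# route pattern and value unconditionally (always returns 0). Parameter
# decoding and error reporting are handled by the `decodeString` helpers.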
proc validate(
  pattern: string,
  value: string): int
  {.gcsafe, raises: [Defect].} =
  0
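
# presto resolves route parameters through matching `encodeString`/`decodeString`
# overloads, so each parameter type used in the routes below
# (Cid, PeerID, MultiAddress) gets a pair of converters here.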
proc encodeString(cid: type Cid): Result[string, cstring] =
  ok($cid)

proc decodeString(T: type Cid, value: string): Result[Cid, cstring] =
  Cid.init(value)
    .mapErr do(e: CidError) -> cstring:
      case e
      of CidError.Incorrect: "Incorrect Cid"
      of CidError.Unsupported: "Unsupported Cid"
      of CidError.Overrun: "Overrun Cid"
      else: "Error parsing Cid"

proc encodeString(peerId: PeerID): Result[string, cstring] =
  ok($peerId)

proc decodeString(T: type PeerID, value: string): Result[PeerID, cstring] =
  PeerID.init(value)

proc encodeString(address: MultiAddress): Result[string, cstring] =
  ok($address)

proc decodeString(T: type MultiAddress, value: string): Result[MultiAddress, cstring] =
  MultiAddress
    .init(value)
    .mapErr do(e: string) -> cstring: cstring(e)
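
# Builds the REST router for a Dagger node. The routes registered below are:
#   GET  /api/dagger/v1/connect/{peerId} -- dial a peer, optionally using `addrs`
#   GET  /api/dagger/v1/download/{id}    -- stream the content behind a Cid
#   POST /api/dagger/v1/upload           -- store an uploaded file and return its Cid
#   GET  /api/dagger/v1/info             -- basic node identity and address info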
proc initRestApi*(node: DaggerNodeRef): RestRouter =
  var router = RestRouter.init(validate)
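
  # Connect endpoint: decodes `peerId` from the path; if one or more `addrs`
  # values are supplied they are used to dial directly, otherwise the node
  # tries peer discovery via `findPeer` and dials the returned addresses.
  # Hypothetical usage sketch (host/port are assumptions, only the route
  # comes from this file):
  #   curl "http://127.0.0.1:8080/api/dagger/v1/connect/<peerId>?addrs=<multiaddr>"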
  router.api(
    MethodGet,
    "/api/dagger/v1/connect/{peerId}") do (
      peerId: PeerID,
      addrs: seq[MultiAddress]) -> RestApiResponse:
      if peerId.isErr:
        return RestApiResponse.error(
          Http400,
          $peerId.error())

      let addresses =
        if addrs.isOk and addrs.get().len > 0:
          addrs.get()
        else:
          let peerRecord = await node.findPeer(peerId.get())
          if peerRecord.isErr:
            return RestApiResponse.error(
              Http400,
              "Unable to find Peer!")

          peerRecord.get().addresses.mapIt(
            it.address
          )

      await node.connect(peerId.get(), addresses)
      return RestApiResponse.response("")
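
  # Download endpoint: decodes the `id` path segment into a Cid, asks the node
  # to retrieve the content into a BufferStream and relays it to the client as
  # a chunked HTTP response. Hypothetical usage sketch (host/port are
  # assumptions, only the route comes from this file):
  #   curl -o output.bin "http://127.0.0.1:8080/api/dagger/v1/download/<cid>"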
  router.api(
    MethodGet,
    "/api/dagger/v1/download/{id}") do (
      id: Cid, resp: HttpResponseRef) -> RestApiResponse:
      if id.isErr:
        return RestApiResponse.error(
          Http400,
          $id.error())

      let
        stream = BufferStream.new()

      var bytes = 0
      try:
        if (
          let retr = await node.retrieve(stream, id.get());
          retr.isErr):
          return RestApiResponse.error(Http400, retr.error.msg)

        await resp.prepareChunked()
        while not stream.atEof:
          var
            buff = newSeqUninitialized[byte](FileChunkSize)
            len = await stream.readOnce(addr buff[0], buff.len)

          buff.setLen(len)
          if buff.len <= 0:
            break

          bytes += buff.len
          trace "Sending chunk", size = buff.len
          await resp.sendChunk(addr buff[0], buff.len)
      except CatchableError as exc:
        trace "Exception streaming blocks", exc = exc.msg
        return RestApiResponse.error(Http500)
      finally:
        trace "Sent bytes", cid = id.get(), bytes
        await stream.close()
        await resp.finish()
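
  # Upload endpoint: reads the raw request body in FileChunkSize chunks,
  # pushes it through a BufferStream into `node.store` and returns the
  # resulting Cid. Hypothetical usage sketch (host/port are assumptions,
  # only the route comes from this file):
  #   curl -X POST --data-binary @file.bin "http://127.0.0.1:8080/api/dagger/v1/upload"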
  router.rawApi(
    MethodPost,
    "/api/dagger/v1/upload") do (
    ) -> RestApiResponse:
      trace "Handling file upload"
      var bodyReader = request.getBodyReader()
      if bodyReader.isErr():
        return RestApiResponse.error(Http500)

      # Attempt to handle the `Expect` header;
      # some clients (e.g. curl) wait 1000ms
      # before giving up
      #
      await request.handleExpect()

      let
        reader = bodyReader.get()
        stream = BufferStream.new()
        storeFut = node.store(stream)

      var bytes = 0
      try:
        while not reader.atEof:
          var
            buff = newSeqUninitialized[byte](FileChunkSize)
            len = await reader.readOnce(addr buff[0], buff.len)

          buff.setLen(len)
          if len <= 0:
            break

          trace "Got chunk from endpoint", len = buff.len
          await stream.pushData(buff)
          bytes += len

        await stream.pushEof()
        without cid =? (await storeFut):
          return RestApiResponse.error(Http500)

        trace "Uploaded file", bytes, cid = $cid
        return RestApiResponse.response($cid)
      except CancelledError as exc:
        await reader.closeWait()
        return RestApiResponse.error(Http500)
      except AsyncStreamError:
        await reader.closeWait()
        return RestApiResponse.error(Http500)
      finally:
        await stream.close()
        await reader.closeWait()

      # If we got here, something went wrong
      return RestApiResponse.error(Http500)
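
  # Info endpoint: returns the node's PeerID and listen addresses as plain
  # text. Hypothetical usage sketch (host/port are assumptions, only the
  # route comes from this file):
  #   curl "http://127.0.0.1:8080/api/dagger/v1/info"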
  router.api(
    MethodGet,
    "/api/dagger/v1/info") do () -> RestApiResponse:
      var addrs: string
      for a in node.switch.peerInfo.addrs:
        addrs &= "- " & $a & "\n"

      return RestApiResponse.response(
        "Id: " & $node.switch.peerInfo.peerId &
        "\nAddrs: \n" & addrs & "\n")

  return router
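
# A minimal sketch of how this router might be served with nim-presto,
# assuming presto's `RestServerRef.new(router, address)` / `start()` API and a
# hypothetical `newDaggerNode()` constructor; the listen address is likewise
# an assumption, not something this file defines:
#
#   let node = newDaggerNode()                   # hypothetical constructor
#   let router = initRestApi(node)
#   let server = RestServerRef.new(
#     router, initTAddress("127.0.0.1:8080")).tryGet()
#   server.start()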