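## Thin HTTP client for the Codex node REST API, used by the integration
## tests. Synchronous calls go through std/httpclient; `downloadBytes` uses a
## chronos HTTP session.
##
## Typical use, sketched with a made-up address (the API prefix shown is an
## assumption, not defined in this module):
##
## .. code-block:: nim
##   let client = CodexClient.new("http://localhost:8080/api/codex/v1")
##   without cid =? client.upload("some file contents"), err:
##     raiseAssert err.msg
##   without contents =? client.download(cid), err:
##     raiseAssert err.msg
##   assert contents == "some file contents"
##   client.close()
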
import std/httpclient
import std/strutils

from pkg/libp2p import Cid, `$`, init
import pkg/stint
import pkg/questionable/results
import pkg/chronos/apps/http/[httpserver, shttpserver, httpclient]
import pkg/codex/logutils
import pkg/codex/rest/json
import pkg/codex/purchasing
import pkg/codex/errors
import pkg/codex/sales/reservations

export purchasing

type CodexClient* = ref object
  http: HttpClient
  baseurl: string
  session: HttpSessionRef

type CodexClientError* = object of CatchableError

proc new*(_: type CodexClient, baseurl: string): CodexClient =
  CodexClient(
    http: newHttpClient(),
    baseurl: baseurl,
    session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline})
  )

proc info*(client: CodexClient): ?!JsonNode =
  let url = client.baseurl & "/debug/info"
  JsonNode.parse(client.http.getContent(url))

proc setLogLevel*(client: CodexClient, level: string) =
  let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level
  let headers = newHttpHeaders({"Content-Type": "text/plain"})
  let response = client.http.request(url, httpMethod=HttpPost, headers=headers)
  assert response.status == "200 OK"

proc upload*(client: CodexClient, contents: string): ?!Cid =
  let response = client.http.post(client.baseurl & "/data", contents)
  assert response.status == "200 OK"
  Cid.init(response.body).mapFailure

proc download*(client: CodexClient, cid: Cid, local = false): ?!string =
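  ## Download a dataset as a string. When `local` is true only the node's
  ## local store is queried; otherwise the data is streamed from the network.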
  let
    response = client.http.get(
      client.baseurl & "/data/" & $cid &
      (if local: "" else: "/network/stream"))

  if response.status != "200 OK":
    return failure(response.status)

  success response.body

proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string =
  let
    response = client.http.get(
      client.baseurl & "/data/" & $cid & "/network/manifest")

  if response.status != "200 OK":
    return failure(response.status)

  success response.body

proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string =
  let
    response = client.http.post(
      client.baseurl & "/data/" & $cid & "/network")

  if response.status != "200 OK":
    return failure(response.status)

  success response.body

proc downloadBytes*(
  client: CodexClient,
  cid: Cid,
  local = false): Future[?!seq[byte]] {.async.} =
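  ## Async variant that downloads a dataset as raw bytes. From synchronous
  ## test code it can be driven with chronos' `waitFor`, e.g.
  ## `waitFor client.downloadBytes(cid)`.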

  let uri = parseUri(
    client.baseurl & "/data/" & $cid &
    (if local: "" else: "/network/stream")
  )

  let (status, bytes) = await client.session.fetch(uri)

  if status != 200:
    return failure("fetch failed with status " & $status)

  success bytes

proc list*(client: CodexClient): ?!RestContentList =
  let url = client.baseurl & "/data"
  let response = client.http.get(url)

  if response.status != "200 OK":
    return failure(response.status)

  RestContentList.fromJson(response.body)

proc space*(client: CodexClient): ?!RestRepoStore =
  let url = client.baseurl & "/space"
  let response = client.http.get(url)

  if response.status != "200 OK":
    return failure(response.status)

  RestRepoStore.fromJson(response.body)

proc requestStorageRaw*(
  client: CodexClient,
  cid: Cid,
  duration: UInt256,
  reward: UInt256,
  proofProbability: UInt256,
  collateral: UInt256,
  expiry: uint = 0,
  nodes: uint = 3,
  tolerance: uint = 1
): Response =
  ## Call request storage REST endpoint
  ##
  let url = client.baseurl & "/storage/request/" & $cid
  let json = %*{
    "duration": duration,
    "reward": reward,
    "proofProbability": proofProbability,
    "collateral": collateral,
    "nodes": nodes,
    "tolerance": tolerance
  }

  if expiry != 0:
    json["expiry"] = %($expiry)

  return client.http.post(url, $json)

proc requestStorage*(
  client: CodexClient,
  cid: Cid,
  duration: UInt256,
  reward: UInt256,
  proofProbability: UInt256,
  expiry: uint,
  collateral: UInt256,
  nodes: uint = 3,
  tolerance: uint = 1
): ?!PurchaseId =
  ## Call request storage REST endpoint
  ##
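  ## Sketch of a call with made-up values (a node may reject parameters that
  ## fail its validation):
  ##
  ## .. code-block:: nim
  ##   let id = client.requestStorage(
  ##     cid, duration = 100.u256, reward = 2.u256, proofProbability = 3.u256,
  ##     expiry = 30, collateral = 200.u256, nodes = 3, tolerance = 1)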
  let response = client.requestStorageRaw(
    cid, duration, reward, proofProbability, collateral, expiry, nodes, tolerance)

  if response.status != "200 OK":
    doAssert(false, response.body)
  PurchaseId.fromHex(response.body).catch

proc getPurchase*(client: CodexClient, purchaseId: PurchaseId): ?!RestPurchase =
  let url = client.baseurl & "/storage/purchases/" & purchaseId.toHex
  try:
    let body = client.http.getContent(url)
    return RestPurchase.fromJson(body)
  except CatchableError as e:
    return failure e.msg

proc getSalesAgent*(client: CodexClient, slotId: SlotId): ?!RestSalesAgent =
  let url = client.baseurl & "/sales/slots/" & slotId.toHex
  try:
    let body = client.http.getContent(url)
    return RestSalesAgent.fromJson(body)
  except CatchableError as e:
    return failure e.msg

proc getSlots*(client: CodexClient): ?!seq[Slot] =
  let url = client.baseurl & "/sales/slots"
  let body = client.http.getContent(url)
  seq[Slot].fromJson(body)

proc postAvailability*(
  client: CodexClient,
  totalSize, duration, minPrice, maxCollateral: UInt256
): ?!Availability =
  ## Post sales availability endpoint
  ##
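  ## Sketch with made-up values:
  ##
  ## .. code-block:: nim
  ##   let availability = client.postAvailability(
  ##     totalSize = 140000.u256, duration = 200.u256,
  ##     minPrice = 300.u256, maxCollateral = 300.u256)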
  let url = client.baseurl & "/sales/availability"
  let json = %*{
    "totalSize": totalSize,
    "duration": duration,
    "minPrice": minPrice,
    "maxCollateral": maxCollateral,
  }
  let response = client.http.post(url, $json)
  doAssert response.status == "201 Created",
    "expected 201 Created, got " & response.status & ", body: " & response.body
  Availability.fromJson(response.body)

proc patchAvailabilityRaw*(
  client: CodexClient,
  availabilityId: AvailabilityId,
  totalSize, freeSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none
): Response =
  ## Updates an availability. Only the parameters that are given are included
  ## in the PATCH body.
  ##
  let url = client.baseurl & "/sales/availability/" & $availabilityId

  # TODO: Optionalize macro does not keep `serialize` pragmas so we can't use
  # `Optionalize(RestAvailability)` here.
  var json = %*{}

  if totalSize =? totalSize:
    json["totalSize"] = %totalSize

  if freeSize =? freeSize:
    json["freeSize"] = %freeSize

  if duration =? duration:
    json["duration"] = %duration

  if minPrice =? minPrice:
    json["minPrice"] = %minPrice

  if maxCollateral =? maxCollateral:
    json["maxCollateral"] = %maxCollateral

  client.http.patch(url, $json)

proc patchAvailability*(
  client: CodexClient,
  availabilityId: AvailabilityId,
  totalSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none
): void =
  let response = client.patchAvailabilityRaw(
    availabilityId, totalSize = totalSize, duration = duration,
    minPrice = minPrice, maxCollateral = maxCollateral)
  doAssert response.status == "200 OK", "expected 200 OK, got " & response.status

proc getAvailabilities*(client: CodexClient): ?!seq[Availability] =
  ## Call sales availability REST endpoint
  let url = client.baseurl & "/sales/availability"
  let body = client.http.getContent(url)
  seq[Availability].fromJson(body)

proc getAvailabilityReservations*(client: CodexClient, availabilityId: AvailabilityId): ?!seq[Reservation] =
  ## Retrieves an availability's reservations
  let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations"
  let body = client.http.getContent(url)
  seq[Reservation].fromJson(body)

proc close*(client: CodexClient) =
  client.http.close()

proc restart*(client: CodexClient) =
  client.http.close()
  client.http = newHttpClient()

proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool =
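  ## True when the purchase with `id` has reached the given state. Meant to be
  ## polled from tests, e.g. with a hypothetical `eventually` retry helper:
  ## `check eventually client.purchaseStateIs(id, "submitted")`.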
  client.getPurchase(id).option.?state == some state

proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool =
  client.getSalesAgent(id).option.?state == some state

proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId =
  return client.getPurchase(id).option.?requestId

proc uploadRaw*(client: CodexClient, contents: string, headers = newHttpHeaders()): Response =
  return client.http.request(client.baseurl & "/data", body = contents, httpMethod=HttpPost, headers = headers)

proc listRaw*(client: CodexClient): Response =
  return client.http.request(client.baseurl & "/data", httpMethod=HttpGet)

proc downloadRaw*(client: CodexClient, cid: string, local = false): Response =
  return client.http.request(client.baseurl & "/data/" & cid &
    (if local: "" else: "/network/stream"), httpMethod=HttpGet)