#
#       Chronos Asynchronous Chunked-Encoding Stream
#             (c) Copyright 2019-Present
#         Status Research & Development GmbH
#
#              Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#              MIT license (LICENSE-MIT)
## This module implements HTTP/1.1 chunked-encoded stream reading and writing.
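##
## A minimal read-side sketch (assuming ``transp`` is an already-connected
## ``StreamTransport``; the variable names are illustrative only):
##
## .. code-block:: nim
##   let reader = newChunkedStreamReader(newAsyncStreamReader(transp))
##   let body = await reader.read()  # chunk payloads with framing removed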
{.push raises: [].}
import ../[asyncloop, timer, bipbuffer, config]
import asyncstream, ../transports/[stream, common]
import results
export asyncloop, asyncstream, stream, timer, common, results
const
  ChunkBufferSize = chronosStreamDefaultBufferSize
  MaxChunkHeaderSize = 1024
  ChunkHeaderValueSize = 8
    # This limits the chunk size header to 8 hexadecimal digits, so the
    # maximum chunk size for this implementation becomes:
    # 2^32 - 1 == 0xFFFF_FFFF == 4,294,967,295 bytes.
  CRLF = @[byte(0x0D), byte(0x0A)]
type
  ChunkedStreamReader* = ref object of AsyncStreamReader
  ChunkedStreamWriter* = ref object of AsyncStreamWriter

  ChunkedStreamError* = object of AsyncStreamError
  ChunkedStreamProtocolError* = object of ChunkedStreamError
  ChunkedStreamIncompleteError* = object of ChunkedStreamError
proc `-`(x: uint32): uint32 {.inline.} =
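  # Two's complement negation, so `-LT(...)` yields an all-ones mask (for 1)
  # or an all-zeroes mask (for 0), used for branchless selection in `hexValue`.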
  result = (0xFFFF_FFFF'u32 - x) + 1'u32
proc LT(x, y: uint32): uint32 {.inline.} =
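  # Constant-time unsigned "less than": returns 1 when ``x < y``, else 0,
  # e.g. LT(3, 5) == 1 and LT(5, 3) == 0.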
  let z = x - y
  (z xor ((y xor x) and (y xor z))) shr 31
proc hexValue*(c: byte): int =
  # This is a Nim adaptation of
  # https://github.com/pornin/CTTK/blob/master/src/hex.c#L28-L52
  let x = uint32(c) - 0x30'u32
  let y = uint32(c) - 0x41'u32
  let z = uint32(c) - 0x61'u32
  let r = ((x + 1'u32) and -LT(x, 10)) or
          ((y + 11'u32) and -LT(y, 6)) or
          ((z + 11'u32) and -LT(z, 6))
  int(r) - 1
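  # A few illustrative values: hexValue(byte('0')) == 0,
  # hexValue(byte('a')) == 10, hexValue(byte('F')) == 15; any octet that is
  # not a hexadecimal digit, e.g. byte('x'), maps to -1.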
proc getChunkSize(buffer: openArray[byte]): Result[uint64, cstring] =
  # We use a `uint64` representation, but only allow chunk sizes that fit
  # into ChunkHeaderValueSize hexadecimal digits, i.e. at most 2^32 - 1.
  var res = 0'u64
  for i in 0 ..< min(len(buffer), ChunkHeaderValueSize + 1):
    let value = hexValue(buffer[i])
    if value < 0:
      if buffer[i] == byte(';'):
        # chunk-extension is present, so chunk size is already decoded in res.
        return ok(res)
      else:
        return err("Incorrect chunk size encoding")
    else:
      if i >= ChunkHeaderValueSize:
        return err("The chunk size exceeds the limit")
      res = (res shl 4) or uint64(value)
  ok(res)
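  # For example (inputs shown as ASCII strings; the proc takes raw bytes):
  #   "1A2B"      -> ok(0x1A2B)
  #   "ff;ext=1"  -> ok(0xFF)   # chunk-extension after ';' is ignored
  #   "zz"        -> err("Incorrect chunk size encoding")
  #   "123456789" -> err("The chunk size exceeds the limit")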
proc setChunkSize(buffer: var openArray[byte], length: int64): int =
  # Store ``length`` as a chunk header (hexadecimal size value) followed by
  # CRLF. The maximum stored value is ``0xFFFF_FFFF``.
  # ``buffer`` must be at least 10 octets long.
  doAssert(length <= int64(uint32.high))
  var n = 0xF000_0000'i64
  var i = 32
  var c = 0
  if length == 0:
    buffer[0] = byte('0')
    buffer[1] = byte(0x0D)
    buffer[2] = byte(0x0A)
    3
  else:
    while n != 0:
      var v = length and n
      if v != 0 or c != 0:
        let digit = byte((length and n) shr (i - 4))
        var ch = digit + byte('0')
        if ch > byte('9'):
          ch = ch + 0x07'u8
        buffer[c] = ch
        inc(c)
      n = n shr 4
      i = i - 4
    buffer[c] = byte(0x0D)
    buffer[c + 1] = byte(0x0A)
    (c + 2)
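  # For example, length == 26 stores "1A\r\n" into ``buffer`` and returns 4,
  # while length == 0 stores "0\r\n" and returns 3.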
proc chunkedReadLoop(stream: AsyncStreamReader) {.async: (raises: []).} =
  var rstream = ChunkedStreamReader(stream)
  var buffer = newSeq[byte](MaxChunkHeaderSize)
  rstream.state = AsyncStreamState.Running

  while true:
    try:
      # Reading chunk size
      let res = await rstream.rsource.readUntil(addr buffer[0], len(buffer),
                                                CRLF)
      let cres = getChunkSize(buffer.toOpenArray(0, res - len(CRLF) - 1))

      if cres.isErr():
        if rstream.state == AsyncStreamState.Running:
          rstream.error = newException(ChunkedStreamProtocolError, $cres.error)
          rstream.state = AsyncStreamState.Error
      else:
        var chunksize = cres.get()
        if chunksize > 0'u64:
          while chunksize > 0'u64:
            let
              (data, rsize) = rstream.buffer.backend.reserve()
              toRead = int(min(chunksize, uint64(rsize)))
            await rstream.rsource.readExactly(data, toRead)
            rstream.buffer.backend.commit(toRead)
            await rstream.buffer.transfer()
            chunksize = chunksize - uint64(toRead)

          if rstream.state == AsyncStreamState.Running:
            # Reading the chunk's trailing CRLF
            await rstream.rsource.readExactly(addr buffer[0], 2)

            if buffer[0] != CRLF[0] or buffer[1] != CRLF[1]:
              if rstream.state == AsyncStreamState.Running:
                rstream.error = newException(ChunkedStreamProtocolError,
                                             "Unexpected trailing bytes")
                rstream.state = AsyncStreamState.Error
        else:
          # Reading the trailing line of the last chunk
          discard await rstream.rsource.readUntil(addr buffer[0],
                                                  len(buffer), CRLF)
          if rstream.state == AsyncStreamState.Running:
            rstream.state = AsyncStreamState.Finished
            await rstream.buffer.transfer()
    except CancelledError:
      if rstream.state == AsyncStreamState.Running:
        rstream.state = AsyncStreamState.Stopped
    except AsyncStreamLimitError:
      if rstream.state == AsyncStreamState.Running:
        rstream.state = AsyncStreamState.Error
        rstream.error = newException(ChunkedStreamProtocolError,
                                     "Chunk header exceeds maximum size")
    except AsyncStreamIncompleteError:
      if rstream.state == AsyncStreamState.Running:
        rstream.state = AsyncStreamState.Error
        rstream.error = newException(ChunkedStreamIncompleteError,
                                     "Incomplete chunk received")
    except AsyncStreamReadError as exc:
      if rstream.state == AsyncStreamState.Running:
        rstream.state = AsyncStreamState.Error
        rstream.error = exc
    except AsyncStreamError as exc:
      if rstream.state == AsyncStreamState.Running:
        rstream.state = AsyncStreamState.Error
        rstream.error = exc

    if rstream.state != AsyncStreamState.Running:
      # We need to notify the consumer about the error/close, but we do not
      # care about incoming data anymore.
      rstream.buffer.forget()
      break
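
# For reference, the wire framing consumed by `chunkedReadLoop` above
# (a hand-made sample):
#
#   4\r\n      <- chunk size in hexadecimal, optionally with ";extensions"
#   Wiki\r\n   <- chunk payload followed by CRLF
#   0\r\n      <- zero-sized chunk marking the end of the body
#   \r\n       <- trailing line terminating the stream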
proc chunkedWriteLoop(stream: AsyncStreamWriter) {.async: (raises: []).} =
  var wstream = ChunkedStreamWriter(stream)
  var buffer: array[16, byte]
  var error: ref AsyncStreamError
  wstream.state = AsyncStreamState.Running

  while true:
    var item: WriteItem
    # Getting a new item from the stream's queue.
    try:
      item = await wstream.queue.get()
      # `item.size == 0` marks the end of the stream, while `item.size != 0`
      # marks a data chunk.
      if item.size > 0:
        let length = setChunkSize(buffer, int64(item.size))
        # Writing chunk header <length>CRLF.
        await wstream.wsource.write(addr buffer[0], length)
        # Writing chunk data.
        case item.kind
        of WriteType.Pointer:
          await wstream.wsource.write(item.dataPtr, item.size)
        of WriteType.Sequence:
          await wstream.wsource.write(addr item.dataSeq[0], item.size)
        of WriteType.String:
          await wstream.wsource.write(addr item.dataStr[0], item.size)
        # Writing chunk footer CRLF.
        await wstream.wsource.write(CRLF)
        # Everything is fine, completing the queue item's future.
        item.future.complete()
      else:
        let length = setChunkSize(buffer, 0'i64)
        # Write the finish chunk `0`.
        await wstream.wsource.write(addr buffer[0], length)
        # Write the trailing CRLF.
        await wstream.wsource.write(CRLF)
        # Everything is fine, completing the queue item's future.
        item.future.complete()
        # Set the stream state to Finished.
        if wstream.state == AsyncStreamState.Running:
          wstream.state = AsyncStreamState.Finished
    except CancelledError:
      if wstream.state == AsyncStreamState.Running:
        wstream.state = AsyncStreamState.Stopped
        error = newAsyncStreamUseClosedError()
    except AsyncStreamError as exc:
      if wstream.state == AsyncStreamState.Running:
        wstream.state = AsyncStreamState.Error
        error = exc

    if wstream.state != AsyncStreamState.Running:
      if wstream.state == AsyncStreamState.Finished:
        error = newAsyncStreamUseClosedError()
      else:
        if not(isNil(item.future)):
          if not(item.future.finished()):
            item.future.fail(error)
      while not(wstream.queue.empty()):
        let pitem =
          try:
            wstream.queue.popFirstNoWait()
          except AsyncQueueEmptyError:
            raiseAssert "AsyncQueue should not be empty at this moment"
        if not(pitem.future.finished()):
          pitem.future.fail(error)
      break
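
# For reference: queueing the 4 bytes "Wiki" through `chunkedWriteLoop` above
# emits "4\r\nWiki\r\n" on the wire, and finishing the stream emits the
# terminating "0\r\n\r\n".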
proc init*[T](child: ChunkedStreamReader, rsource: AsyncStreamReader,
              bufferSize = ChunkBufferSize, udata: ref T) =
  init(AsyncStreamReader(child), rsource, chunkedReadLoop, bufferSize,
       udata)
proc init*(child: ChunkedStreamReader, rsource: AsyncStreamReader,
           bufferSize = ChunkBufferSize) =
  init(AsyncStreamReader(child), rsource, chunkedReadLoop, bufferSize)
proc newChunkedStreamReader*[T](rsource: AsyncStreamReader,
                                bufferSize = AsyncStreamDefaultBufferSize,
                                udata: ref T): ChunkedStreamReader =
  var res = ChunkedStreamReader()
  res.init(rsource, bufferSize, udata)
  res
proc newChunkedStreamReader*(rsource: AsyncStreamReader,
                             bufferSize = AsyncStreamDefaultBufferSize,
                             ): ChunkedStreamReader =
  var res = ChunkedStreamReader()
  res.init(rsource, bufferSize)
  res
proc init*[T](child: ChunkedStreamWriter, wsource: AsyncStreamWriter,
              queueSize = AsyncStreamDefaultQueueSize, udata: ref T) =
  init(AsyncStreamWriter(child), wsource, chunkedWriteLoop, queueSize,
       udata)
proc init*(child: ChunkedStreamWriter, wsource: AsyncStreamWriter,
           queueSize = AsyncStreamDefaultQueueSize) =
  init(AsyncStreamWriter(child), wsource, chunkedWriteLoop, queueSize)
proc newChunkedStreamWriter*[T](wsource: AsyncStreamWriter,
                                queueSize = AsyncStreamDefaultQueueSize,
                                udata: ref T): ChunkedStreamWriter =
  var res = ChunkedStreamWriter()
  res.init(wsource, queueSize, udata)
  res
proc newChunkedStreamWriter*(wsource: AsyncStreamWriter,
                             queueSize = AsyncStreamDefaultQueueSize,
                             ): ChunkedStreamWriter =
  var res = ChunkedStreamWriter()
  res.init(wsource, queueSize)
  res
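
# A minimal writer-side usage sketch (assuming ``transp`` is a connected
# ``StreamTransport``; the variable names are illustrative only):
#
#   let writer = newChunkedStreamWriter(newAsyncStreamWriter(transp))
#   await writer.write("data")   # emits "4\r\ndata\r\n"
#   await writer.finish()        # emits the terminating "0\r\n\r\n"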