Mirror of https://github.com/status-im/nim-chronos.git (synced 2025-01-21 08:49:45 +00:00)
Httpclient (#182)
* Initial commit.
* Some refactoring.
* Allow boundstream to accept uint64. Fix httpserver and asyncstream tests to follow new uint64 requirement.
* send() and getBodyBytes() implementations.
* Add closeWait for response and request. Refactor finish/close flow.
* Changes in state machine. Add first test.
* Missing test file.
* Fixed tests. Add http leaking trackers and tests.
* Some fixes in multipart. Fix automatic Content-Length header for requests with body. Fix getBodyBytes() assertions. Merging tests to main suite.
* Post rebase fixes.
* Fix tests big message generation.
* Fix response state management and leaks for getBodyXXX() procedures.
* Add redirection support to client and server. Add fetch(url) procedure with redirection support. Add tests for redirection.
This commit is contained in:
parent d270dba8a3
commit be184a815c
chronos/apps/http/httpagent.nim (new file, 24 lines)
@@ -0,0 +1,24 @@
#
# Chronos HTTP/S client implementation
# (c) Copyright 2021-Present
# Status Research & Development GmbH
#
# Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#  MIT license (LICENSE-MIT)
import strutils

const
  ChronosName* = "nim-chronos"
    ## Project name string
  ChronosMajor* {.intdefine.}: int = 3
    ## Major number of Chronos' version.
  ChronosMinor* {.intdefine.}: int = 0
    ## Minor number of Chronos' version.
  ChronosPatch* {.intdefine.}: int = 2
    ## Patch number of Chronos' version.
  ChronosVersion* = $ChronosMajor & "." & $ChronosMinor & "." & $ChronosPatch
    ## Version of Chronos as a string.
  ChronosIdent* = "$1/$2 ($3/$4)" % [ChronosName, ChronosVersion, hostCPU,
                                     hostOS]
    ## Project ident name for networking services
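ChronosIdent above is the agent string the new HTTP client and server identify themselves with. As a minimal, hedged sketch (the import path is taken from the file location above; whether it resolves depends on how chronos is installed):

  # Sketch: print a User-Agent style header value built from httpagent.
  import chronos/apps/http/httpagent

  # e.g. "User-Agent: nim-chronos/3.0.2 (amd64/linux)", depending on target
  echo "User-Agent: " & ChronosIdent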
chronos/apps/http/httpbodyrw.nim (new file, 146 lines)
@@ -0,0 +1,146 @@
#
# Chronos HTTP/S body reader/writer
# (c) Copyright 2021-Present
# Status Research & Development GmbH
#
# Licensed under either of
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#  MIT license (LICENSE-MIT)
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, boundstream]

const
  HttpBodyReaderTrackerName* = "http.body.reader"
    ## HTTP body reader leaks tracker name
  HttpBodyWriterTrackerName* = "http.body.writer"
    ## HTTP body writer leaks tracker name

type
  HttpBodyReader* = ref object of AsyncStreamReader
    streams*: seq[AsyncStreamReader]

  HttpBodyWriter* = ref object of AsyncStreamWriter
    streams*: seq[AsyncStreamWriter]

  HttpBodyTracker* = ref object of TrackerBase
    opened*: int64
    closed*: int64

proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe, raises: [Defect].}
proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe, raises: [Defect].}

proc getHttpBodyWriterTracker(): HttpBodyTracker {.inline.} =
  var res = cast[HttpBodyTracker](getTracker(HttpBodyWriterTrackerName))
  if isNil(res):
    res = setupHttpBodyWriterTracker()
  res

proc getHttpBodyReaderTracker(): HttpBodyTracker {.inline.} =
  var res = cast[HttpBodyTracker](getTracker(HttpBodyReaderTrackerName))
  if isNil(res):
    res = setupHttpBodyReaderTracker()
  res

proc dumpHttpBodyWriterTracking(): string {.gcsafe.} =
  let tracker = getHttpBodyWriterTracker()
  "Opened HTTP body writers: " & $tracker.opened & "\n" &
  "Closed HTTP body writers: " & $tracker.closed

proc dumpHttpBodyReaderTracking(): string {.gcsafe.} =
  let tracker = getHttpBodyReaderTracker()
  "Opened HTTP body readers: " & $tracker.opened & "\n" &
  "Closed HTTP body readers: " & $tracker.closed

proc leakHttpBodyWriter(): bool {.gcsafe.} =
  var tracker = getHttpBodyWriterTracker()
  tracker.opened != tracker.closed

proc leakHttpBodyReader(): bool {.gcsafe.} =
  var tracker = getHttpBodyReaderTracker()
  tracker.opened != tracker.closed

proc trackHttpBodyWriter(t: HttpBodyWriter) {.inline.} =
  inc(getHttpBodyWriterTracker().opened)

proc untrackHttpBodyWriter*(t: HttpBodyWriter) {.inline.} =
  inc(getHttpBodyWriterTracker().closed)

proc trackHttpBodyReader(t: HttpBodyReader) {.inline.} =
  inc(getHttpBodyReaderTracker().opened)

proc untrackHttpBodyReader*(t: HttpBodyReader) {.inline.} =
  inc(getHttpBodyReaderTracker().closed)

proc setupHttpBodyWriterTracker(): HttpBodyTracker {.gcsafe.} =
  var res = HttpBodyTracker(opened: 0, closed: 0,
    dump: dumpHttpBodyWriterTracking,
    isLeaked: leakHttpBodyWriter
  )
  addTracker(HttpBodyWriterTrackerName, res)
  res

proc setupHttpBodyReaderTracker(): HttpBodyTracker {.gcsafe.} =
  var res = HttpBodyTracker(opened: 0, closed: 0,
    dump: dumpHttpBodyReaderTracking,
    isLeaked: leakHttpBodyReader
  )
  addTracker(HttpBodyReaderTrackerName, res)
  res

proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader =
  ## HttpBodyReader is AsyncStreamReader which holds references to all the
  ## ``streams``. Also on close it will close all the ``streams``.
  ##
  ## First stream in sequence will be used as a source.
  doAssert(len(streams) > 0, "At least one stream must be added")
  var res = HttpBodyReader(streams: @streams)
  res.init(streams[0])
  trackHttpBodyReader(res)
  res

proc closeWait*(bstream: HttpBodyReader) {.async.} =
  ## Close and free resource allocated by body reader.
  var res = newSeq[Future[void]]()
  # We closing streams in reversed order because stream at position [0], uses
  # data from stream at position [1].
  for index in countdown((len(bstream.streams) - 1), 0):
    res.add(bstream.streams[index].closeWait())
  await allFutures(res)
  await procCall(closeWait(AsyncStreamReader(bstream)))
  untrackHttpBodyReader(bstream)

proc newHttpBodyWriter*(streams: varargs[AsyncStreamWriter]): HttpBodyWriter =
  ## HttpBodyWriter is AsyncStreamWriter which holds references to all the
  ## ``streams``. Also on close it will close all the ``streams``.
  ##
  ## First stream in sequence will be used as a destination.
  doAssert(len(streams) > 0, "At least one stream must be added")
  var res = HttpBodyWriter(streams: @streams)
  res.init(streams[0])
  trackHttpBodyWriter(res)
  res

proc closeWait*(bstream: HttpBodyWriter) {.async.} =
  ## Close and free all the resources allocated by body writer.
  var res = newSeq[Future[void]]()
  for index in countdown(len(bstream.streams) - 1, 0):
    res.add(bstream.streams[index].closeWait())
  await allFutures(res)
  await procCall(closeWait(AsyncStreamWriter(bstream)))
  untrackHttpBodyWriter(bstream)

proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [Defect].} =
  if len(bstream.streams) == 1:
    # If HttpBodyReader has only one stream it has ``BoundedStreamReader``, in
    # such case its impossible to get more bytes then expected amount.
    false
  else:
    # If HttpBodyReader has two or more streams, we check if
    # ``BoundedStreamReader`` at EOF.
    if bstream.streams[0].atEof():
      for i in 1 ..< len(bstream.streams):
        if not(bstream.streams[1].atEof()):
          return true
      false
    else:
      false
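A rough usage sketch for the reader side defined above; the `newBoundedStreamReader` constructor name and its uint64 length parameter are assumptions drawn from the boundstream import and the commit message ("Allow boundstream to accept uint64"), not verified API:

  # Hedged sketch: expose a length-bounded source as an HTTP body reader and
  # make sure every wrapped stream is closed with a single call.
  proc readBody(source: AsyncStreamReader, length: uint64) {.async.} =
    let bounded = newBoundedStreamReader(source, length)   # assumed signature
    let body = newHttpBodyReader(bounded)
    try:
      let data = await body.read()        # read until the bound is reached
      echo "body size: ", len(data)
    finally:
      await body.closeWait()              # closes the bounded stream too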
chronos/apps/http/httpclient.nim (new file, 1233 lines)
File diff suppressed because it is too large
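Because the httpclient.nim diff itself is suppressed, here is a hedged illustration only of how the new client API is exercised in tests/testhttpclient.nim further down (the names mirror the test code; this is not authoritative documentation):

  # Sketch of a plain GET with the new client; fetch() follows redirects.
  proc simpleGet(address: TransportAddress) {.async.} =
    let session = HttpSessionRef.new(maxRedirections = HttpMaxRedirections)
    let ha = getAddress(address, HttpClientScheme.NonSecure, "/test/get")
    var req = HttpClientRequestRef.new(session, ha, MethodGet)
    let response = await fetch(req)
    echo response.status, " ", cast[string](response.data)
    await req.closeWait()
    await session.closeWait()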
@@ -7,17 +7,33 @@
#  Apache License, version 2.0, (LICENSE-APACHEv2)
#  MIT license (LICENSE-MIT)
import std/[strutils, uri]
import stew/results, httputils
import stew/[results, endians2], httputils
import ../../asyncloop, ../../asyncsync
import ../../streams/[asyncstream, boundstream]
export results, httputils, strutils

const
  HeadersMark* = @[byte(0x0D), byte(0x0A), byte(0x0D), byte(0x0A)]
  HeadersMark* = @[0x0d'u8, 0x0a'u8, 0x0d'u8, 0x0a'u8]
  PostMethods* = {MethodPost, MethodPatch, MethodPut, MethodDelete}

  MaximumBodySizeError* = "Maximum size of request's body reached"

  UserAgentHeader* = "user-agent"
  DateHeader* = "date"
  HostHeader* = "host"
  ConnectionHeader* = "connection"
  AcceptHeader* = "accept"
  ContentLengthHeader* = "content-length"
  TransferEncodingHeader* = "transfer-encoding"
  ContentEncodingHeader* = "content-encoding"
  ContentTypeHeader* = "content-type"
  ExpectHeader* = "expect"
  ServerHeader* = "server"
  LocationHeader* = "location"

  UrlEncodedContentType* = "application/x-www-form-urlencoded"
  MultipartContentType* = "multipart/form-data"

type
  HttpResult*[T] = Result[T, string]
  HttpResultCode*[T] = Result[T, HttpCode]
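The header-name and content-type constants above replace the string literals scattered through the server code; a small sketch of using them together with HttpTable (the pair-list constructor is the same one used elsewhere in this commit):

  # Sketch: build a header table from the shared constants.
  let headers = HttpTable.init([
    (ContentTypeHeader, UrlEncodedContentType),
    (ConnectionHeader, "keep-alive")
  ])
  doAssert headers.getString(ContentTypeHeader) == UrlEncodedContentType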
@@ -29,6 +45,13 @@ type
  HttpRecoverableError* = object of HttpError
    code*: HttpCode
  HttpDisconnectError* = object of HttpError
  HttpConnectionError* = object of HttpError
  HttpInterruptError* = object of HttpError
  HttpReadError* = object of HttpError
  HttpWriteError* = object of HttpError
  HttpProtocolError* = object of HttpError
  HttpRedirectError* = object of HttpError
  HttpAddressError* = object of HttpError

  TransferEncodingFlags* {.pure.} = enum
    Identity, Chunked, Compress, Deflate, Gzip
@@ -36,45 +59,6 @@ type
  ContentEncodingFlags* {.pure.} = enum
    Identity, Br, Compress, Deflate, Gzip

  HttpBodyReader* = ref object of AsyncStreamReader
    streams*: seq[AsyncStreamReader]

proc newHttpBodyReader*(streams: varargs[AsyncStreamReader]): HttpBodyReader =
  ## HttpBodyReader is AsyncStreamReader which holds references to all the
  ## ``streams``. Also on close it will close all the ``streams``.
  ##
  ## First stream in sequence will be used as a source.
  doAssert(len(streams) > 0, "At least one stream must be added")
  var res = HttpBodyReader(streams: @streams)
  res.init(streams[0])
  res

proc closeWait*(bstream: HttpBodyReader) {.async.} =
  ## Close and free resource allocated by body reader.
  var res = newSeq[Future[void]]()
  # We closing streams in reversed order because stream at position [0], uses
  # data from stream at position [1].
  for index in countdown((len(bstream.streams) - 1), 0):
    res.add(bstream.streams[index].closeWait())
  await allFutures(res)
  await procCall(closeWait(AsyncStreamReader(bstream)))

proc hasOverflow*(bstream: HttpBodyReader): bool {.raises: [Defect].} =
  if len(bstream.streams) == 1:
    # If HttpBodyReader has only one stream it has ``BoundedStreamReader``, in
    # such case its impossible to get more bytes then expected amount.
    false
  else:
    # If HttpBodyReader has two or more streams, we check if
    # ``BoundedStreamReader`` at EOF.
    if bstream.streams[0].atEof():
      for i in 1 ..< len(bstream.streams):
        if not(bstream.streams[1].atEof()):
          return true
      false
    else:
      false

proc raiseHttpCriticalError*(msg: string,
                             code = Http400) {.noinline, noreturn.} =
  raise (ref HttpCriticalError)(code: code, msg: msg)
@@ -85,6 +69,36 @@ proc raiseHttpDisconnectError*() {.noinline, noreturn.} =
proc raiseHttpDefect*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpDefect)(msg: msg)

proc raiseHttpConnectionError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpConnectionError)(msg: msg)

proc raiseHttpInterruptError*() {.noinline, noreturn.} =
  raise (ref HttpInterruptError)(msg: "Connection was interrupted")

proc raiseHttpReadError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpReadError)(msg: msg)

proc raiseHttpProtocolError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpProtocolError)(msg: msg)

proc raiseHttpWriteError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpWriteError)(msg: msg)

proc raiseHttpRedirectError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpRedirectError)(msg: msg)

proc raiseHttpAddressError*(msg: string) {.noinline, noreturn.} =
  raise (ref HttpAddressError)(msg: msg)

template newHttpInterruptError*(): ref HttpInterruptError =
  newException(HttpInterruptError, "Connection was interrupted")

template newHttpReadError*(message: string): ref HttpReadError =
  newException(HttpReadError, message)

template newHttpWriteError*(message: string): ref HttpWriteError =
  newException(HttpWriteError, message)

iterator queryParams*(query: string): tuple[key: string, value: string] {.
    raises: [Defect].} =
  ## Iterate over url-encoded query string.
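The queryParams iterator shown in context above is what the server uses to split url-encoded query strings; a tiny sketch of consuming it (values are assumed to come back url-decoded):

  # Sketch: iterate key/value pairs of a query string.
  for key, value in queryParams("name=alice&role=admin"):
    echo key, " = ", value   # name = alice, then role = admin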
@@ -211,3 +225,48 @@ func stringToBytes*(src: openarray[char]): seq[byte] =
    dst
  else:
    default

proc dumpHex*(pbytes: openarray[byte], groupBy = 1, ascii = true): string =
  ## Get hexadecimal dump of memory for array ``pbytes``.
  var res = ""
  var offset = 0
  var ascii = ""

  while offset < len(pbytes):
    if (offset mod 16) == 0:
      res = res & toHex(uint64(offset)) & ": "

    for k in 0 ..< groupBy:
      let ch = pbytes[offset + k]
      ascii.add(if ord(ch) > 31 and ord(ch) < 127: char(ch) else: '.')

    let item =
      case groupBy:
      of 1:
        toHex(pbytes[offset])
      of 2:
        toHex(uint16.fromBytes(pbytes.toOpenArray(offset, len(pbytes) - 1)))
      of 4:
        toHex(uint32.fromBytes(pbytes.toOpenArray(offset, len(pbytes) - 1)))
      of 8:
        toHex(uint64.fromBytes(pbytes.toOpenArray(offset, len(pbytes) - 1)))
      else:
        ""
    res.add(item)
    res.add(" ")
    offset = offset + groupBy

    if (offset mod 16) == 0:
      res.add(" ")
      res.add(ascii)
      ascii.setLen(0)
      res.add("\p")

  if (offset mod 16) != 0:
    let spacesCount = ((16 - (offset mod 16)) div groupBy) *
                      (groupBy * 2 + 1) + 1
    res = res & repeat(' ', spacesCount)
    res = res & ascii

  res.add("\p")
  res
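A quick sketch of calling the dumpHex helper defined above (the output shape in the comment is illustrative, not captured from a run):

  # Sketch: hex dump of a small buffer, one byte per group.
  let buf = @[0x47'u8, 0x45'u8, 0x54'u8, 0x20'u8, 0x2f'u8]
  echo dumpHex(buf)
  # roughly: 0000000000000000: 47 45 54 20 2F    GET /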
@@ -289,18 +289,19 @@ proc prepareRequest(conn: HttpConnectionRef,
        table.add(key, value)
      # Validating HTTP request headers
      # Some of the headers must be present only once.
      if table.count("content-type") > 1:
      if table.count(ContentTypeHeader) > 1:
        return err(Http400)
      if table.count("content-length") > 1:
      if table.count(ContentLengthHeader) > 1:
        return err(Http400)
      if table.count("transfer-encoding") > 1:
      if table.count(TransferEncodingHeader) > 1:
        return err(Http400)
      table

  # Preprocessing "Content-Encoding" header.
  request.contentEncoding =
    block:
      let res = getContentEncoding(request.headers.getList("content-encoding"))
      let res = getContentEncoding(
        request.headers.getList(ContentEncodingHeader))
      if res.isErr():
        return err(Http400)
      else:
@@ -310,7 +311,7 @@ proc prepareRequest(conn: HttpConnectionRef,
  request.transferEncoding =
    block:
      let res = getTransferEncoding(
        request.headers.getList("transfer-encoding"))
        request.headers.getList(TransferEncodingHeader))
      if res.isErr():
        return err(Http400)
      else:
@@ -318,8 +319,8 @@ proc prepareRequest(conn: HttpConnectionRef,

  # Almost all HTTP requests could have body (except TRACE), we perform some
  # steps to reveal information about body.
  if "content-length" in request.headers:
    let length = request.headers.getInt("content-length")
  if ContentLengthHeader in request.headers:
    let length = request.headers.getInt(ContentLengthHeader)
    if length > 0:
      if request.meth == MethodTrace:
        return err(Http400)
@@ -337,20 +338,16 @@ proc prepareRequest(conn: HttpConnectionRef,

  if request.hasBody():
    # If request has body, we going to understand how its encoded.
    const
      UrlEncodedType = "application/x-www-form-urlencoded"
      MultipartType = "multipart/form-data"

    if "content-type" in request.headers:
      let contentType = request.headers.getString("content-type")
    if ContentTypeHeader in request.headers:
      let contentType = request.headers.getString(ContentTypeHeader)
      let tmp = strip(contentType).toLowerAscii()
      if tmp.startsWith(UrlEncodedType):
      if tmp.startsWith(UrlEncodedContentType):
        request.requestFlags.incl(HttpRequestFlags.UrlencodedForm)
      elif tmp.startsWith(MultipartType):
      elif tmp.startsWith(MultipartContentType):
        request.requestFlags.incl(HttpRequestFlags.MultipartForm)

    if "expect" in request.headers:
      let expectHeader = request.headers.getString("expect")
    if ExpectHeader in request.headers:
      let expectHeader = request.headers.getString(ExpectHeader)
      if strip(expectHeader).toLowerAscii() == "100-continue":
        request.requestFlags.incl(HttpRequestFlags.ClientExpect)

@@ -430,17 +427,29 @@ proc sendErrorResponse(conn: HttpConnectionRef, version: HttpVersion,
                       datatype = "text/text",
                       databody = ""): Future[bool] {.async.} =
  var answer = $version & " " & $code & "\r\n"
  answer.add("Date: " & httpDate() & "\r\n")
  if len(datatype) > 0:
    answer.add("Content-Type: " & datatype & "\r\n")
  answer.add("Content-Length: " &
             Base10.toString(uint64(len(databody))) & "\r\n")
  if keepAlive:
    answer.add("Connection: keep-alive\r\n")
  else:
    answer.add("Connection: close\r\n")
  answer.add("Host: " & conn.server.getHostname() & "\r\n")
  answer.add(DateHeader)
  answer.add(": ")
  answer.add(httpDate())
  answer.add("\r\n")
  if len(datatype) > 0:
    answer.add(ContentTypeHeader)
    answer.add(": ")
    answer.add(datatype)
    answer.add("\r\n")
  answer.add(ContentLengthHeader)
  answer.add(": ")
  answer.add(Base10.toString(uint64(len(databody))))
  answer.add("\r\n")
  if keepAlive:
    answer.add(ConnectionHeader)
    answer.add(": keep-alive\r\n")
  else:
    answer.add(ConnectionHeader)
    answer.add(": close\r\n")
  answer.add(HostHeader)
  answer.add(": ")
  answer.add(conn.server.getHostname())
  answer.add("\r\n\r\n")
  if len(databody) > 0:
    answer.add(databody)
  try:
@@ -744,12 +753,12 @@ proc getMultipartReader*(req: HttpRequestRef): HttpResult[MultiPartReaderRef] =
  ## Create new MultiPartReader interface for specific request.
  if req.meth in PostMethods:
    if MultipartForm in req.requestFlags:
      let ctype = ? getContentType(req.headers.getList("content-type"))
      if ctype != "multipart/form-data":
      let ctype = ? getContentType(req.headers.getList(ContentTypeHeader))
      if ctype != MultipartContentType:
        err("Content type is not supported")
      else:
        let boundary = ? getMultipartBoundary(
          req.headers.getList("content-type")
          req.headers.getList(ContentTypeHeader)
        )
        var stream = ? req.getBodyReader()
        ok(MultiPartReaderRef.new(stream, boundary))
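A hedged sketch of a handler using getMultipartReader above; only the Result handling and the respond() calls are taken from this commit, and part iteration is deliberately left out:

  # Sketch: accept a multipart upload or reject anything else.
  proc handleUpload(request: HttpRequestRef): Future[HttpResponseRef] {.async.} =
    let res = request.getMultipartReader()
    if res.isErr():
      return await request.respond(Http400, "Expected multipart/form-data")
    let reader = res.get()
    # ... iterate over parts with the MultiPartReader API here ...
    return await request.respond(Http200, "Upload accepted")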
@@ -877,21 +886,21 @@ template checkPending(t: untyped) =

proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {.
    raises: [Defect].}=
  if not(resp.hasHeader("date")):
    resp.setHeader("date", httpDate())
  if not(resp.hasHeader("content-type")):
    resp.setHeader("content-type", "text/html; charset=utf-8")
  if not(resp.hasHeader("content-length")):
    resp.setHeader("content-length", Base10.toString(uint64(length)))
  if not(resp.hasHeader("server")):
    resp.setHeader("server", resp.connection.server.serverIdent)
  if not(resp.hasHeader("host")):
    resp.setHeader("host", resp.connection.server.getHostname())
  if not(resp.hasHeader("connection")):
  if not(resp.hasHeader(DateHeader)):
    resp.setHeader(DateHeader, httpDate())
  if not(resp.hasHeader(ContentTypeHeader)):
    resp.setHeader(ContentTypeHeader, "text/html; charset=utf-8")
  if not(resp.hasHeader(ContentLengthHeader)):
    resp.setHeader(ContentLengthHeader, Base10.toString(uint64(length)))
  if not(resp.hasHeader(ServerHeader)):
    resp.setHeader(ServerHeader, resp.connection.server.serverIdent)
  if not(resp.hasHeader(HostHeader)):
    resp.setHeader(HostHeader, resp.connection.server.getHostname())
  if not(resp.hasHeader(ConnectionHeader)):
    if KeepAlive in resp.flags:
      resp.setHeader("connection", "keep-alive")
      resp.setHeader(ConnectionHeader, "keep-alive")
    else:
      resp.setHeader("connection", "close")
      resp.setHeader(ConnectionHeader, "close")
  var answer = $(resp.version) & " " & $(resp.status) & "\r\n"
  for k, v in resp.headersTable.stringItems():
    if len(v) > 0:
@@ -904,21 +913,21 @@ proc prepareLengthHeaders(resp: HttpResponseRef, length: int): string {.

proc prepareChunkedHeaders(resp: HttpResponseRef): string {.
    raises: [Defect].} =
  if not(resp.hasHeader("date")):
    resp.setHeader("date", httpDate())
  if not(resp.hasHeader("content-type")):
    resp.setHeader("content-type", "text/html; charset=utf-8")
  if not(resp.hasHeader("transfer-encoding")):
    resp.setHeader("transfer-encoding", "chunked")
  if not(resp.hasHeader("server")):
    resp.setHeader("server", resp.connection.server.serverIdent)
  if not(resp.hasHeader("host")):
    resp.setHeader("host", resp.connection.server.getHostname())
  if not(resp.hasHeader("connection")):
  if not(resp.hasHeader(DateHeader)):
    resp.setHeader(DateHeader, httpDate())
  if not(resp.hasHeader(ContentTypeHeader)):
    resp.setHeader(ContentTypeHeader, "text/html; charset=utf-8")
  if not(resp.hasHeader(TransferEncodingHeader)):
    resp.setHeader(TransferEncodingHeader, "chunked")
  if not(resp.hasHeader(ServerHeader)):
    resp.setHeader(ServerHeader, resp.connection.server.serverIdent)
  if not(resp.hasHeader(HostHeader)):
    resp.setHeader(HostHeader, resp.connection.server.getHostname())
  if not(resp.hasHeader(ConnectionHeader)):
    if KeepAlive in resp.flags:
      resp.setHeader("connection", "keep-alive")
      resp.setHeader(ConnectionHeader, "keep-alive")
    else:
      resp.setHeader("connection", "close")
      resp.setHeader(ConnectionHeader, "close")
  var answer = $(resp.version) & " " & $(resp.status) & "\r\n"
  for k, v in resp.headersTable.stringItems():
    if len(v) > 0:
@@ -1076,9 +1085,21 @@ proc respond*(req: HttpRequestRef, code: HttpCode,
  respond(req, code, content, HttpTable.init())

proc respond*(req: HttpRequestRef, code: HttpCode): Future[HttpResponseRef] =
  ## Reponds to the request with specified ``HttpCode`` only.
  ## Responds to the request with specified ``HttpCode`` only.
  respond(req, code, "", HttpTable.init())

proc redirect*(req: HttpRequestRef, code: HttpCode,
               location: Uri): Future[HttpResponseRef] =
  ## Responds to the request with redirection to location ``location``.
  let headers = HttpTable.init([("location", $location)])
  respond(req, code, "", headers)

proc redirect*(req: HttpRequestRef, code: HttpCode,
               location: string): Future[HttpResponseRef] =
  ## Responds to the request with redirection to location ``location``.
  let headers = HttpTable.init([("location", location)])
  respond(req, code, "", headers)

proc responded*(req: HttpRequestRef): bool =
  ## Returns ``true`` if request ``req`` has been responded or responding.
  if isSome(req.response):
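The new redirect() overloads make the server half of the redirection tests short; a sketch of a handler that bounces /old to /new (Http307 is an arbitrary choice here):

  # Sketch: redirect handler built on the procedures above.
  proc process(r: RequestFence): Future[HttpResponseRef] {.async.} =
    if r.isOk():
      let request = r.get()
      if request.uri.path == "/old":
        return await request.redirect(Http307, "/new")
      return await request.respond(Http404, "Page not found")
    else:
      return dumbResponse()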
@@ -190,3 +190,10 @@ proc `$`*(ht: HttpTables): string =
    res.add(item)
    res.add("\p")
  res

proc toList*(ht: HttpTables, normKey = false): auto =
  ## Returns sequence of (key, value) pairs.
  var res: seq[tuple[key: string, value: string]]
  for key, value in ht.stringItems(normKey):
    res.add((key, value))
  res
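A short sketch of the new toList helper (the assumption here is that normKey normalizes key case):

  # Sketch: flatten an HttpTable into (key, value) pairs.
  let table = HttpTable.init([("Content-Type", "text/plain"),
                              ("Host", "example.com")])
  for item in table.toList(normKey = true):
    echo item.key, ": ", item.value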
@@ -11,8 +11,8 @@ import std/[monotimes, strutils]
import stew/results
import ../../asyncloop
import ../../streams/[asyncstream, boundstream, chunkstream]
import httptable, httpcommon
export httptable, httpcommon, asyncstream
import httptable, httpcommon, httpbodyrw
export httptable, httpcommon, httpbodyrw, asyncstream

const
  UnableToReadMultipartBody = "Unable to read multipart message body"
@@ -21,8 +21,12 @@
  MultiPartSource* {.pure.} = enum
    Stream, Buffer

  MultiPartWriterState* {.pure.} = enum
    MessagePreparing, MessageStarted, PartStarted, PartFinished,
    MessageFinished, MessageFailure

  MultiPartReader* = object
    case kind: MultiPartSource
    case kind*: MultiPartSource
    of MultiPartSource.Stream:
      stream*: HttpBodyReader
    of MultiPartSource.Buffer:
@@ -35,6 +39,20 @@

  MultiPartReaderRef* = ref MultiPartReader

  MultiPartWriter* = object
    case kind*: MultiPartSource
    of MultiPartSource.Stream:
      stream*: HttpBodyWriter
    of MultiPartSource.Buffer:
      buffer*: seq[byte]
    beginMark: seq[byte]
    finishMark: seq[byte]
    beginPartMark: seq[byte]
    finishPartMark: seq[byte]
    state*: MultiPartWriterState

  MultiPartWriterRef* = ref MultiPartWriter

  MultiPart* = object
    case kind: MultiPartSource
    of MultiPartSource.Stream:
@@ -409,6 +427,18 @@ func isEmpty*(mp: MultiPart): bool {.
  ## Returns ``true`` is multipart ``mp`` is not initialized/filled yet.
  mp.counter == 0

func validateBoundary[B: BChar](boundary: openarray[B]): HttpResult[void] =
  if len(boundary) == 0:
    err("Content-Type boundary must be at least 1 character size")
  elif len(boundary) > 70:
    err("Content-Type boundary must be less then 70 characters")
  else:
    for ch in boundary:
      if chr(ord(ch)) notin {'a' .. 'z', 'A' .. 'Z', '0' .. '9',
                             '\'' .. ')', '+' .. '/', ':', '=', '?', '_'}:
        return err("Content-Type boundary alphabet incorrect")
    ok()

func getMultipartBoundary*(ch: openarray[string]): HttpResult[string] {.
    raises: [Defect].} =
  ## Returns ``multipart/form-data`` boundary value from ``Content-Type``
@@ -453,13 +483,280 @@ func getMultipartBoundary*(ch: openarray[string]): HttpResult[string] {.
      err("Missing Content-Type boundary")
    else:
      let candidate = strip(bparts[1])
      if len(candidate) == 0:
        err("Content-Type boundary must be at least 1 character size")
      elif len(candidate) > 70:
        err("Content-Type boundary must be less then 70 characters")
      let res = validateBoundary(candidate)
      if res.isErr():
        err($res.error())
      else:
        for ch in candidate:
          if ch notin {'a' .. 'z', 'A' .. 'Z', '0' .. '9',
                       '\'' .. ')', '+' .. '/', ':', '=', '?', '_'}:
            return err("Content-Type boundary alphabet incorrect")
        ok(candidate)

proc quoteCheck(name: string): HttpResult[string] =
  if len(name) > 0:
    var res = newStringOfCap(len(name))
    for ch in name:
      case ch
      of '\x00' .. '\x08', '\x0a' .. '\x1f':
        return err("Incorrect character encountered")
      of '\x09', '\x20', '\x21':
        res.add(ch)
      of '\x22':
        res.add('\\')
        res.add('"')
      of '\x23' .. '\x7f':
        res.add(ch)
      else:
        return err("Incorrect character encountered")
    ok(res)
  else:
    ok(name)

proc init*[B: BChar](mpt: typedesc[MultiPartWriter],
                     boundary: openarray[B]): MultiPartWriter {.
    raises: [Defect].} =
  ## Create new MultiPartWriter instance with `buffer` interface.
  ##
  ## ``boundary`` - is multipart boundary, this value must not be empty.
  doAssert(validateBoundary(boundary).isOk())

  let sboundary =
    when B is char:
      @(boundary.toOpenArrayByte(0, len(boundary) - 1))
    else:
      @boundary

  var finishMark = sboundary
  finishMark.add([0x2d'u8, 0x2d'u8, 0x0d'u8, 0x0a'u8])
  var beginPartMark = sboundary
  beginPartMark.add([0x0d'u8, 0x0a'u8])

  MultiPartWriter(
    kind: MultiPartSource.Buffer,
    buffer: newSeq[byte](),
    beginMark: @[0x2d'u8, 0x2d'u8],
    finishMark: finishMark,
    beginPartMark: beginPartMark,
    finishPartMark: @[0x0d'u8, 0x0a'u8, 0x2d'u8, 0x2d'u8],
    state: MultiPartWriterState.MessagePreparing
  )

proc new*[B: BChar](mpt: typedesc[MultiPartWriterRef],
                    stream: HttpBodyWriter,
                    boundary: openarray[B]): MultiPartWriterRef {.
    raises: [Defect].} =
  doAssert(validateBoundary(boundary).isOk())
  doAssert(not(isNil(stream)))

  let sboundary =
    when B is char:
      @(boundary.toOpenArrayByte(0, len(boundary) - 1))
    else:
      @boundary

  var finishMark = sboundary
  finishMark.add([0x2d'u8, 0x2d'u8, 0x0d'u8, 0x0a'u8])
  var beginPartMark = sboundary
  beginPartMark.add([0x0d'u8, 0x0a'u8])

  MultiPartWriterRef(
    kind: MultiPartSource.Stream,
    stream: stream,
    beginMark: @[0x2d'u8, 0x2d'u8],
    finishMark: finishMark,
    beginPartMark: beginPartMark,
    finishPartMark: @[0x0d'u8, 0x0a'u8, 0x2d'u8, 0x2d'u8],
    state: MultiPartWriterState.MessagePreparing
  )

proc prepareHeaders(partMark: openarray[byte], name: string, filename: string,
                    headers: HttpTable): string =
  const ContentDisposition = "Content-Disposition"
  let qname =
    block:
      let res = quoteCheck(name)
      doAssert(res.isOk())
      res.get()
  let qfilename =
    block:
      let res = quoteCheck(filename)
      doAssert(res.isOk())
      res.get()
  var buffer = newString(len(partMark))
  copyMem(addr buffer[0], unsafeAddr partMark[0], len(partMark))
  buffer.add(ContentDisposition)
  buffer.add(": ")
  if ContentDisposition in headers:
    buffer.add(headers.getString(ContentDisposition))
    buffer.add("\r\n")
  else:
    buffer.add("form-data; name=\"")
    buffer.add(qname)
    buffer.add("\"")
    if len(qfilename) > 0:
      buffer.add("; filename=\"")
      buffer.add(qfilename)
      buffer.add("\"")
    buffer.add("\r\n")

  for k, v in headers.stringItems():
    if k != toLowerAscii(ContentDisposition):
      if len(v) > 0:
        buffer.add(k)
        buffer.add(": ")
        buffer.add(v)
        buffer.add("\r\n")
  buffer.add("\r\n")
  buffer

proc begin*(mpw: MultiPartWriterRef) {.async.} =
  ## Starts multipart message form and write approprate markers to output
  ## stream.
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state == MultiPartWriterState.MessagePreparing)
  # write "--"
  try:
    await mpw.stream.write(mpw.beginMark)
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to start multipart message")
  mpw.state = MultiPartWriterState.MessageStarted

proc begin*(mpw: var MultiPartWriter) =
  ## Starts multipart message form and write approprate markers to output
  ## buffer.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.MessagePreparing)
  # write "--"
  mpw.buffer.add(mpw.beginMark)
  mpw.state = MultiPartWriterState.MessageStarted

proc beginPart*(mpw: MultiPartWriterRef, name: string,
                filename: string, headers: HttpTable) {.async.} =
  ## Starts part of multipart message and write appropriate ``headers`` to the
  ## output stream.
  ##
  ## Note: `filename` and `name` arguments could be only ASCII strings.
  const ContentDisposition = "Content-Disposition"
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state in {MultiPartWriterState.MessageStarted,
                         MultiPartWriterState.PartFinished})
  # write "<boundary><CR><LF>"
  # write "<part headers><CR><LF>"
  # write "<CR><LF>"
  let buffer = prepareHeaders(mpw.beginPartMark, name, filename, headers)
  try:
    await mpw.stream.write(buffer)
    mpw.state = MultiPartWriterState.PartStarted
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to start multipart part")

proc beginPart*(mpw: var MultiPartWriter, name: string,
                filename: string, headers: HttpTable) =
  ## Starts part of multipart message and write appropriate ``headers`` to the
  ## output stream.
  ##
  ## Note: `filename` and `name` arguments could be only ASCII strings.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state in {MultiPartWriterState.MessageStarted,
                         MultiPartWriterState.PartFinished})
  let buffer = prepareHeaders(mpw.beginPartMark, name, filename, headers)
  # write "<boundary><CR><LF>"
  # write "<part headers><CR><LF>"
  # write "<CR><LF>"
  mpw.buffer.add(buffer.toOpenArrayByte(0, len(buffer) - 1))
  mpw.state = MultiPartWriterState.PartStarted

proc write*(mpw: MultiPartWriterRef, pbytes: pointer, nbytes: int) {.async.} =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  try:
    # write <chunk> of data
    await mpw.stream.write(pbytes, nbytes)
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to write multipart data")

proc write*(mpw: MultiPartWriterRef, data: seq[byte]) {.async.} =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  try:
    # write <chunk> of data
    await mpw.stream.write(data)
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to write multipart data")

proc write*(mpw: MultiPartWriterRef, data: string) {.async.} =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  try:
    # write <chunk> of data
    await mpw.stream.write(data)
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to write multipart data")

proc write*(mpw: var MultiPartWriter, pbytes: pointer, nbytes: int) =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  let index = len(mpw.buffer)
  if nbytes > 0:
    mpw.buffer.setLen(index + nbytes)
    copyMem(addr mpw.buffer[0], pbytes, nbytes)

proc write*(mpw: var MultiPartWriter, data: openarray[byte]) =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  mpw.buffer.add(data)

proc write*(mpw: var MultiPartWriter, data: openarray[char]) =
  ## Write part's data ``data`` to the output stream.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  mpw.buffer.add(data.toOpenArrayByte(0, len(data) - 1))

proc finishPart*(mpw: MultiPartWriterRef) {.async.} =
  ## Finish multipart's message part and send proper markers to output stream.
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  try:
    # write "<CR><LF>--"
    await mpw.stream.write(mpw.finishPartMark)
    mpw.state = MultiPartWriterState.PartFinished
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to finish multipart message part")

proc finishPart*(mpw: var MultiPartWriter) =
  ## Finish multipart's message part and send proper markers to output stream.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.PartStarted)
  # write "<CR><LF>--"
  mpw.buffer.add(mpw.finishPartMark)
  mpw.state = MultiPartWriterState.PartFinished

proc finish*(mpw: MultiPartWriterRef) {.async.} =
  ## Finish multipart's message form and send finishing markers to the output
  ## stream.
  doAssert(mpw.kind == MultiPartSource.Stream)
  doAssert(mpw.state == MultiPartWriterState.PartFinished)
  try:
    # write "<boundary>--"
    await mpw.stream.write(mpw.finishMark)
    mpw.state = MultiPartWriterState.MessageFinished
  except AsyncStreamError:
    mpw.state = MultiPartWriterState.MessageFailure
    raiseHttpCriticalError("Unable to finish multipart message")

proc finish*(mpw: var MultiPartWriter): seq[byte] =
  ## Finish multipart's message form and send finishing markers to the output
  ## stream.
  doAssert(mpw.kind == MultiPartSource.Buffer)
  doAssert(mpw.state == MultiPartWriterState.PartFinished)
  # write "<boundary>--"
  mpw.buffer.add(mpw.finishMark)
  mpw.state = MultiPartWriterState.MessageFinished
  mpw.buffer
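A compact sketch of the buffer interface defined above: assemble a two-part form entirely in memory (the boundary string here is arbitrary):

  # Sketch: build a multipart/form-data body without a stream.
  var mpw = MultiPartWriter.init("----chronosExampleBoundary")
  mpw.begin()
  mpw.beginPart("name", "", HttpTable.init())
  mpw.write("alice")
  mpw.finishPart()
  mpw.beginPart("data", "report.txt",
                HttpTable.init([("Content-Type", "text/plain")]))
  mpw.write("hello world")
  mpw.finishPart()
  let body = mpw.finish()   # seq[byte] ready to be used as a request body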
@@ -377,6 +377,21 @@ proc address*(ta: TransportAddress): IpAddress {.
  else:
    raise newException(ValueError, "IpAddress supports only IPv4/IPv6!")

proc host*(ta: TransportAddress): string {.raises: [Defect].} =
  ## Returns ``host`` of TransportAddress ``ta``.
  ##
  ## For IPv4 and IPv6 addresses it will return IP address as string, or empty
  ## string for Unix address.
  case ta.family
  of AddressFamily.IPv4:
    $IpAddress(family: IpAddressFamily.IPv4, address_v4: ta.address_v4)
  of AddressFamily.IPv6:
    let a = $IpAddress(family: IpAddressFamily.IPv6,
                       address_v6: ta.address_v6)
    "[" & a & "]"
  else:
    ""

proc resolveTAddress*(address: string, port: Port,
                      domain: Domain): seq[TransportAddress] {.
    raises: [Defect, TransportAddressError].} =
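host() wraps IPv6 literals in brackets so the result can be spliced directly into a URL; a brief sketch (initTAddress parsing of the bracketed IPv6 literal is assumed):

  # Sketch: host() for IPv4 and IPv6 transport addresses.
  let v4 = initTAddress("127.0.0.1:8080")
  let v6 = initTAddress("[::1]:8080")
  echo v4.host   # 127.0.0.1
  echo v6.host   # [::1]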
@@ -7,5 +7,5 @@
#  MIT license (LICENSE-MIT)
import testmacro, testsync, testsoon, testtime, testfut, testsignal,
       testaddress, testdatagram, teststream, testserver, testbugs, testnet,
       testasyncstream, testhttpserver, testshttpserver
       testasyncstream, testhttpserver, testshttpserver, testhttpclient
import testutils
tests/testhttpclient.nim (new file, 769 lines)
@@ -0,0 +1,769 @@
|
||||
# Chronos Test Suite
|
||||
# (c) Copyright 2021-Present
|
||||
# Status Research & Development GmbH
|
||||
#
|
||||
# Licensed under either of
|
||||
# Apache License, version 2.0, (LICENSE-APACHEv2)
|
||||
# MIT license (LICENSE-MIT)
|
||||
import std/[strutils, strutils, sha1]
|
||||
import unittest2
|
||||
import ../chronos, ../chronos/apps/http/[httpserver, shttpserver, httpclient]
|
||||
import stew/base10
|
||||
|
||||
when defined(nimHasUsed): {.used.}
|
||||
|
||||
# To create self-signed certificate and key you can use openssl
|
||||
# openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes \
|
||||
# -keyout example-com.key.pem -days 3650 -out example-com.cert.pem
|
||||
const HttpsSelfSignedRsaKey = """
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCn7tXGLKMIMzOG
|
||||
tVzUixax1/ftlSLcpEAkZMORuiCCnYjtIJhGZdzRFZC8fBlfAJZpLIAOfX2L2f1J
|
||||
ZuwpwDkOIvNqKMBrl5Mvkl5azPT0rtnjuwrcqN5NFtbmZPKFYvbjex2aXGqjl5MW
|
||||
nQIs/ZA++DVEXmaN9oDxcZsvRMDKfrGQf9iLeoVL47Gx9KpqNqD/JLIn4LpieumV
|
||||
yYidm6ukTOqHRvrWm36y6VvKW4TE97THacULmkeahtTf8zDJbbh4EO+gifgwgJ2W
|
||||
BUS0+5hMcWu8111mXmanlOVlcoW8fH8RmPjL1eK1Z3j3SVHEf7oWZtIVW5gGA0jQ
|
||||
nfA4K51RAgMBAAECggEANZ7/R13tWKrwouy6DWuz/WlWUtgx333atUQvZhKmWs5u
|
||||
cDjeJmxUC7b1FhoSB9GqNT7uTLIpKkSaqZthgRtNnIPwcU890Zz+dEwqMJgNByvl
|
||||
it+oYjjRco/+YmaNQaYN6yjelPE5Y678WlYb4b29Fz4t0/zIhj/VgEKkKH2tiXpS
|
||||
TIicoM7pSOscEUfaW3yp5bS5QwNU6/AaF1wws0feBACd19ZkcdPvr52jopbhxlXw
|
||||
h3XTV/vXIJd5zWGp0h/Jbd4xcD4MVo2GjfkeORKY6SjDaNzt8OGtePcKnnbUVu8b
|
||||
2XlDxukhDQXqJ3g0sHz47mhvo4JeIM+FgymRm+3QmQKBgQDTawrEA3Zy9WvucaC7
|
||||
Zah02oE9nuvpF12lZ7WJh7+tZ/1ss+Fm7YspEKaUiEk7nn1CAVFtem4X4YCXTBiC
|
||||
Oqq/o+ipv1yTur0ae6m4pwLm5wcMWBh3H5zjfQTfrClNN8yjWv8u3/sq8KesHPnT
|
||||
R92/sMAptAChPgTzQphWbxFiYwKBgQDLWFaBqXfZYVnTyUvKX8GorS6jGWc6Eh4l
|
||||
lAFA+2EBWDICrUxsDPoZjEXrWCixdqLhyehaI3KEFIx2bcPv6X2c7yx3IG5lA/Gx
|
||||
TZiKlY74c6jOTstkdLW9RJbg1VUHUVZMf/Owt802YmEfUI5S5v7jFmKW6VG+io+K
|
||||
+5KYeHD1uwKBgQDMf53KPA82422jFwYCPjLT1QduM2q97HwIomhWv5gIg63+l4BP
|
||||
rzYMYq6+vZUYthUy41OAMgyLzPQ1ZMXQMi83b7R9fTxvKRIBq9xfYCzObGnE5vHD
|
||||
SDDZWvR75muM5Yxr9nkfPkgVIPMO6Hg+hiVYZf96V0LEtNjU9HWmJYkLQQKBgQCQ
|
||||
ULGUdGHKtXy7AjH3/t3CiKaAupa4cANVSCVbqQy/l4hmvfdu+AbH+vXkgTzgNgKD
|
||||
nHh7AI1Vj//gTSayLlQn/Nbh9PJkXtg5rYiFUn+VdQBo6yMOuIYDPZqXFtCx0Nge
|
||||
kvCwisHpxwiG4PUhgS+Em259DDonsM8PJFx2OYRx4QKBgEQpGhg71Oi9MhPJshN7
|
||||
dYTowaMS5eLTk2264ARaY+hAIV7fgvUa+5bgTVaWL+Cfs33hi4sMRqlEwsmfds2T
|
||||
cnQiJ4cU20Euldfwa5FLnk6LaWdOyzYt/ICBJnKFRwfCUbS4Bu5rtMEM+3t0wxnJ
|
||||
IgaD04WhoL9EX0Qo3DC1+0kG
|
||||
-----END PRIVATE KEY-----
|
||||
"""
|
||||
|
||||
# This SSL certificate will expire 13 October 2030.
|
||||
const HttpsSelfSignedRsaCert = """
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDnzCCAoegAwIBAgIUUdcusjDd3XQi3FPM8urdFG3qI+8wDQYJKoZIhvcNAQEL
|
||||
BQAwXzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
|
||||
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQz
|
||||
ODA4MB4XDTIwMTAxMjIxNDUwMVoXDTMwMTAxMDIxNDUwMVowXzELMAkGA1UEBhMC
|
||||
QVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdp
|
||||
dHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQzODA4MIIBIjANBgkqhkiG
|
||||
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+7VxiyjCDMzhrVc1IsWsdf37ZUi3KRAJGTD
|
||||
kboggp2I7SCYRmXc0RWQvHwZXwCWaSyADn19i9n9SWbsKcA5DiLzaijAa5eTL5Je
|
||||
Wsz09K7Z47sK3KjeTRbW5mTyhWL243sdmlxqo5eTFp0CLP2QPvg1RF5mjfaA8XGb
|
||||
L0TAyn6xkH/Yi3qFS+OxsfSqajag/ySyJ+C6YnrplcmInZurpEzqh0b61pt+sulb
|
||||
yluExPe0x2nFC5pHmobU3/MwyW24eBDvoIn4MICdlgVEtPuYTHFrvNddZl5mp5Tl
|
||||
ZXKFvHx/EZj4y9XitWd490lRxH+6FmbSFVuYBgNI0J3wOCudUQIDAQABo1MwUTAd
|
||||
BgNVHQ4EFgQUBKha84woY5WkFxKw7qx1cONg1H8wHwYDVR0jBBgwFoAUBKha84wo
|
||||
Y5WkFxKw7qx1cONg1H8wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
|
||||
AQEAHZMYt9Ry+Xj3vTbzpGFQzYQVTJlfJWSN6eWNOivRFQE5io9kOBEe5noa8aLo
|
||||
dLkw6ztxRP2QRJmlhGCO9/HwS17ckrkgZp3EC2LFnzxcBmoZu+owfxOT1KqpO52O
|
||||
IKOl8eVohi1pEicE4dtTJVcpI7VCMovnXUhzx1Ci4Vibns4a6H+BQa19a1JSpifN
|
||||
tO8U5jkjJ8Jprs/VPFhJj2O3di53oDHaYSE5eOrm2ZO14KFHSk9cGcOGmcYkUv8B
|
||||
nV5vnGadH5Lvfxb/BCpuONabeRdOxMt9u9yQ89vNpxFtRdZDCpGKZBCfmUP+5m3m
|
||||
N8r5CwGcIX/XPC3lKazzbZ8baA==
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
|
||||
suite "HTTP client testing suite":
|
||||
|
||||
proc createBigMessage(message: string, size: int): seq[byte] =
|
||||
var res = newSeq[byte](size)
|
||||
for i in 0 ..< len(res):
|
||||
res[i] = byte(message[i mod len(message)])
|
||||
res
|
||||
|
||||
proc createServer(address: TransportAddress,
|
||||
process: HttpProcessCallback, secure: bool): HttpServerRef =
|
||||
let socketFlags = {ServerFlags.TcpNoDelay, ServerFlags.ReuseAddr}
|
||||
if secure:
|
||||
let secureKey = TLSPrivateKey.init(HttpsSelfSignedRsaKey)
|
||||
let secureCert = TLSCertificate.init(HttpsSelfSignedRsaCert)
|
||||
let res = SecureHttpServerRef.new(address, process,
|
||||
socketFlags = socketFlags,
|
||||
tlsPrivateKey = secureKey,
|
||||
tlsCertificate = secureCert)
|
||||
HttpServerRef(res.get())
|
||||
else:
|
||||
let res = HttpServerRef.new(address, process, socketFlags = socketFlags)
|
||||
res.get()
|
||||
|
||||
proc createSession(secure: bool,
|
||||
maxRedirections = HttpMaxRedirections): HttpSessionRef =
|
||||
if secure:
|
||||
HttpSessionRef.new({HttpClientFlag.NoVerifyHost,
|
||||
HttpClientFlag.NoVerifyServerName},
|
||||
maxRedirections = maxRedirections)
|
||||
else:
|
||||
HttpSessionRef.new(maxRedirections = maxRedirections)
|
||||
|
||||
proc testMethods(address: TransportAddress,
|
||||
secure: bool): Future[int] {.async.} =
|
||||
let RequestTests = [
|
||||
(MethodGet, "/test/get"),
|
||||
(MethodPost, "/test/post"),
|
||||
(MethodHead, "/test/head"),
|
||||
(MethodPut, "/test/put"),
|
||||
(MethodDelete, "/test/delete"),
|
||||
(MethodTrace, "/test/trace"),
|
||||
(MethodOptions, "/test/options"),
|
||||
(MethodConnect, "/test/connect"),
|
||||
(MethodPatch, "/test/patch")
|
||||
]
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
case request.uri.path
|
||||
of "/test/get", "/test/post", "/test/head", "/test/put",
|
||||
"/test/delete", "/test/trace", "/test/options", "/test/connect",
|
||||
"/test/patch", "/test/error":
|
||||
return await request.respond(Http200, request.uri.path)
|
||||
else:
|
||||
return await request.respond(Http404, "Page not found")
|
||||
else:
|
||||
return dumbResponse()
|
||||
|
||||
var server = createServer(address, process, secure)
|
||||
server.start()
|
||||
var counter = 0
|
||||
|
||||
var session = createSession(secure)
|
||||
|
||||
for item in RequestTests:
|
||||
let ha =
|
||||
if secure:
|
||||
getAddress(address, HttpClientScheme.Secure, item[1])
|
||||
else:
|
||||
getAddress(address, HttpClientScheme.NonSecure, item[1])
|
||||
var req = HttpClientRequestRef.new(session, ha, item[0])
|
||||
let response = await fetch(req)
|
||||
if response.status == 200:
|
||||
let data = cast[string](response.data)
|
||||
if data == item[1]:
|
||||
inc(counter)
|
||||
await req.closeWait()
|
||||
await session.closeWait()
|
||||
|
||||
for item in RequestTests:
|
||||
var session = createSession(secure)
|
||||
let ha =
|
||||
if secure:
|
||||
getAddress(address, HttpClientScheme.Secure, item[1])
|
||||
else:
|
||||
getAddress(address, HttpClientScheme.NonSecure, item[1])
|
||||
var req = HttpClientRequestRef.new(session, ha, item[0])
|
||||
let response = await fetch(req)
|
||||
if response.status == 200:
|
||||
let data = cast[string](response.data)
|
||||
if data == item[1]:
|
||||
inc(counter)
|
||||
await req.closeWait()
|
||||
await session.closeWait()
|
||||
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return counter
|
||||
|
||||
proc testResponseStreamReadingTest(address: TransportAddress,
|
||||
secure: bool): Future[int] {.async.} =
|
||||
let ResponseTests = [
|
||||
(MethodGet, "/test/short_size_response", 65600, 1024,
|
||||
"SHORTSIZERESPONSE"),
|
||||
(MethodGet, "/test/long_size_response", 262400, 1024,
|
||||
"LONGSIZERESPONSE"),
|
||||
(MethodGet, "/test/short_chunked_response", 65600, 1024,
|
||||
"SHORTCHUNKRESPONSE"),
|
||||
(MethodGet, "/test/long_chunked_response", 262400, 1024,
|
||||
"LONGCHUNKRESPONSE")
|
||||
]
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
case request.uri.path
|
||||
of "/test/short_size_response":
|
||||
var response = request.getResponse()
|
||||
var data = createBigMessage(ResponseTests[0][4], ResponseTests[0][2])
|
||||
response.status = Http200
|
||||
await response.sendBody(data)
|
||||
return response
|
||||
of "/test/long_size_response":
|
||||
var response = request.getResponse()
|
||||
var data = createBigMessage(ResponseTests[1][4], ResponseTests[1][2])
|
||||
response.status = Http200
|
||||
await response.sendBody(data)
|
||||
return response
|
||||
of "/test/short_chunked_response":
|
||||
var response = request.getResponse()
|
||||
var data = createBigMessage(ResponseTests[2][4], ResponseTests[2][2])
|
||||
response.status = Http200
|
||||
await response.prepare()
|
||||
var offset = 0
|
||||
while true:
|
||||
if len(data) == offset:
|
||||
break
|
||||
let toWrite = min(1024, len(data) - offset)
|
||||
await response.sendChunk(addr data[offset], toWrite)
|
||||
offset = offset + toWrite
|
||||
await response.finish()
|
||||
return response
|
||||
of "/test/long_chunked_response":
|
||||
var response = request.getResponse()
|
||||
var data = createBigMessage(ResponseTests[3][4], ResponseTests[3][2])
|
||||
response.status = Http200
|
||||
await response.prepare()
|
||||
var offset = 0
|
||||
while true:
|
||||
if len(data) == offset:
|
||||
break
|
||||
let toWrite = min(1024, len(data) - offset)
|
||||
await response.sendChunk(addr data[offset], toWrite)
|
||||
offset = offset + toWrite
|
||||
await response.finish()
|
||||
return response
|
||||
else:
|
||||
return await request.respond(Http404, "Page not found")
|
||||
else:
|
||||
return dumbResponse()
|
||||
|
||||
var server = createServer(address, process, secure)
|
||||
server.start()
|
||||
var counter = 0
|
||||
|
||||
var session = createSession(secure)
|
||||
for item in ResponseTests:
|
||||
let ha =
|
||||
if secure:
|
||||
getAddress(address, HttpClientScheme.Secure, item[1])
|
||||
else:
|
||||
getAddress(address, HttpClientScheme.NonSecure, item[1])
|
||||
var req = HttpClientRequestRef.new(session, ha, item[0])
|
||||
var response = await send(req)
|
||||
if response.status == 200:
|
||||
var reader = response.getBodyReader()
|
||||
var res: seq[byte]
|
||||
while true:
|
||||
var data = await reader.read(item[3])
|
||||
res.add(data)
|
||||
if len(data) != item[3]:
|
||||
break
|
||||
await reader.closeWait()
|
||||
if len(res) == item[2]:
|
||||
let expect = createBigMessage(item[4], len(res))
|
||||
if expect == res:
|
||||
inc(counter)
|
||||
await response.closeWait()
|
||||
await req.closeWait()
|
||||
await session.closeWait()
|
||||
|
||||
for item in ResponseTests:
|
||||
var session = createSession(secure)
|
||||
let ha =
|
||||
if secure:
|
||||
getAddress(address, HttpClientScheme.Secure, item[1])
|
||||
else:
|
||||
getAddress(address, HttpClientScheme.NonSecure, item[1])
|
||||
var req = HttpClientRequestRef.new(session, ha, item[0])
|
||||
var response = await send(req)
|
||||
if response.status == 200:
|
||||
var reader = response.getBodyReader()
|
||||
var res: seq[byte]
|
||||
while true:
|
||||
var data = await reader.read(item[3])
|
||||
res.add(data)
|
||||
if len(data) != item[3]:
|
||||
break
|
||||
await reader.closeWait()
|
||||
if len(res) == item[2]:
|
||||
let expect = createBigMessage(item[4], len(res))
|
||||
if expect == res:
|
||||
inc(counter)
|
||||
await response.closeWait()
|
||||
await req.closeWait()
|
||||
await session.closeWait()
|
||||
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return counter
|
||||
|
||||
proc testRequestSizeStreamWritingTest(address: TransportAddress,
|
||||
secure: bool): Future[int] {.async.} =
|
||||
let RequestTests = [
|
||||
(MethodPost, "/test/big_request", 65600),
|
||||
(MethodPost, "/test/big_request", 262400)
|
||||
]
|
||||
proc process(r: RequestFence): Future[HttpResponseRef] {.
|
||||
async.} =
|
||||
if r.isOk():
|
||||
let request = r.get()
|
||||
case request.uri.path
|
||||
of "/test/big_request":
|
||||
if request.hasBody():
|
||||
let body = await request.getBody()
|
||||
let digest = $secureHash(cast[string](body))
|
||||
return await request.respond(Http200, digest)
|
||||
else:
|
||||
return await request.respond(Http400, "Missing content body")
|
||||
else:
|
||||
return await request.respond(Http404, "Page not found")
|
||||
else:
|
||||
return dumbResponse()
|
||||
|
||||
var server = createServer(address, process, secure)
|
||||
server.start()
|
||||
var counter = 0
|
||||
|
||||
var session = createSession(secure)
|
||||
for item in RequestTests:
|
||||
let ha =
|
||||
if secure:
|
||||
getAddress(address, HttpClientScheme.Secure, item[1])
|
||||
else:
|
||||
getAddress(address, HttpClientScheme.NonSecure, item[1])
|
||||
var data = createBigMessage("REQUESTSTREAMMESSAGE", item[2])
|
||||
let headers = [
|
||||
("Content-Type", "application/octet-stream"),
|
||||
("Content-Length", Base10.toString(uint64(len(data))))
|
||||
]
|
||||
var request = HttpClientRequestRef.new(
|
||||
session, ha, item[0], headers = headers
|
||||
)
|
||||
|
||||
var expectDigest = $secureHash(cast[string](data))
|
||||
# Sending big request by 1024bytes long chunks
|
||||
var writer = await open(request)
|
||||
var offset = 0
|
||||
while true:
|
||||
if len(data) == offset:
|
||||
break
|
||||
let toWrite = min(1024, len(data) - offset)
|
||||
await writer.write(addr data[offset], toWrite)
|
||||
offset = offset + toWrite
|
||||
await writer.finish()
|
||||
await writer.closeWait()
|
||||
var response = await request.finish()
|
||||
|
||||
if response.status == 200:
|
||||
var res = await response.getBodyBytes()
|
||||
if cast[string](res) == expectDigest:
|
||||
inc(counter)
|
||||
await response.closeWait()
|
||||
await request.closeWait()
|
||||
await session.closeWait()
|
||||
|
||||
await server.stop()
|
||||
await server.closeWait()
|
||||
return counter
|
||||
|
||||
  proc testRequestChunkedStreamWritingTest(address: TransportAddress,
                                           secure: bool): Future[int] {.async.} =
    let RequestTests = [
      (MethodPost, "/test/big_chunk_request", 65600),
      (MethodPost, "/test/big_chunk_request", 262400)
    ]
    proc process(r: RequestFence): Future[HttpResponseRef] {.
         async.} =
      if r.isOk():
        let request = r.get()
        case request.uri.path
        of "/test/big_chunk_request":
          if request.hasBody():
            let body = await request.getBody()
            let digest = $secureHash(cast[string](body))
            return await request.respond(Http200, digest)
          else:
            return await request.respond(Http400, "Missing content body")
        else:
          return await request.respond(Http404, "Page not found")
      else:
        return dumbResponse()

    var server = createServer(address, process, secure)
    server.start()
    var counter = 0

    var session = createSession(secure)
    for item in RequestTests:
      let ha =
        if secure:
          getAddress(address, HttpClientScheme.Secure, item[1])
        else:
          getAddress(address, HttpClientScheme.NonSecure, item[1])
      var data = createBigMessage("REQUESTSTREAMMESSAGE", item[2])
      let headers = [
        ("Content-Type", "application/octet-stream"),
        ("Transfer-Encoding", "chunked")
      ]
      var request = HttpClientRequestRef.new(
        session, ha, item[0], headers = headers
      )

      var expectDigest = $secureHash(cast[string](data))
      # Send the request body in 1024-byte chunks
      var writer = await open(request)
      var offset = 0
      while true:
        if len(data) == offset:
          break
        let toWrite = min(1024, len(data) - offset)
        await writer.write(addr data[offset], toWrite)
        offset = offset + toWrite
      await writer.finish()
      await writer.closeWait()
      var response = await request.finish()

      if response.status == 200:
        var res = await response.getBodyBytes()
        if cast[string](res) == expectDigest:
          inc(counter)
      await response.closeWait()
      await request.closeWait()
    await session.closeWait()

    await server.stop()
    await server.closeWait()
    return counter

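  ## Posts an ``application/x-www-form-urlencoded`` form twice: once as a
  ## sized body handed to ``send()`` and once streamed over a chunked request,
  ## checking the decoded field values returned by the server both times.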
  proc testRequestPostUrlEncodedTest(address: TransportAddress,
                                     secure: bool): Future[int] {.async.} =
    let PostRequests = [
      ("/test/post/urlencoded_size",
       "field1=value1&field2=value2&field3=value3", "value1:value2:value3"),
      ("/test/post/urlencoded_chunked",
       "field1=longlonglongvalue1&field2=longlonglongvalue2&" &
       "field3=longlonglongvalue3", "longlonglongvalue1:longlonglongvalue2:" &
       "longlonglongvalue3")
    ]

    proc process(r: RequestFence): Future[HttpResponseRef] {.
         async.} =
      if r.isOk():
        let request = r.get()
        case request.uri.path
        of "/test/post/urlencoded_size", "/test/post/urlencoded_chunked":
          if request.hasBody():
            var postTable = await request.post()
            let body = postTable.getString("field1") & ":" &
                       postTable.getString("field2") & ":" &
                       postTable.getString("field3")
            return await request.respond(Http200, body)
          else:
            return await request.respond(Http400, "Missing content body")
        else:
          return await request.respond(Http404, "Page not found")
      else:
        return dumbResponse()

    var server = createServer(address, process, secure)
    server.start()
    var counter = 0

    ## Sized url-encoded form
    block:
      var session = createSession(secure)
      let ha =
        if secure:
          getAddress(address, HttpClientScheme.Secure, PostRequests[0][0])
        else:
          getAddress(address, HttpClientScheme.NonSecure, PostRequests[0][0])
      let headers = [
        ("Content-Type", "application/x-www-form-urlencoded"),
      ]
      var request = HttpClientRequestRef.new(
        session, ha, MethodPost, headers = headers,
        body = cast[seq[byte]](PostRequests[0][1]))
      var response = await send(request)

      if response.status == 200:
        var res = await response.getBodyBytes()
        if cast[string](res) == PostRequests[0][2]:
          inc(counter)
      await response.closeWait()
      await request.closeWait()
      await session.closeWait()

    ## Chunked url-encoded form
    block:
      var session = createSession(secure)
      let ha =
        if secure:
          getAddress(address, HttpClientScheme.Secure, PostRequests[1][0])
        else:
          getAddress(address, HttpClientScheme.NonSecure, PostRequests[1][0])
      let headers = [
        ("Content-Type", "application/x-www-form-urlencoded"),
        ("Transfer-Encoding", "chunked")
      ]
      var request = HttpClientRequestRef.new(
        session, ha, MethodPost, headers = headers)

      var data = PostRequests[1][1]

      var writer = await open(request)
      var offset = 0
      while true:
        if len(data) == offset:
          break
        let toWrite = min(16, len(data) - offset)
        await writer.write(addr data[offset], toWrite)
        offset = offset + toWrite
      await writer.finish()
      await writer.closeWait()
      var response = await request.finish()
      if response.status == 200:
        var res = await response.getBodyBytes()
        if cast[string](res) == PostRequests[1][2]:
          inc(counter)
      await response.closeWait()
      await request.closeWait()
      await session.closeWait()

    await server.stop()
    await server.closeWait()
    return counter

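  ## Posts a ``multipart/form-data`` form twice: once pre-encoded with
  ## ``MultiPartWriter`` and sent as a sized body, once streamed through
  ## ``MultiPartWriterRef`` over a chunked request, checking the decoded
  ## field values returned by the server both times.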
  proc testRequestPostMultipartTest(address: TransportAddress,
                                    secure: bool): Future[int] {.async.} =
    let PostRequests = [
      ("/test/post/multipart_size", "some-part-boundary",
       [("field1", "value1"), ("field2", "value2"), ("field3", "value3")],
       "value1:value2:value3"),
      ("/test/post/multipart_chunked", "some-part-boundary",
       [("field1", "longlonglongvalue1"), ("field2", "longlonglongvalue2"),
        ("field3", "longlonglongvalue3")],
       "longlonglongvalue1:longlonglongvalue2:longlonglongvalue3")
    ]

    proc process(r: RequestFence): Future[HttpResponseRef] {.
         async.} =
      if r.isOk():
        let request = r.get()
        case request.uri.path
        of "/test/post/multipart_size", "/test/post/multipart_chunked":
          if request.hasBody():
            var postTable = await request.post()
            let body = postTable.getString("field1") & ":" &
                       postTable.getString("field2") & ":" &
                       postTable.getString("field3")
            return await request.respond(Http200, body)
          else:
            return await request.respond(Http400, "Missing content body")
        else:
          return await request.respond(Http404, "Page not found")
      else:
        return dumbResponse()

    var server = createServer(address, process, secure)
    server.start()
    var counter = 0

    ## Sized multipart form
    block:
      var mp = MultiPartWriter.init(PostRequests[0][1])
      mp.begin()
      for item in PostRequests[0][2]:
        mp.beginPart(item[0], "", HttpTable.init())
        mp.write(item[1])
        mp.finishPart()
      let data = mp.finish()

      var session = createSession(secure)
      let ha =
        if secure:
          getAddress(address, HttpClientScheme.Secure, PostRequests[0][0])
        else:
          getAddress(address, HttpClientScheme.NonSecure, PostRequests[0][0])
      let headers = [
        ("Content-Type", "multipart/form-data; boundary=" & PostRequests[0][1]),
      ]
      var request = HttpClientRequestRef.new(
        session, ha, MethodPost, headers = headers, body = data)
      var response = await send(request)
      if response.status == 200:
        var res = await response.getBodyBytes()
        if cast[string](res) == PostRequests[0][3]:
          inc(counter)
      await response.closeWait()
      await request.closeWait()
      await session.closeWait()

    ## Chunked multipart form
    block:
      var session = createSession(secure)
      let ha =
        if secure:
          getAddress(address, HttpClientScheme.Secure, PostRequests[1][0])
        else:
          getAddress(address, HttpClientScheme.NonSecure, PostRequests[1][0])
      let headers = [
        ("Content-Type", "multipart/form-data; boundary=" & PostRequests[1][1]),
        ("Transfer-Encoding", "chunked")
      ]
      var request = HttpClientRequestRef.new(
        session, ha, MethodPost, headers = headers)
      var writer = await open(request)
      var mpw = MultiPartWriterRef.new(writer, PostRequests[1][1])
      await mpw.begin()
      for item in PostRequests[1][2]:
        await mpw.beginPart(item[0], "", HttpTable.init())
        await mpw.write(item[1])
        await mpw.finishPart()
      await mpw.finish()
      await writer.finish()
      await writer.closeWait()
      let response = await request.finish()
      if response.status == 200:
        var res = await response.getBodyBytes()
        if cast[string](res) == PostRequests[1][3]:
          inc(counter)
      await response.closeWait()
      await request.closeWait()
      await session.closeWait()

    await server.stop()
    await server.closeWait()
    return counter

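  ## Follows a chain of five ``Http302`` redirects (absolute, relative and
  ## full-URL targets) ending at ``/final/5``. When ``maxRedirections`` is
  ## smaller than the chain length, the fetch is expected to fail with
  ## ``HttpRedirectError``.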
  proc testRequestRedirectTest(address: TransportAddress,
                               secure: bool,
                               max: int): Future[string] {.async.} =
    var session = createSession(secure, maxRedirections = max)

    let ha =
      if secure:
        getAddress(address, HttpClientScheme.Secure, "/")
      else:
        getAddress(address, HttpClientScheme.NonSecure, "/")
    let lastAddress = ha.getUri().combine(parseUri("/final/5"))

    proc process(r: RequestFence): Future[HttpResponseRef] {.
         async.} =
      if r.isOk():
        let request = r.get()
        case request.uri.path
        of "/":
          return await request.redirect(Http302, "/redirect/1")
        of "/redirect/1":
          return await request.redirect(Http302, "/next/redirect/2")
        of "/next/redirect/2":
          return await request.redirect(Http302, "redirect/3")
        of "/next/redirect/redirect/3":
          return await request.redirect(Http302, "next/redirect/4")
        of "/next/redirect/redirect/next/redirect/4":
          return await request.redirect(Http302, lastAddress)
        of "/final/5":
          return await request.respond(Http200, "ok-5")
        else:
          return await request.respond(Http404, "Page not found")
      else:
        return dumbResponse()

    var server = createServer(address, process, secure)
    server.start()
    if session.maxRedirections >= 5:
      let (code, data) = await session.fetch(ha.getUri())
      await session.closeWait()
      await server.stop()
      await server.closeWait()
      return data.bytesToString() & "-" & $code
    else:
      let res =
        try:
          let (code {.used.}, data {.used.}) = await session.fetch(ha.getUri())
          false
        except HttpRedirectError:
          true
        except CatchableError:
          false
      await session.closeWait()
      await server.stop()
      await server.closeWait()
      return "redirect-" & $res

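  # Each scenario below is exercised twice: over plain HTTP (secure = false)
  # and over HTTPS (secure = true), always against 127.0.0.1:30080.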
test "HTTP all request methods test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testMethods(address, false)) == 18
|
||||
|
||||
test "HTTP(S) all request methods test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testMethods(address, true)) == 18
|
||||
|
||||
test "HTTP client response streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testResponseStreamReadingTest(address, false)) == 8
|
||||
|
||||
test "HTTP(S) client response streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testResponseStreamReadingTest(address, true)) == 8
|
||||
|
||||
test "HTTP client (size) request streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestSizeStreamWritingTest(address, false)) == 2
|
||||
|
||||
test "HTTP(S) client (size) request streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestSizeStreamWritingTest(address, true)) == 2
|
||||
|
||||
test "HTTP client (chunked) request streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestChunkedStreamWritingTest(address, false)) == 2
|
||||
|
||||
test "HTTP(S) client (chunked) request streaming test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestChunkedStreamWritingTest(address, true)) == 2
|
||||
|
||||
test "HTTP client (size + chunked) url-encoded POST test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestPostUrlEncodedTest(address, false)) == 2
|
||||
|
||||
test "HTTP(S) client (size + chunked) url-encoded POST test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestPostUrlEncodedTest(address, true)) == 2
|
||||
|
||||
test "HTTP client (size + chunked) multipart POST test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestPostMultipartTest(address, false)) == 2
|
||||
|
||||
test "HTTP(S) client (size + chunked) multipart POST test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestPostMultipartTest(address, true)) == 2
|
||||
|
||||
test "HTTP client redirection test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestRedirectTest(address, false, 5)) == "ok-5-200"
|
||||
|
||||
test "HTTP(S) client redirection test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestRedirectTest(address, true, 5)) == "ok-5-200"
|
||||
|
||||
test "HTTP client maximum redirections test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestRedirectTest(address, false, 4)) == "redirect-true"
|
||||
|
||||
test "HTTP(S) client maximum redirections test":
|
||||
let address = initTAddress("127.0.0.1:30080")
|
||||
check waitFor(testRequestRedirectTest(address, true, 4)) == "redirect-true"
|
||||
|
||||
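  # Verifies that every tracked resource used above (body readers/writers,
  # client connections, requests, responses, stream readers/writers, stream
  # servers and transports) has been released, i.e. nothing leaked.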
test "Leaks test":
|
||||
proc getTrackerLeaks(tracker: string): bool =
|
||||
let tracker = getTracker(tracker)
|
||||
if isNil(tracker): false else: tracker.isLeaked()
|
||||
|
||||
check:
|
||||
getTrackerLeaks("http.body.reader") == false
|
||||
getTrackerLeaks("http.body.writer") == false
|
||||
getTrackerLeaks("httpclient.connection") == false
|
||||
getTrackerLeaks("httpclient.request") == false
|
||||
getTrackerLeaks("httpclient.response") == false
|
||||
getTrackerLeaks("async.stream.reader") == false
|
||||
getTrackerLeaks("async.stream.writer") == false
|
||||
getTrackerLeaks("stream.server") == false
|
||||
getTrackerLeaks("stream.transport") == false
|