Prevent Snappy decompression bombs

Zahary Karadjov 2020-08-18 23:31:55 +03:00 committed by zah
parent 46c94a18ba
commit 3433c77c35
4 changed files with 31 additions and 9 deletions


@@ -35,6 +35,9 @@ type
       ## past the weak subjectivity period.
     kBlockSlotStateRoot ## BlockSlot -> state_root mapping
 
+const
+  maxDecompressedDbRecordSize = 16*1024*1024
+
 # Subkeys essentially create "tables" within the key-value store by prefixing
 # each entry with a table id
@@ -99,7 +102,7 @@ type GetResult = enum
     notFound
     corrupted
 
-proc get(db: BeaconChainDB, key: openArray[byte], output: var auto): GetResult =
+proc get[T](db: BeaconChainDB, key: openArray[byte], output: var T): GetResult =
   var status = GetResult.notFound
 
   # TODO address is needed because there's no way to express lifetimes in nim
@@ -107,13 +110,18 @@ proc get(db: BeaconChainDB, key: openArray[byte], output: var auto): GetResult =
   var outputPtr = unsafeAddr output # callback is local, ptr wont escape
   proc decode(data: openArray[byte]) =
     try:
-      outputPtr[] = SSZ.decode(snappy.decode(data), type output)
-      status = GetResult.found
+      let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
+      if decompressed.len > 0:
+        outputPtr[] = SSZ.decode(decompressed, T)
+        status = GetResult.found
+      else:
+        warn "Corrupt snappy record found in database", typ = name(T)
+        status = GetResult.corrupted
     except SerializationError as e:
       # If the data can't be deserialized, it could be because it's from a
       # version of the software that uses a different SSZ encoding
       warn "Unable to deserialize data, old database?",
-        err = e.msg, typ = name(type output), dataLen = data.len
+        err = e.msg, typ = name(T), dataLen = data.len
       status = GetResult.corrupted
 
   discard db.backend.get(key, decode).expect("working database")
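Note: the DB fix relies on the bounded decode overload pulled in by the nim-snappy submodule bump at the end of this commit. Assuming the `decode(src, maxSize)` signature used above, a record whose snappy header announces an uncompressed length beyond the cap yields an empty seq instead of triggering the full allocation; the `decompressed.len > 0` check catches exactly that. A minimal sketch of the behaviour, not part of the commit:

import snappy

const maxDecompressedDbRecordSize = 16*1024*1024

# 32 MiB of zeros compresses to a few kilobytes, but its snappy header
# still announces the full 32 MiB uncompressed size.
let bomb = snappy.encode(newSeq[byte](32*1024*1024))

# The cap makes decode refuse the oversized claim up front, returning an
# empty seq rather than allocating 32 MiB for a tiny input.
doAssert snappy.decode(bomb, maxDecompressedDbRecordSize).len == 0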


@@ -1254,7 +1254,12 @@ proc subscribe*[MsgType](node: Eth2Node,
       trace "Incoming pubsub message received",
         len = data.len, topic, msgId = gossipId(data)
       try:
-        msgHandler SSZ.decode(snappy.decode(data), MsgType)
+        let decompressed = snappy.decode(data, GOSSIP_MAX_SIZE)
+        if decompressed.len > 0:
+          msgHandler SSZ.decode(decompressed, MsgType)
+        else:
+          # TODO penalize peer?
+          debug "Failed to decompress gossip payload"
       except CatchableError as err:
         debug "Gossip msg handler error",
           msg = err.msg, len = data.len, topic, msgId = gossipId(data)
@@ -1270,7 +1275,12 @@ proc addValidator*[MsgType](node: Eth2Node,
       trace "Validating incoming gossip message",
         len = message.data.len, topic, msgId = gossipId(message.data)
       try:
-        return msgValidator SSZ.decode(snappy.decode(message.data), MsgType)
+        let decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
+        if decompressed.len > 0:
+          return msgValidator SSZ.decode(decompressed, MsgType)
+        else:
+          # TODO penalize peer?
+          debug "Failed to decompress gossip payload"
       except CatchableError as err:
         debug "Gossip validation error",
           msg = err.msg, msgId = gossipId(message.data)
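Note: both gossip paths apply the same pattern, bounding decompression by GOSSIP_MAX_SIZE and treating an empty result as a failed or oversized payload. A hedged sketch of the attack this blocks, using the same `decode(src, maxSize)` overload; the constant's value here is an assumption taken from the phase 0 p2p spec, not from this diff:

import snappy

const GOSSIP_MAX_SIZE = 1024*1024 # 1 MiB; assumed value per the phase 0 spec

# A gossip message a few KiB long on the wire can claim an enormous
# uncompressed size; unbounded decoding would allocate all of it.
let payload = snappy.encode(newSeq[byte](64*1024*1024)) # 64 MiB of zeros
echo "wire size: ", payload.len, " bytes" # a few kilobytes at most

# With the bound in place, the claim is rejected before any large allocation.
doAssert snappy.decode(payload, GOSSIP_MAX_SIZE).len == 0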


@@ -50,11 +50,14 @@ proc uncompressFramedStream*(conn: Connection,
       # data is longer than that, snappyUncompress will fail and we will not
       # decompress the chunk at all, instead reporting failure.
       let
-        uncompressedLen = snappyUncompress(
+        # The `int` conversion below is safe, because `uncompressedLen` is
+        # bounded to `chunkLen` (which in turn is bounded by `MAX_CHUNK_SIZE`).
+        # TODO: Use a range type for the parameter.
+        uncompressedLen = int snappyUncompress(
           frameData.toOpenArray(4, dataLen - 1),
           uncompressedData.toOpenArray(0, chunkLen - 1))
 
-      if uncompressedLen <= 0:
+      if uncompressedLen == 0:
         return err "Failed to decompress snappy frame"
 
       doAssert output.len + uncompressedLen <= expectedSize,
         "enforced by `remains` limit above"
@@ -109,6 +112,7 @@ proc readChunkPayload(conn: Connection, peer: Peer,
   if size == 0:
     return neterr ZeroSizePrefix
 
+  # The `size.int` conversion is safe because `size` is bounded to `MAX_CHUNK_SIZE`
   let data = await conn.uncompressFramedStream(size.int)
   if data.isOk:
     # `10` is the maximum size of variable integer on wire, so error could
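Note: the framed-stream path needs no new cap because the snappy framing format already bounds each chunk's uncompressed data, so the changes above only tighten the length conversions and checks. For orientation, a sketch of the chunk layout being indexed: a frame starts with a 1-byte chunk type and a 3-byte little-endian body length, and data chunks then carry a 4-byte masked CRC-32C before the payload, which appears to be what `frameData.toOpenArray(4, dataLen - 1)` skips. The helper below is hypothetical, not part of this commit:

# Illustrative only -- parseChunkHeader is a hypothetical helper sketching
# the snappy framing-format header, not code from this commit.
proc parseChunkHeader(header: openArray[byte]): tuple[chunkType: byte, bodyLen: int] =
  doAssert header.len >= 4
  # chunk type: 0xff = stream identifier, 0x00 = compressed, 0x01 = uncompressed
  (chunkType: header[0],
   bodyLen: int(header[1]) or (int(header[2]) shl 8) or (int(header[3]) shl 16))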

vendor/nim-snappy (vendored)

@@ -1 +1 @@
-Subproject commit 676fa656d3a6f4e24691f3f48829c979b1b1bcdd
+Subproject commit a368549c1a473d2e580ac814f4c21342259ed9b6