From 3433c77c35f3025729ac9204ab6b91ea136dce9e Mon Sep 17 00:00:00 2001
From: Zahary Karadjov
Date: Tue, 18 Aug 2020 23:31:55 +0300
Subject: [PATCH] Prevent Snappy decompression bombs

---
 beacon_chain/beacon_chain_db.nim        | 16 ++++++++++++----
 beacon_chain/eth2_network.nim           | 14 ++++++++++++--
 beacon_chain/libp2p_streams_backend.nim |  8 ++++++--
 vendor/nim-snappy                       |  2 +-
 4 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/beacon_chain/beacon_chain_db.nim b/beacon_chain/beacon_chain_db.nim
index 9e27e26d2..ffe734040 100644
--- a/beacon_chain/beacon_chain_db.nim
+++ b/beacon_chain/beacon_chain_db.nim
@@ -35,6 +35,9 @@ type
     ## past the weak subjectivity period.
     kBlockSlotStateRoot ## BlockSlot -> state_root mapping
 
+const
+  maxDecompressedDbRecordSize = 16*1024*1024
+
 # Subkeys essentially create "tables" within the key-value store by prefixing
 # each entry with a table id
 
@@ -99,7 +102,7 @@ type
   GetResult = enum
     notFound
     corrupted
 
-proc get(db: BeaconChainDB, key: openArray[byte], output: var auto): GetResult =
+proc get[T](db: BeaconChainDB, key: openArray[byte], output: var T): GetResult =
   var status = GetResult.notFound
   # TODO address is needed because there's no way to express lifetimes in nim
@@ -107,13 +110,18 @@ proc get(db: BeaconChainDB, key: openArray[byte], output: var auto): GetResult =
   var outputPtr = unsafeAddr output # callback is local, ptr wont escape
   proc decode(data: openArray[byte]) =
     try:
-      outputPtr[] = SSZ.decode(snappy.decode(data), type output)
-      status = GetResult.found
+      let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
+      if decompressed.len > 0:
+        outputPtr[] = SSZ.decode(decompressed, T)
+        status = GetResult.found
+      else:
+        warn "Corrupt snappy record found in database", typ = name(T)
+        status = GetResult.corrupted
     except SerializationError as e:
       # If the data can't be deserialized, it could be because it's from a
       # version of the software that uses a different SSZ encoding
       warn "Unable to deserialize data, old database?",
-        err = e.msg, typ = name(type output), dataLen = data.len
+        err = e.msg, typ = name(T), dataLen = data.len
       status = GetResult.corrupted
 
   discard db.backend.get(key, decode).expect("working database")
diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim
index 4b644f2b5..b4a8174ff 100644
--- a/beacon_chain/eth2_network.nim
+++ b/beacon_chain/eth2_network.nim
@@ -1254,7 +1254,12 @@ proc subscribe*[MsgType](node: Eth2Node,
     trace "Incoming pubsub message received",
       len = data.len, topic, msgId = gossipId(data)
     try:
-      msgHandler SSZ.decode(snappy.decode(data), MsgType)
+      let decompressed = snappy.decode(data, GOSSIP_MAX_SIZE)
+      if decompressed.len > 0:
+        msgHandler SSZ.decode(decompressed, MsgType)
+      else:
+        # TODO penalize peer?
+        debug "Failed to decompress gossip payload"
     except CatchableError as err:
       debug "Gossip msg handler error",
         msg = err.msg, len = data.len, topic, msgId = gossipId(data)
@@ -1270,7 +1275,12 @@ proc addValidator*[MsgType](node: Eth2Node,
     trace "Validating incoming gossip message",
       len = message.data.len, topic, msgId = gossipId(message.data)
     try:
-      return msgValidator SSZ.decode(snappy.decode(message.data), MsgType)
+      let decompressed = snappy.decode(message.data, GOSSIP_MAX_SIZE)
+      if decompressed.len > 0:
+        return msgValidator SSZ.decode(decompressed, MsgType)
+      else:
+        # TODO penalize peer?
+        debug "Failed to decompress gossip payload"
     except CatchableError as err:
       debug "Gossip validation error",
         msg = err.msg, msgId = gossipId(message.data)
diff --git a/beacon_chain/libp2p_streams_backend.nim b/beacon_chain/libp2p_streams_backend.nim
index 5d938af73..e3a8a374e 100644
--- a/beacon_chain/libp2p_streams_backend.nim
+++ b/beacon_chain/libp2p_streams_backend.nim
@@ -50,11 +50,14 @@ proc uncompressFramedStream*(conn: Connection,
      # data is longer than that, snappyUncompress will fail and we will not
      # decompress the chunk at all, instead reporting failure.
      let
-        uncompressedLen = snappyUncompress(
+        # The `int` conversion below is safe, because `uncompressedLen` is
+        # bounded to `chunkLen` (which in turn is bounded by `MAX_CHUNK_SIZE`).
+        # TODO: Use a range type for the parameter.
+        uncompressedLen = int snappyUncompress(
          frameData.toOpenArray(4, dataLen - 1),
          uncompressedData.toOpenArray(0, chunkLen - 1))
 
-      if uncompressedLen <= 0:
+      if uncompressedLen == 0:
        return err "Failed to decompress snappy frame"
      doAssert output.len + uncompressedLen <= expectedSize,
        "enforced by `remains` limit above"
@@ -109,6 +112,7 @@ proc readChunkPayload(conn: Connection, peer: Peer,
   if size == 0:
     return neterr ZeroSizePrefix
 
+  # The `size.int` conversion is safe because `size` is bounded to `MAX_CHUNK_SIZE`
   let data = await conn.uncompressFramedStream(size.int)
   if data.isOk:
     # `10` is the maximum size of variable integer on wire, so error could
diff --git a/vendor/nim-snappy b/vendor/nim-snappy
index 676fa656d..a368549c1 160000
--- a/vendor/nim-snappy
+++ b/vendor/nim-snappy
@@ -1 +1 @@
-Subproject commit 676fa656d3a6f4e24691f3f48829c979b1b1bcdd
+Subproject commit a368549c1a473d2e580ac814f4c21342259ed9b6