Add some size checks to make the code more robust and clearer (#11512)

Motivation:

While it's technically impossible for a chunk to be larger than 64 KB, adding some size checks to LzfDecoder still makes the code easier to read and more robust.

Modifications:

Check that the chunk length and the original length never exceed the maximum chunk length.

Result:

More robust code that is easier to reason about.
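
As a side note (an illustrative standalone sketch, not part of the commit): ByteBuf.readUnsignedShort() reads two bytes, so it can never return more than 0xFFFF (65535), which matches the 64 KB bound that LZFChunk.MAX_CHUNK_LEN represents according to the comments below. The new guards are therefore unreachable unless some other bug corrupts the length fields.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadUnsignedShortBound {
    public static void main(String[] args) {
        // Two bytes can encode at most 0xFFFF, so readUnsignedShort()
        // can never return a value above 65535.
        ByteBuf buf = Unpooled.buffer(2);
        buf.writeShort(0xFFFF); // largest value two bytes can carry
        int chunkLength = buf.readUnsignedShort();
        System.out.println(chunkLength); // prints 65535
        buf.release();
    }
}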
Norman Maurer 2021-07-26 17:11:18 +02:00
parent 6559f2fc25
commit f5f3107cc3


@@ -17,6 +17,7 @@ package io.netty.handler.codec.compression;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
@@ -135,6 +136,15 @@ public class LzfDecoder extends ByteToMessageDecoder {
}
chunkLength = in.readUnsignedShort();
// chunkLength can never exceed MAX_CHUNK_LEN, as MAX_CHUNK_LEN is 64 KB and readUnsignedShort()
// can never return anything bigger. Let's add the check anyway to make debugging easier
// if we ever hit this because of a bug.
if (chunkLength > LZFChunk.MAX_CHUNK_LEN) {
throw new DecompressionException(String.format(
"chunk length exceeds maximum: %d (expected: =< %d)",
chunkLength, LZFChunk.MAX_CHUNK_LEN));
}
if (type != BLOCK_TYPE_COMPRESSED) {
break;
}
@@ -145,6 +155,15 @@ public class LzfDecoder extends ByteToMessageDecoder {
}
originalLength = in.readUnsignedShort();
// originalLength can never exceed MAX_CHUNK_LEN, as MAX_CHUNK_LEN is 64 KB and readUnsignedShort()
// can never return anything bigger. Let's add the check anyway to make debugging easier
// if we ever hit this because of a bug.
if (originalLength > LZFChunk.MAX_CHUNK_LEN) {
throw new DecompressionException(String.format(
"original length exceeds maximum: %d (expected: =< %d)",
chunkLength, LZFChunk.MAX_CHUNK_LEN));
}
currentState = State.DECOMPRESS_DATA;
// fall through
case DECOMPRESS_DATA:
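
For reference, the two guards added above share the same shape; the helper below is a hypothetical standalone extraction written for illustration and is not code from this commit:

import com.ning.compress.lzf.LZFChunk;
import io.netty.handler.codec.compression.DecompressionException;

final class LzfBoundsCheck {
    private LzfBoundsCheck() { }

    // Hypothetical helper mirroring the guards added in this commit:
    // fail fast with a descriptive DecompressionException instead of
    // silently continuing with a corrupt length.
    static int checkLength(String what, int length) {
        if (length > LZFChunk.MAX_CHUNK_LEN) {
            throw new DecompressionException(String.format(
                    "%s exceeds maximum: %d (expected: <= %d)",
                    what, length, LZFChunk.MAX_CHUNK_LEN));
        }
        return length;
    }
}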