Add some size checks to make code more robust and more clear (#11512)
Motivation: While it is technically impossible for a chunk to be larger than 64 KB, adding some size checks to LzfDecoder still makes the code easier to read and more robust. Modifications: Check against the maximum chunk length. Result: More robust code that is easier to reason about.
This commit is contained in:
parent
8af59e4b40
commit
165a035a15
@ -17,6 +17,7 @@ package io.netty.handler.codec.compression;
|
|||||||
|
|
||||||
import com.ning.compress.BufferRecycler;
|
import com.ning.compress.BufferRecycler;
|
||||||
import com.ning.compress.lzf.ChunkDecoder;
|
import com.ning.compress.lzf.ChunkDecoder;
|
||||||
|
import com.ning.compress.lzf.LZFChunk;
|
||||||
import com.ning.compress.lzf.util.ChunkDecoderFactory;
|
import com.ning.compress.lzf.util.ChunkDecoderFactory;
|
||||||
import io.netty.buffer.ByteBuf;
|
import io.netty.buffer.ByteBuf;
|
||||||
import io.netty.channel.ChannelHandlerContext;
|
import io.netty.channel.ChannelHandlerContext;
|
||||||
@ -137,6 +138,15 @@ public class LzfDecoder extends ByteToMessageDecoder {
|
|||||||
}
|
}
|
||||||
chunkLength = in.readUnsignedShort();
|
chunkLength = in.readUnsignedShort();
|
||||||
|
|
||||||
|
// chunkLength can never exceed MAX_CHUNK_LEN as MAX_CHUNK_LEN is 64kb and readUnsignedShort can
|
||||||
|
// never return anything bigger either. Let's add a check anyway to make things easier in terms
|
||||||
|
// of debugging if we ever hit this because of a bug.
|
||||||
|
if (chunkLength > LZFChunk.MAX_CHUNK_LEN) {
|
||||||
|
throw new DecompressionException(String.format(
|
||||||
|
"chunk length exceeds maximum: %d (expected: =< %d)",
|
||||||
|
chunkLength, LZFChunk.MAX_CHUNK_LEN));
|
||||||
|
}
|
||||||
|
|
||||||
if (type != BLOCK_TYPE_COMPRESSED) {
|
if (type != BLOCK_TYPE_COMPRESSED) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -147,6 +157,15 @@ public class LzfDecoder extends ByteToMessageDecoder {
|
|||||||
}
|
}
|
||||||
originalLength = in.readUnsignedShort();
|
originalLength = in.readUnsignedShort();
|
||||||
|
|
||||||
|
// originalLength can never exceed MAX_CHUNK_LEN as MAX_CHUNK_LEN is 64kb and readUnsignedShort can
|
||||||
|
// never return anything bigger either. Let's add a check anyway to make things easier in terms
|
||||||
|
// of debugging if we ever hit this because of a bug.
|
||||||
|
if (originalLength > LZFChunk.MAX_CHUNK_LEN) {
|
||||||
|
throw new DecompressionException(String.format(
|
||||||
|
"original length exceeds maximum: %d (expected: =< %d)",
|
||||||
|
chunkLength, LZFChunk.MAX_CHUNK_LEN));
|
||||||
|
}
|
||||||
|
|
||||||
currentState = State.DECOMPRESS_DATA;
|
currentState = State.DECOMPRESS_DATA;
|
||||||
// fall through
|
// fall through
|
||||||
case DECOMPRESS_DATA:
|
case DECOMPRESS_DATA:
|
||||||
|
Loading…
Reference in New Issue
Block a user