Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Jestan Nirojan 2012-08-12 20:12:51 +08:00
commit 7ab8ca4d14
236 changed files with 8525 additions and 10101 deletions

View File

@ -20,7 +20,7 @@
<parent>
<groupId>io.netty</groupId>
<artifactId>netty-parent</artifactId>
<version>4.0.0.Alpha1-SNAPSHOT</version>
<version>4.0.0.Alpha2-SNAPSHOT</version>
</parent>
<artifactId>netty</artifactId>
@ -109,7 +109,7 @@
<executions>
<execution>
<id>unpack-sources</id>
<phase>generate-resources</phase>
<phase>prepare-package</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
@ -122,7 +122,7 @@
</execution>
<execution>
<id>unpack-jars</id>
<phase>generate-resources</phase>
<phase>prepare-package</phase>
<goals>
<goal>unpack-dependencies</goal>
</goals>
@ -141,7 +141,7 @@
<executions>
<execution>
<id>add-source</id>
<phase>generate-sources</phase>
<phase>prepare-package</phase>
<goals>
<goal>add-source</goal>
</goals>
@ -244,10 +244,9 @@
</plugin>
<plugin>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.8.1</version>
<executions>
<execution>
<id>javadoc</id>
<id>attach-javadocs</id>
<phase>package</phase>
<goals>
<goal>jar</goal>

View File

@ -20,7 +20,7 @@
<parent>
<groupId>io.netty</groupId>
<artifactId>netty-parent</artifactId>
<version>4.0.0.Alpha1-SNAPSHOT</version>
<version>4.0.0.Alpha2-SNAPSHOT</version>
</parent>
<artifactId>netty-buffer</artifactId>

View File

@ -32,18 +32,25 @@ public abstract class AbstractByteBuf implements ByteBuf {
private final SwappedByteBuf swappedBuf;
private final ByteOrder order;
private final int maxCapacity;
private int readerIndex;
private int writerIndex;
private int markedReaderIndex;
private int markedWriterIndex;
protected AbstractByteBuf(ByteOrder endianness) {
int refCnt = 1;
protected AbstractByteBuf(ByteOrder endianness, int maxCapacity) {
if (endianness == null) {
throw new NullPointerException("endianness");
}
if (maxCapacity < 0) {
throw new IllegalArgumentException("maxCapacity: " + maxCapacity + " (expected: >= 0)");
}
order = endianness;
swappedBuf = new SwappedByteBuf(this);
this.maxCapacity = maxCapacity;
}
@Override
@ -56,6 +63,11 @@ public abstract class AbstractByteBuf implements ByteBuf {
return ChannelBufType.BYTE;
}
@Override
public int maxCapacity() {
return maxCapacity;
}
@Override
public int readerIndex() {
return readerIndex;
@ -146,24 +158,72 @@ public abstract class AbstractByteBuf implements ByteBuf {
return;
}
if (readerIndex == writerIndex) {
clear();
return;
}
if (readerIndex != writerIndex) {
setBytes(0, this, readerIndex, writerIndex - readerIndex);
writerIndex -= readerIndex;
markedReaderIndex = Math.max(markedReaderIndex - readerIndex, 0);
markedWriterIndex = Math.max(markedWriterIndex - readerIndex, 0);
adjustMarkers(readerIndex);
readerIndex = 0;
} else {
adjustMarkers(readerIndex);
writerIndex = readerIndex = 0;
}
}
protected void adjustMarkers(int decrement) {
markedReaderIndex = Math.max(markedReaderIndex - decrement, 0);
markedWriterIndex = Math.max(markedWriterIndex - decrement, 0);
}
@Override
public void ensureWritableBytes(int writableBytes) {
if (writableBytes > writableBytes()) {
throw new IndexOutOfBoundsException("Writable bytes exceeded: Got "
+ writableBytes + ", maximum is " + writableBytes());
public void ensureWritableBytes(int minWritableBytes) {
if (minWritableBytes <= writableBytes()) {
return;
}
if (minWritableBytes > maxCapacity - writerIndex) {
throw new IllegalArgumentException(String.format(
"minWritableBytes(%d) + writerIndex(%d) > maxCapacity(%d)",
minWritableBytes, writerIndex, maxCapacity));
}
int minNewCapacity = writerIndex + minWritableBytes;
if (minNewCapacity > maxCapacity) {
throw new IllegalArgumentException(String.format(
"minWritableBytes: %d (exceeds maxCapacity(%d))", minWritableBytes, maxCapacity));
}
// Normalize the current capacity to the power of 2.
int newCapacity = calculateNewCapacity(minNewCapacity);
// Adjust to the new capacity.
capacity(newCapacity);
}
private int calculateNewCapacity(int minNewCapacity) {
final int threshold = 1048576 * 4; // 4 MiB page
if (minNewCapacity == threshold) {
return minNewCapacity;
}
// If over threshold, do not double but just increase by threshold.
if (minNewCapacity > threshold) {
int newCapacity = minNewCapacity / threshold * threshold;
if (newCapacity > maxCapacity - threshold) {
newCapacity = maxCapacity;
} else {
newCapacity += threshold;
}
return newCapacity;
}
// Not over threshold. Double up to 4 MiB, starting from 64.
int newCapacity = 64;
while (newCapacity < minNewCapacity) {
newCapacity <<= 1;
}
return newCapacity;
}
@Override
@ -410,7 +470,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
ByteBuf buf = factory().getBuffer(order(), length);
ByteBuf buf = unsafe().newBuffer(length);
buf.writeBytes(this, readerIndex, length);
readerIndex += length;
return buf;
@ -498,29 +558,34 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public void writeByte(int value) {
ensureWritableBytes(1);
setByte(writerIndex ++, value);
}
@Override
public void writeShort(int value) {
ensureWritableBytes(2);
setShort(writerIndex, value);
writerIndex += 2;
}
@Override
public void writeMedium(int value) {
ensureWritableBytes(3);
setMedium(writerIndex, value);
writerIndex += 3;
}
@Override
public void writeInt(int value) {
ensureWritableBytes(4);
setInt(writerIndex, value);
writerIndex += 4;
}
@Override
public void writeLong(long value) {
ensureWritableBytes(8);
setLong(writerIndex, value);
writerIndex += 8;
}
@ -542,6 +607,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public void writeBytes(byte[] src, int srcIndex, int length) {
ensureWritableBytes(length);
setBytes(writerIndex, src, srcIndex, length);
writerIndex += length;
}
@ -568,6 +634,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public void writeBytes(ByteBuf src, int srcIndex, int length) {
ensureWritableBytes(length);
setBytes(writerIndex, src, srcIndex, length);
writerIndex += length;
}
@ -575,6 +642,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public void writeBytes(ByteBuffer src) {
int length = src.remaining();
ensureWritableBytes(length);
setBytes(writerIndex, src);
writerIndex += length;
}
@ -582,6 +650,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public int writeBytes(InputStream in, int length)
throws IOException {
ensureWritableBytes(length);
int writtenBytes = setBytes(writerIndex, in, length);
if (writtenBytes > 0) {
writerIndex += writtenBytes;
@ -592,6 +661,7 @@ public abstract class AbstractByteBuf implements ByteBuf {
@Override
public int writeBytes(ScatteringByteChannel in, int length)
throws IOException {
ensureWritableBytes(length);
int writtenBytes = setBytes(writerIndex, in, length);
if (writtenBytes > 0) {
writerIndex += writtenBytes;
@ -632,11 +702,25 @@ public abstract class AbstractByteBuf implements ByteBuf {
return copy(readerIndex, readableBytes());
}
@Override
public ByteBuf duplicate() {
return new DuplicatedByteBuf(this);
}
@Override
public ByteBuf slice() {
return slice(readerIndex, readableBytes());
}
@Override
public ByteBuf slice(int index, int length) {
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
return new SlicedByteBuf(this, index, length);
}
@Override
public ByteBuffer nioBuffer() {
return nioBuffer(readerIndex, readableBytes());

View File

@ -1,61 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.nio.ByteOrder;
/**
* A skeletal implementation of {@link ByteBufFactory}.
*/
public abstract class AbstractByteBufFactory implements ByteBufFactory {
private final ByteOrder defaultOrder;
/**
* Creates a new factory whose default {@link ByteOrder} is
* {@link ByteOrder#BIG_ENDIAN}.
*/
protected AbstractByteBufFactory() {
this(ByteOrder.BIG_ENDIAN);
}
/**
* Creates a new factory with the specified default {@link ByteOrder}.
*
* @param defaultOrder the default {@link ByteOrder} of this factory
*/
protected AbstractByteBufFactory(ByteOrder defaultOrder) {
if (defaultOrder == null) {
throw new NullPointerException("defaultOrder");
}
this.defaultOrder = defaultOrder;
}
@Override
public ByteBuf getBuffer(int capacity) {
return getBuffer(getDefaultOrder(), capacity);
}
@Override
public ByteBuf getBuffer(byte[] array, int offset, int length) {
return getBuffer(getDefaultOrder(), array, offset, length);
}
@Override
public ByteOrder getDefaultOrder() {
return defaultOrder;
}
}

View File

@ -230,17 +230,27 @@ import java.nio.charset.UnsupportedCharsetException;
*/
public interface ByteBuf extends ChannelBuf, Comparable<ByteBuf> {
/**
* Returns the factory which creates a {@link ByteBuf} whose
* type and default {@link ByteOrder} are same with this buffer.
*/
ByteBufFactory factory();
/**
* Returns the number of bytes (octets) this buffer can contain.
*/
int capacity();
/**
* Adjusts the capacity of this buffer. If the {@code newCapacity} is less than the current
* capacity, the content of this buffer is truncated. If the {@code newCapacity} is greater
* than the current capacity, the buffer is appended with unspecified data whose length is
* {@code (newCapacity - currentCapacity)}.
*/
void capacity(int newCapacity);
/**
* Returns the maximum allowed capacity of this buffer. If a user attempts to increase the
* capacity of this buffer beyond the maximum capacity using {@link #capacity(int)} or
* {@link #ensureWritableBytes(int)}, those methods will raise an
* {@link IllegalArgumentException}.
*/
int maxCapacity();
/**
* Returns the <a href="http://en.wikipedia.org/wiki/Endianness">endianness</a>
* of this buffer.
@ -1776,4 +1786,46 @@ public interface ByteBuf extends ChannelBuf, Comparable<ByteBuf> {
*/
@Override
String toString();
/**
* Returns an object that exposes unsafe expert-only operations which can lead to unspecified
* behavior.
*/
Unsafe unsafe();
interface Unsafe {
/**
* Returns the internal NIO buffer that is reused for I/O.
*
* @throws UnsupportedOperationException if the buffer has no internal NIO buffer
*/
ByteBuffer nioBuffer();
/**
* Returns a new buffer whose type is identical to the callee.
*
* @param initialCapacity the initial capacity of the new buffer
*/
ByteBuf newBuffer(int initialCapacity);
/**
* Similar to {@link ByteBuf#discardReadBytes()} except that this method might discard
* some, all, or none of read bytes depending on its internal implementation to reduce
* overall memory bandwidth consumption at the cost of potentially additional memory
* consumption.
*/
void discardSomeReadBytes();
/**
* Increases the reference count of the buffer.
*/
void acquire();
/**
* Decreases the reference count of the buffer. If decreased to 0, the internal memory
* block of the buffer will be deallocated. The result of accessing a freed buffer is
* unspecified and can even cause JVM crash.
*/
void release();
}
}

View File

@ -1,104 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* A factory that creates or pools {@link ByteBuf}s.
*/
public interface ByteBufFactory {
/**
* Returns a {@link ByteBuf} with the specified {@code capacity}.
* This method is identical to {@code getBuffer(getDefaultOrder(), capacity)}.
*
* @param capacity the capacity of the returned {@link ByteBuf}
* @return a {@link ByteBuf} with the specified {@code capacity},
* whose {@code readerIndex} and {@code writerIndex} are {@code 0}
*/
ByteBuf getBuffer(int capacity);
/**
* Returns a {@link ByteBuf} with the specified {@code endianness}
* and {@code capacity}.
*
* @param endianness the endianness of the returned {@link ByteBuf}
* @param capacity the capacity of the returned {@link ByteBuf}
* @return a {@link ByteBuf} with the specified {@code endianness} and
* {@code capacity}, whose {@code readerIndex} and {@code writerIndex}
* are {@code 0}
*/
ByteBuf getBuffer(ByteOrder endianness, int capacity);
/**
* Returns a {@link ByteBuf} whose content is equal to the sub-region
* of the specified {@code array}. Depending on the factory implementation,
* the returned buffer could wrap the {@code array} or create a new copy of
* the {@code array}.
* This method is identical to {@code getBuffer(getDefaultOrder(), array, offset, length)}.
*
* @param array the byte array
* @param offset the offset of the byte array
* @param length the length of the byte array
*
* @return a {@link ByteBuf} with the specified content,
* whose {@code readerIndex} and {@code writerIndex}
* are {@code 0} and {@code (length - offset)} respectively
*/
ByteBuf getBuffer(byte[] array, int offset, int length);
/**
* Returns a {@link ByteBuf} whose content is equal to the sub-region
* of the specified {@code array}. Depending on the factory implementation,
* the returned buffer could wrap the {@code array} or create a new copy of
* the {@code array}.
*
* @param endianness the endianness of the returned {@link ByteBuf}
* @param array the byte array
* @param offset the offset of the byte array
* @param length the length of the byte array
*
* @return a {@link ByteBuf} with the specified content,
* whose {@code readerIndex} and {@code writerIndex}
* are {@code 0} and {@code (length - offset)} respectively
*/
ByteBuf getBuffer(ByteOrder endianness, byte[] array, int offset, int length);
/**
* Returns a {@link ByteBuf} whose content is equal to the sub-region
* of the specified {@code nioBuffer}. Depending on the factory
* implementation, the returned buffer could wrap the {@code nioBuffer} or
* create a new copy of the {@code nioBuffer}.
*
* @param nioBuffer the NIO {@link ByteBuffer}
*
* @return a {@link ByteBuf} with the specified content,
* whose {@code readerIndex} and {@code writerIndex}
* are {@code 0} and {@code nioBuffer.remaining()} respectively
*/
ByteBuf getBuffer(ByteBuffer nioBuffer);
/**
* Returns the default endianness of the {@link ByteBuf} which is
* returned by {@link #getBuffer(int)}.
*
* @return the default endianness of the {@link ByteBuf} which is
* returned by {@link #getBuffer(int)}
*/
ByteOrder getDefaultOrder();
}

View File

@ -15,732 +15,65 @@
*/
package io.netty.buffer;
import io.netty.util.internal.DetectionUtil;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public interface CompositeByteBuf extends ByteBuf, Iterable<ByteBuf> {
/**
* A virtual buffer which shows multiple buffers as a single merged buffer. It
* is recommended to use {@link Unpooled#wrappedBuffer(ByteBuf...)}
* instead of calling the constructor explicitly.
*/
public class CompositeByteBuf extends AbstractByteBuf {
void addComponent(ByteBuf buffer);
void addComponent(int cIndex, ByteBuf buffer);
private ByteBuf[] components;
private int[] indices;
private int lastAccessedComponentId;
void addComponents(ByteBuf... buffers);
void addComponents(Iterable<ByteBuf> buffers);
void addComponents(int cIndex, ByteBuf... buffers);
void addComponents(int cIndex, Iterable<ByteBuf> buffers);
public CompositeByteBuf(ByteOrder endianness, List<ByteBuf> buffers) {
super(endianness);
setComponents(buffers);
}
void removeComponent(int cIndex);
void removeComponents(int cIndex, int numComponents);
int numComponents();
int maxNumComponents();
ByteBuf component(int cIndex);
ByteBuf componentAtOffset(int offset);
void discardReadComponents();
void consolidate();
void consolidate(int cIndex, int numComponents);
int toComponentIndex(int offset);
int toByteIndex(int cIndex);
/**
* Same with {@link #slice(int, int)} except that this method returns a list.
*/
public List<ByteBuf> decompose(int index, int length) {
if (length == 0) {
return Collections.emptyList();
}
if (index + length > capacity()) {
throw new IndexOutOfBoundsException("Too many bytes to decompose - Need "
+ (index + length) + ", capacity is " + capacity());
}
int componentId = componentId(index);
List<ByteBuf> slice = new ArrayList<ByteBuf>(components.length);
// The first component
ByteBuf first = components[componentId].duplicate();
first.readerIndex(index - indices[componentId]);
ByteBuf buf = first;
int bytesToSlice = length;
do {
int readableBytes = buf.readableBytes();
if (bytesToSlice <= readableBytes) {
// Last component
buf.writerIndex(buf.readerIndex() + bytesToSlice);
slice.add(buf);
break;
} else {
// Not the last component
slice.add(buf);
bytesToSlice -= readableBytes;
componentId ++;
// Fetch the next component.
buf = components[componentId].duplicate();
}
} while (bytesToSlice > 0);
// Slice all components because only readable bytes are interesting.
for (int i = 0; i < slice.size(); i ++) {
slice.set(i, slice.get(i).slice());
}
return slice;
}
List<ByteBuf> decompose(int offset, int length);
/**
* Setup this ChannelBuffer from the list
* Exposes this buffer's readable bytes as an NIO {@link ByteBuffer}'s. The returned buffer
* shares the content with this buffer, while changing the position and limit of the returned
* NIO buffer does not affect the indexes and marks of this buffer. This method does not
* modify {@code readerIndex} or {@code writerIndex} of this buffer. Please note that the
* returned NIO buffer will not see the changes of this buffer if this buffer is a dynamic
* buffer and it adjusted its capacity.
*
*
* @throws UnsupportedOperationException
* if this buffer cannot create a {@link ByteBuffer} that shares the content with itself
*/
private void setComponents(List<ByteBuf> newComponents) {
assert !newComponents.isEmpty();
// Clear the cache.
lastAccessedComponentId = 0;
// Build the component array.
components = new ByteBuf[newComponents.size()];
for (int i = 0; i < components.length; i ++) {
ByteBuf c = newComponents.get(i);
if (c.order() != order()) {
throw new IllegalArgumentException(
"All buffers must have the same endianness.");
}
assert c.readerIndex() == 0;
assert c.writerIndex() == c.capacity();
components[i] = c;
}
// Build the component lookup table.
indices = new int[components.length + 1];
indices[0] = 0;
for (int i = 1; i <= components.length; i ++) {
indices[i] = indices[i - 1] + components[i - 1].capacity();
}
// Reset the indexes.
setIndex(0, capacity());
}
private CompositeByteBuf(CompositeByteBuf buffer) {
super(buffer.order());
components = buffer.components.clone();
indices = buffer.indices.clone();
setIndex(buffer.readerIndex(), buffer.writerIndex());
}
@Override
public ByteBufFactory factory() {
return HeapByteBufFactory.getInstance(order());
}
@Override
public boolean isDirect() {
return false;
}
@Override
public boolean hasArray() {
return false;
}
@Override
public byte[] array() {
throw new UnsupportedOperationException();
}
@Override
public int arrayOffset() {
throw new UnsupportedOperationException();
}
@Override
public int capacity() {
return indices[components.length];
}
@Override
public byte getByte(int index) {
int componentId = componentId(index);
return components[componentId].getByte(index - indices[componentId]);
}
@Override
public short getShort(int index) {
int componentId = componentId(index);
if (index + 2 <= indices[componentId + 1]) {
return components[componentId].getShort(index - indices[componentId]);
} else if (order() == ByteOrder.BIG_ENDIAN) {
return (short) ((getByte(index) & 0xff) << 8 | getByte(index + 1) & 0xff);
} else {
return (short) (getByte(index) & 0xff | (getByte(index + 1) & 0xff) << 8);
}
}
@Override
public int getUnsignedMedium(int index) {
int componentId = componentId(index);
if (index + 3 <= indices[componentId + 1]) {
return components[componentId].getUnsignedMedium(index - indices[componentId]);
} else if (order() == ByteOrder.BIG_ENDIAN) {
return (getShort(index) & 0xffff) << 8 | getByte(index + 2) & 0xff;
} else {
return getShort(index) & 0xFFFF | (getByte(index + 2) & 0xFF) << 16;
}
}
@Override
public int getInt(int index) {
int componentId = componentId(index);
if (index + 4 <= indices[componentId + 1]) {
return components[componentId].getInt(index - indices[componentId]);
} else if (order() == ByteOrder.BIG_ENDIAN) {
return (getShort(index) & 0xffff) << 16 | getShort(index + 2) & 0xffff;
} else {
return getShort(index) & 0xFFFF | (getShort(index + 2) & 0xFFFF) << 16;
}
}
@Override
public long getLong(int index) {
int componentId = componentId(index);
if (index + 8 <= indices[componentId + 1]) {
return components[componentId].getLong(index - indices[componentId]);
} else if (order() == ByteOrder.BIG_ENDIAN) {
return (getInt(index) & 0xffffffffL) << 32 | getInt(index + 4) & 0xffffffffL;
} else {
return getInt(index) & 0xFFFFFFFFL | (getInt(index + 4) & 0xFFFFFFFFL) << 32;
}
}
@Override
public void getBytes(int index, byte[] dst, int dstIndex, int length) {
int componentId = componentId(index);
if (index > capacity() - length || dstIndex > dst.length - length) {
throw new IndexOutOfBoundsException("Too many bytes to read - Needs "
+ (index + length) + ", maximum is " + capacity() + " or "
+ dst.length);
}
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.getBytes(index - adjustment, dst, dstIndex, localLength);
index += localLength;
dstIndex += localLength;
length -= localLength;
i ++;
}
}
@Override
public void getBytes(int index, ByteBuffer dst) {
int componentId = componentId(index);
int limit = dst.limit();
int length = dst.remaining();
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to be read - Needs "
+ (index + length) + ", maximum is " + capacity());
}
int i = componentId;
try {
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
dst.limit(dst.position() + localLength);
s.getBytes(index - adjustment, dst);
index += localLength;
length -= localLength;
i ++;
}
} finally {
dst.limit(limit);
}
}
@Override
public void getBytes(int index, ByteBuf dst, int dstIndex, int length) {
int componentId = componentId(index);
if (index > capacity() - length || dstIndex > dst.capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to be read - Needs "
+ (index + length) + " or " + (dstIndex + length) + ", maximum is "
+ capacity() + " or " + dst.capacity());
}
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.getBytes(index - adjustment, dst, dstIndex, localLength);
index += localLength;
dstIndex += localLength;
length -= localLength;
i ++;
}
}
@Override
public int getBytes(int index, GatheringByteChannel out, int length)
throws IOException {
if (DetectionUtil.javaVersion() < 7) {
// XXX Gathering write is not supported because of a known issue.
// See http://bugs.sun.com/view_bug.do?bug_id=6210541
return out.write(copiedNioBuffer(index, length));
} else {
long writtenBytes = out.write(nioBuffers(index, length));
if (writtenBytes > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else {
return (int) writtenBytes;
}
}
}
@Override
public void getBytes(int index, OutputStream out, int length)
throws IOException {
int componentId = componentId(index);
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to be read - needs "
+ (index + length) + ", maximum of " + capacity());
}
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.getBytes(index - adjustment, out, localLength);
index += localLength;
length -= localLength;
i ++;
}
}
@Override
public void setByte(int index, int value) {
int componentId = componentId(index);
components[componentId].setByte(index - indices[componentId], value);
}
@Override
public void setShort(int index, int value) {
int componentId = componentId(index);
if (index + 2 <= indices[componentId + 1]) {
components[componentId].setShort(index - indices[componentId], value);
} else if (order() == ByteOrder.BIG_ENDIAN) {
setByte(index, (byte) (value >>> 8));
setByte(index + 1, (byte) value);
} else {
setByte(index , (byte) value);
setByte(index + 1, (byte) (value >>> 8));
}
}
@Override
public void setMedium(int index, int value) {
int componentId = componentId(index);
if (index + 3 <= indices[componentId + 1]) {
components[componentId].setMedium(index - indices[componentId], value);
} else if (order() == ByteOrder.BIG_ENDIAN) {
setShort(index, (short) (value >> 8));
setByte(index + 2, (byte) value);
} else {
setShort(index , (short) value);
setByte(index + 2, (byte) (value >>> 16));
}
}
@Override
public void setInt(int index, int value) {
int componentId = componentId(index);
if (index + 4 <= indices[componentId + 1]) {
components[componentId].setInt(index - indices[componentId], value);
} else if (order() == ByteOrder.BIG_ENDIAN) {
setShort(index, (short) (value >>> 16));
setShort(index + 2, (short) value);
} else {
setShort(index , (short) value);
setShort(index + 2, (short) (value >>> 16));
}
}
@Override
public void setLong(int index, long value) {
int componentId = componentId(index);
if (index + 8 <= indices[componentId + 1]) {
components[componentId].setLong(index - indices[componentId], value);
} else if (order() == ByteOrder.BIG_ENDIAN) {
setInt(index, (int) (value >>> 32));
setInt(index + 4, (int) value);
} else {
setInt(index , (int) value);
setInt(index + 4, (int) (value >>> 32));
}
}
@Override
public void setBytes(int index, byte[] src, int srcIndex, int length) {
int componentId = componentId(index);
if (index > capacity() - length || srcIndex > src.length - length) {
throw new IndexOutOfBoundsException("Too many bytes to read - needs "
+ (index + length) + " or " + (srcIndex + length) + ", maximum is "
+ capacity() + " or " + src.length);
}
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.setBytes(index - adjustment, src, srcIndex, localLength);
index += localLength;
srcIndex += localLength;
length -= localLength;
i ++;
}
}
@Override
public void setBytes(int index, ByteBuffer src) {
int componentId = componentId(index);
int limit = src.limit();
int length = src.remaining();
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to be written - Needs "
+ (index + length) + ", maximum is " + capacity());
}
int i = componentId;
try {
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
src.limit(src.position() + localLength);
s.setBytes(index - adjustment, src);
index += localLength;
length -= localLength;
i ++;
}
} finally {
src.limit(limit);
}
}
@Override
public void setBytes(int index, ByteBuf src, int srcIndex, int length) {
int componentId = componentId(index);
if (index > capacity() - length || srcIndex > src.capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to be written - Needs "
+ (index + length) + " or " + (srcIndex + length) + ", maximum is "
+ capacity() + " or " + src.capacity());
}
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.setBytes(index - adjustment, src, srcIndex, localLength);
index += localLength;
srcIndex += localLength;
length -= localLength;
i ++;
}
}
@Override
public int setBytes(int index, InputStream in, int length)
throws IOException {
int componentId = componentId(index);
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to write - Needs "
+ (index + length) + ", maximum is " + capacity());
}
int i = componentId;
int readBytes = 0;
do {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
int localReadBytes = s.setBytes(index - adjustment, in, localLength);
if (localReadBytes < 0) {
if (readBytes == 0) {
return -1;
} else {
break;
}
}
if (localReadBytes == localLength) {
index += localLength;
length -= localLength;
readBytes += localLength;
i ++;
} else {
index += localReadBytes;
length -= localReadBytes;
readBytes += localReadBytes;
}
} while (length > 0);
return readBytes;
}
@Override
public int setBytes(int index, ScatteringByteChannel in, int length)
throws IOException {
int componentId = componentId(index);
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to write - Needs "
+ (index + length) + ", maximum is " + capacity());
}
int i = componentId;
int readBytes = 0;
do {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
int localReadBytes = s.setBytes(index - adjustment, in, localLength);
if (localReadBytes == localLength) {
index += localLength;
length -= localLength;
readBytes += localLength;
i ++;
} else {
index += localReadBytes;
length -= localReadBytes;
readBytes += localReadBytes;
}
} while (length > 0);
return readBytes;
}
@Override
public ByteBuf duplicate() {
ByteBuf duplicate = new CompositeByteBuf(this);
duplicate.setIndex(readerIndex(), writerIndex());
return duplicate;
}
@Override
public ByteBuf copy(int index, int length) {
int componentId = componentId(index);
if (index > capacity() - length) {
throw new IndexOutOfBoundsException("Too many bytes to copy - Needs "
+ (index + length) + ", maximum is " + capacity());
}
ByteBuf dst = factory().getBuffer(order(), length);
copyTo(index, length, componentId, dst);
return dst;
}
private void copyTo(int index, int length, int componentId, ByteBuf dst) {
int dstIndex = 0;
int i = componentId;
while (length > 0) {
ByteBuf s = components[i];
int adjustment = indices[i];
int localLength = Math.min(length, s.capacity() - (index - adjustment));
s.getBytes(index - adjustment, dst, dstIndex, localLength);
index += localLength;
dstIndex += localLength;
length -= localLength;
i ++;
}
dst.writerIndex(dst.capacity());
}
@Override
public ByteBuf slice(int index, int length) {
if (index == 0) {
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
} else if (index < 0 || index > capacity() - length) {
throw new IndexOutOfBoundsException("Invalid index: " + index
+ " - Bytes needed: " + (index + length) + ", maximum is "
+ capacity());
} else if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
List<ByteBuf> components = decompose(index, length);
switch (components.size()) {
case 0:
return Unpooled.EMPTY_BUFFER;
case 1:
return components.get(0);
default:
return new CompositeByteBuf(order(), components);
}
}
@Override
public boolean hasNioBuffer() {
    // Content is scattered across several backing buffers, so no single
    // shared NIO ByteBuffer view exists.
    return false;
}
@Override
public ByteBuffer nioBuffer(int index, int length) {
    // A composite buffer spans multiple backing buffers and cannot expose a
    // single shared NIO buffer. Give callers an actionable message instead
    // of a bare exception.
    throw new UnsupportedOperationException(
            "a composite buffer has no single NIO buffer - use nioBuffers(int, int)");
}
/**
 * Returns a freshly allocated NIO buffer holding a copy of the bytes in
 * the given range, in this buffer's byte order.
 */
private ByteBuffer copiedNioBuffer(int index, int length) {
    // Single-component composites can delegate straight to the component.
    if (components.length == 1) {
        return toNioBuffer(components[0], index, length);
    }

    // Otherwise gather every overlapping component into one buffer.
    final ByteBuffer[] parts = nioBuffers(index, length);
    final ByteBuffer merged = ByteBuffer.allocate(length).order(order());
    for (int i = 0; i < parts.length; i ++) {
        merged.put(parts[i]);
    }
    merged.flip();
    return merged;
}
/**
 * Collects one NIO buffer per component overlapping the given range.
 * Components that cannot share an NIO view are copied (see toNioBuffer).
 *
 * @throws IndexOutOfBoundsException if the range exceeds the capacity
 */
private ByteBuffer[] nioBuffers(int index, int length) {
    int componentId = componentId(index);
    if (index + length > capacity()) {
        // Fixed: the original message was missing the space after "Needs",
        // producing e.g. "Needs128, maximum is 64".
        throw new IndexOutOfBoundsException("Too many bytes to convert - Needs "
                + (index + length) + ", maximum is " + capacity());
    }

    List<ByteBuffer> buffers = new ArrayList<ByteBuffer>(components.length);

    int i = componentId;
    while (length > 0) {
        ByteBuf c = components[i];
        int adjustment = indices[i];
        int localLength = Math.min(length, c.capacity() - (index - adjustment));
        buffers.add(toNioBuffer(c, index - adjustment, localLength));
        index += localLength;
        length -= localLength;
        i ++;
    }

    return buffers.toArray(new ByteBuffer[buffers.size()]);
}
/**
 * Exposes the given range of {@code buf} as an NIO buffer, copying the
 * bytes when the buffer cannot share an NIO view.
 */
private static ByteBuffer toNioBuffer(ByteBuf buf, int index, int length) {
    if (!buf.hasNioBuffer()) {
        return buf.copy(index, length).nioBuffer(0, length);
    }
    return buf.nioBuffer(index, length);
}
/**
 * Returns the index into {@code components} of the component containing the
 * byte at absolute position {@code index}. {@code indices[i]} holds the
 * absolute start offset of component {@code i}, so the lookup is a search
 * over that offset table, starting from the most recently accessed
 * component as a locality heuristic.
 *
 * @throws IndexOutOfBoundsException if no component covers {@code index}
 */
private int componentId(int index) {
    int lastComponentId = lastAccessedComponentId;
    if (index >= indices[lastComponentId]) {
        // Cache hit: the index falls inside the last accessed component.
        if (index < indices[lastComponentId + 1]) {
            return lastComponentId;
        }
        // Search right
        for (int i = lastComponentId + 1; i < components.length; i ++) {
            if (index < indices[i + 1]) {
                lastAccessedComponentId = i;
                return i;
            }
        }
    } else {
        // Search left
        for (int i = lastComponentId - 1; i >= 0; i --) {
            if (index >= indices[i]) {
                lastAccessedComponentId = i;
                return i;
            }
        }
    }
    // NOTE(review): the message reports indices.length (the table size),
    // not the maximum valid byte index - slightly misleading but harmless.
    throw new IndexOutOfBoundsException("Invalid index: " + index + ", maximum: " + indices.length);
}
/**
 * Drops the components before {@code readerIndex} and rebuilds the
 * component list, then appends a padding buffer so the total capacity does
 * not shrink. Marked reader/writer indexes are translated by the number of
 * discarded bytes (clamped at 0).
 */
@Override
public void discardReadBytes() {
    // Only the bytes between readerIndex and writerIndex will be kept.
    // New readerIndex and writerIndex will become 0 and
    // (previous writerIndex - previous readerIndex) respectively.
    final int localReaderIndex = readerIndex();
    if (localReaderIndex == 0) {
        // Nothing has been read yet; nothing to discard.
        return;
    }
    int localWriterIndex = writerIndex();

    final int bytesToMove = capacity() - localReaderIndex;
    List<ByteBuf> list = decompose(localReaderIndex, bytesToMove);

    // If the list is empty we need to assign a new one because
    // we get a List that is immutable.
    //
    // See https://github.com/netty/netty/issues/325
    if (list.isEmpty()) {
        list = new ArrayList<ByteBuf>(1);
    }

    // Add a new buffer so that the capacity of this composite buffer does
    // not decrease due to the discarded components.
    // XXX Might create too many components if discarded by small amount.
    final ByteBuf padding = Unpooled.buffer(localReaderIndex).order(order());
    padding.writerIndex(localReaderIndex);
    list.add(padding);

    // Reset the index markers to get the index marker values.
    // resetReaderIndex/resetWriterIndex throw if the mark is no longer
    // reachable; in that case the current index is kept as the mark.
    int localMarkedReaderIndex = localReaderIndex;
    try {
        resetReaderIndex();
        localMarkedReaderIndex = readerIndex();
    } catch (IndexOutOfBoundsException e) {
        // ignore
    }
    int localMarkedWriterIndex = localWriterIndex;
    try {
        resetWriterIndex();
        localMarkedWriterIndex = writerIndex();
    } catch (IndexOutOfBoundsException e) {
        // ignore
    }

    setComponents(list);

    // reset marked Indexes
    localMarkedReaderIndex = Math.max(localMarkedReaderIndex - localReaderIndex, 0);
    localMarkedWriterIndex = Math.max(localMarkedWriterIndex - localReaderIndex, 0);
    setIndex(localMarkedReaderIndex, localMarkedWriterIndex);
    markReaderIndex();
    markWriterIndex();
    // reset real indexes
    localWriterIndex = Math.max(localWriterIndex - localReaderIndex, 0);
    setIndex(0, localWriterIndex);
}
@Override
public String toString() {
    // Append the component count inside the superclass description by
    // replacing its trailing ')'.
    final String base = super.toString();
    return base.substring(0, base.length() - 1) + ", components=" + components.length + ")";
}
ByteBuffer[] nioBuffers();
/**
 * Exposes this buffer's bytes as an array of NIO {@link ByteBuffer}s for the specified offset and length.
 * The returned buffers share the content with this buffer, while changing the position and limit
 * of the returned NIO buffers does not affect the indexes and marks of this buffer. This method does
 * not modify {@code readerIndex} or {@code writerIndex} of this buffer. Please note that the
 * returned NIO buffers will not see the changes of this buffer if this buffer is a dynamic
 * buffer and it adjusted its capacity.
 *
 * @throws UnsupportedOperationException
 *         if this buffer cannot create {@link ByteBuffer}s that share the content with itself
 */
ByteBuffer[] nioBuffers(int offset, int length);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,455 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
import sun.misc.Cleaner;
/**
* A NIO {@link ByteBuffer} based buffer. It is recommended to use {@link Unpooled#directBuffer(int)}
* and {@link Unpooled#wrappedBuffer(ByteBuffer)} instead of calling the
* constructor explicitly.
*/
@SuppressWarnings("restriction")
public class DirectByteBuf extends AbstractByteBuf {

    // Reflective handle on DirectByteBuffer.cleaner, used to free direct
    // memory eagerly instead of waiting for GC. null when unavailable.
    private static final Field CLEANER_FIELD;

    static {
        ByteBuffer direct = ByteBuffer.allocateDirect(1);
        Field cleanerField;
        try {
            cleanerField = direct.getClass().getDeclaredField("cleaner");
            cleanerField.setAccessible(true);
            Cleaner cleaner = (Cleaner) cleanerField.get(direct);
            cleaner.clean();
        } catch (Throwable t) {
            // Eager freeing is best-effort; fall back to GC-driven release.
            cleanerField = null;
        }
        CLEANER_FIELD = cleanerField;
    }

    /** Best-effort immediate deallocation of a direct buffer's native memory. */
    private static void freeDirect(ByteBuffer buffer) {
        Cleaner cleaner;
        try {
            cleaner = (Cleaner) CLEANER_FIELD.get(buffer);
            cleaner.clean();
        } catch (Throwable t) {
            // Nothing we can do here.
        }
    }

    private final Unsafe unsafe = new DirectUnsafe();

    // true when the current backing buffer was supplied by the user and
    // therefore must not be freed by this class.
    private boolean doNotFree;
    private ByteBuffer buffer;
    // Duplicate of 'buffer' used as scratch for position/limit-based bulk
    // operations so the primary buffer's state is never disturbed.
    private ByteBuffer tmpBuf;
    private int capacity;

    /**
     * Creates a new direct buffer.
     *
     * @param initialCapacity the initial capacity of the underlying direct buffer
     * @param maxCapacity     the maximum capacity of the underlying direct buffer
     */
    public DirectByteBuf(int initialCapacity, int maxCapacity) {
        super(ByteOrder.BIG_ENDIAN, maxCapacity);
        if (initialCapacity < 0) {
            throw new IllegalArgumentException("initialCapacity: " + initialCapacity);
        }
        if (maxCapacity < 0) {
            throw new IllegalArgumentException("maxCapacity: " + maxCapacity);
        }
        if (initialCapacity > maxCapacity) {
            throw new IllegalArgumentException(String.format(
                    "initialCapacity(%d) > maxCapacity(%d)", initialCapacity, maxCapacity));
        }

        setByteBuffer(ByteBuffer.allocateDirect(initialCapacity));
    }

    /**
     * Creates a new direct buffer by wrapping the specified initial buffer.
     * The wrapped buffer is not freed on release because it is owned by the
     * caller.
     *
     * @param maxCapacity the maximum capacity of the underlying direct buffer
     */
    public DirectByteBuf(ByteBuffer initialBuffer, int maxCapacity) {
        super(ByteOrder.BIG_ENDIAN, maxCapacity);

        if (initialBuffer == null) {
            throw new NullPointerException("initialBuffer");
        }
        if (!initialBuffer.isDirect()) {
            throw new IllegalArgumentException("initialBuffer is not a direct buffer.");
        }
        if (initialBuffer.isReadOnly()) {
            throw new IllegalArgumentException("initialBuffer is a read-only buffer.");
        }

        int initialCapacity = initialBuffer.remaining();
        if (initialCapacity > maxCapacity) {
            throw new IllegalArgumentException(String.format(
                    "initialCapacity(%d) > maxCapacity(%d)", initialCapacity, maxCapacity));
        }

        doNotFree = true;
        setByteBuffer(initialBuffer.slice().order(ByteOrder.BIG_ENDIAN));
        writerIndex(initialCapacity);
    }

    /**
     * Installs a new backing buffer, freeing the previous one unless it was
     * user-supplied, and refreshes the scratch duplicate and cached capacity.
     */
    private void setByteBuffer(ByteBuffer buffer) {
        ByteBuffer oldBuffer = this.buffer;
        if (oldBuffer != null) {
            if (doNotFree) {
                doNotFree = false;
            } else {
                freeDirect(oldBuffer);
            }
        }

        this.buffer = buffer;
        tmpBuf = buffer.duplicate();
        capacity = buffer.remaining();
    }

    @Override
    public boolean isDirect() {
        return true;
    }

    @Override
    public int capacity() {
        return capacity;
    }

    /**
     * Resizes the buffer, copying the readable bytes into a newly allocated
     * direct buffer. Shrinking clamps the writer index (or both indexes) to
     * the new capacity.
     */
    @Override
    public void capacity(int newCapacity) {
        if (newCapacity < 0 || newCapacity > maxCapacity()) {
            throw new IllegalArgumentException("newCapacity: " + newCapacity);
        }

        int readerIndex = readerIndex();
        int writerIndex = writerIndex();

        int oldCapacity = capacity;
        if (newCapacity > oldCapacity) {
            ByteBuffer oldBuffer = buffer;
            ByteBuffer newBuffer = ByteBuffer.allocateDirect(newCapacity);
            oldBuffer.position(readerIndex).limit(writerIndex);
            newBuffer.position(readerIndex).limit(writerIndex);
            newBuffer.put(oldBuffer);
            newBuffer.clear();
            setByteBuffer(newBuffer);
        } else if (newCapacity < oldCapacity) {
            ByteBuffer oldBuffer = buffer;
            ByteBuffer newBuffer = ByteBuffer.allocateDirect(newCapacity);
            if (readerIndex < newCapacity) {
                if (writerIndex > newCapacity) {
                    writerIndex(writerIndex = newCapacity);
                }
                oldBuffer.position(readerIndex).limit(writerIndex);
                newBuffer.position(readerIndex).limit(writerIndex);
                newBuffer.put(oldBuffer);
                newBuffer.clear();
            } else {
                setIndex(newCapacity, newCapacity);
            }
            setByteBuffer(newBuffer);
        }
    }

    @Override
    public boolean hasArray() {
        return false;
    }

    @Override
    public byte[] array() {
        throw new UnsupportedOperationException("direct buffer");
    }

    @Override
    public int arrayOffset() {
        throw new UnsupportedOperationException("direct buffer");
    }

    @Override
    public byte getByte(int index) {
        return buffer.get(index);
    }

    @Override
    public short getShort(int index) {
        return buffer.getShort(index);
    }

    @Override
    public int getUnsignedMedium(int index) {
        // Assemble a 24-bit big-endian value from three single-byte reads.
        return (getByte(index) & 0xff) << 16 | (getByte(index + 1) & 0xff) << 8 |
               (getByte(index + 2) & 0xff) << 0;
    }

    @Override
    public int getInt(int index) {
        return buffer.getInt(index);
    }

    @Override
    public long getLong(int index) {
        return buffer.getLong(index);
    }

    @Override
    public void getBytes(int index, ByteBuf dst, int dstIndex, int length) {
        if (dst instanceof DirectByteBuf) {
            // Direct-to-direct: copy via the destination's scratch buffer.
            DirectByteBuf bbdst = (DirectByteBuf) dst;
            ByteBuffer data = bbdst.tmpBuf;
            data.clear().position(dstIndex).limit(dstIndex + length);
            getBytes(index, data);
        } else if (buffer.hasArray()) {
            dst.setBytes(dstIndex, buffer.array(), index + buffer.arrayOffset(), length);
        } else {
            dst.setBytes(dstIndex, this, index, length);
        }
    }

    @Override
    public void getBytes(int index, byte[] dst, int dstIndex, int length) {
        try {
            tmpBuf.clear().position(index).limit(index + length);
        } catch (IllegalArgumentException e) {
            // Translate NIO's IllegalArgumentException into the exception
            // type the ByteBuf contract promises.
            throw new IndexOutOfBoundsException("Too many bytes to read - Need " +
                    (index + length) + ", maximum is " + buffer.limit());
        }
        tmpBuf.get(dst, dstIndex, length);
    }

    @Override
    public void getBytes(int index, ByteBuffer dst) {
        int bytesToCopy = Math.min(capacity() - index, dst.remaining());
        try {
            tmpBuf.clear().position(index).limit(index + bytesToCopy);
        } catch (IllegalArgumentException e) {
            throw new IndexOutOfBoundsException("Too many bytes to read - Need " +
                    (index + bytesToCopy) + ", maximum is " + buffer.limit());
        }
        dst.put(tmpBuf);
    }

    @Override
    public void setByte(int index, int value) {
        buffer.put(index, (byte) value);
    }

    @Override
    public void setShort(int index, int value) {
        buffer.putShort(index, (short) value);
    }

    @Override
    public void setMedium(int index, int value) {
        // Write a 24-bit big-endian value as three single-byte writes.
        setByte(index, (byte) (value >>> 16));
        setByte(index + 1, (byte) (value >>> 8));
        setByte(index + 2, (byte) (value >>> 0));
    }

    @Override
    public void setInt(int index, int value) {
        buffer.putInt(index, value);
    }

    @Override
    public void setLong(int index, long value) {
        buffer.putLong(index, value);
    }

    @Override
    public void setBytes(int index, ByteBuf src, int srcIndex, int length) {
        if (src instanceof DirectByteBuf) {
            // Direct-to-direct: copy via the source's scratch buffer.
            DirectByteBuf bbsrc = (DirectByteBuf) src;
            ByteBuffer data = bbsrc.tmpBuf;
            data.clear().position(srcIndex).limit(srcIndex + length);
            setBytes(index, data);
        } else if (buffer.hasArray()) {
            src.getBytes(srcIndex, buffer.array(), index + buffer.arrayOffset(), length);
        } else {
            src.getBytes(srcIndex, this, index, length);
        }
    }

    @Override
    public void setBytes(int index, byte[] src, int srcIndex, int length) {
        tmpBuf.clear().position(index).limit(index + length);
        tmpBuf.put(src, srcIndex, length);
    }

    @Override
    public void setBytes(int index, ByteBuffer src) {
        if (src == tmpBuf) {
            // Copying from our own scratch buffer would corrupt its
            // position/limit mid-copy; operate on a duplicate instead.
            src = src.duplicate();
        }

        tmpBuf.clear().position(index).limit(index + src.remaining());
        tmpBuf.put(src);
    }

    @Override
    public void getBytes(int index, OutputStream out, int length) throws IOException {
        if (length == 0) {
            return;
        }

        if (buffer.hasArray()) {
            out.write(buffer.array(), index + buffer.arrayOffset(), length);
        } else {
            byte[] tmp = new byte[length];
            tmpBuf.clear().position(index);
            tmpBuf.get(tmp);
            out.write(tmp);
        }
    }

    @Override
    public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
        if (length == 0) {
            return 0;
        }

        tmpBuf.clear().position(index).limit(index + length);
        return out.write(tmpBuf);
    }

    /**
     * Transfers up to {@code length} bytes from the stream into this buffer
     * at {@code index}.
     *
     * @return the number of bytes actually read, or {@code -1} on EOF
     */
    @Override
    public int setBytes(int index, InputStream in, int length) throws IOException {
        if (buffer.hasArray()) {
            return in.read(buffer.array(), buffer.arrayOffset() + index, length);
        } else {
            byte[] tmp = new byte[length];
            int readBytes = in.read(tmp);
            // Fixed: copy only the bytes actually read. The previous code
            // wrote the entire zero-padded temp array, clobbering up to
            // 'length' bytes of the buffer even on a short read or EOF.
            if (readBytes > 0) {
                tmpBuf.clear().position(index);
                tmpBuf.put(tmp, 0, readBytes);
            }
            return readBytes;
        }
    }

    @Override
    public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException {
        tmpBuf.clear().position(index).limit(index + length);
        try {
            return in.read(tmpBuf);
        } catch (ClosedChannelException e) {
            // A closed channel is reported as EOF rather than an error.
            return -1;
        }
    }

    @Override
    public boolean hasNioBuffer() {
        return true;
    }

    @Override
    public ByteBuffer nioBuffer(int index, int length) {
        if (index == 0 && length == capacity()) {
            return buffer.duplicate();
        } else {
            return ((ByteBuffer) tmpBuf.clear().position(index).limit(index + length)).slice();
        }
    }

    @Override
    public ByteBuf copy(int index, int length) {
        ByteBuffer src;
        try {
            src = (ByteBuffer) tmpBuf.clear().position(index).limit(index + length);
        } catch (IllegalArgumentException e) {
            throw new IndexOutOfBoundsException("Too many bytes to read - Need " + (index + length));
        }

        // Preserve directness and byte order in the copy.
        ByteBuffer dst =
                src.isDirect()? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
        dst.put(src);
        dst.order(order());
        dst.clear();
        return new DirectByteBuf(dst, maxCapacity());
    }

    @Override
    public Unsafe unsafe() {
        return unsafe;
    }

    /** Reference-counted lifecycle management for the direct memory. */
    private class DirectUnsafe implements Unsafe {
        @Override
        public ByteBuffer nioBuffer() {
            return tmpBuf;
        }

        @Override
        public ByteBuf newBuffer(int initialCapacity) {
            return new DirectByteBuf(initialCapacity, Math.max(initialCapacity, maxCapacity()));
        }

        @Override
        public void discardSomeReadBytes() {
            final int readerIndex = readerIndex();
            if (readerIndex == writerIndex()) {
                // Fully consumed - discarding is a cheap index reset.
                discardReadBytes();
                return;
            }

            // Otherwise only compact once at least half the buffer is stale,
            // to amortize the copy cost.
            if (readerIndex > 0 && readerIndex >= capacity >>> 1) {
                discardReadBytes();
            }
        }

        @Override
        public void acquire() {
            if (refCnt <= 0) {
                throw new IllegalStateException();
            }
            refCnt ++;
        }

        @Override
        public void release() {
            if (refCnt <= 0) {
                throw new IllegalStateException();
            }
            refCnt --;
            if (refCnt == 0) {
                // Last reference gone: free the native memory (unless it is
                // user-owned) and drop the buffer references.
                if (doNotFree) {
                    doNotFree = false;
                } else {
                    freeDirect(buffer);
                }

                buffer = null;
                tmpBuf = null;
            }
        }
    }
}

View File

@ -1,202 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.lang.ref.ReferenceQueue;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* A {@link ByteBufFactory} which pre-allocates a large chunk of direct
* buffer and returns its slice on demand. Direct buffers are reclaimed via
* {@link ReferenceQueue} in most JDK implementations, and therefore they are
* deallocated less efficiently than an ordinary heap buffer. Consequently,
* a user will get {@link OutOfMemoryError} when one tries to allocate small
* direct buffers more often than the GC throughput of direct buffers, which
* is much lower than the GC throughput of heap buffers. This factory avoids
* this problem by allocating a large chunk of pre-allocated direct buffer and
* reducing the number of the garbage collected internal direct buffer objects.
*/
public class DirectByteBufFactory extends AbstractByteBufFactory {

    // Shared singletons, one per byte order.
    private static final DirectByteBufFactory INSTANCE_BE =
            new DirectByteBufFactory(ByteOrder.BIG_ENDIAN);

    private static final DirectByteBufFactory INSTANCE_LE =
            new DirectByteBufFactory(ByteOrder.LITTLE_ENDIAN);

    /** Returns the big-endian singleton. */
    public static ByteBufFactory getInstance() {
        return INSTANCE_BE;
    }

    /** Returns the singleton for the given default byte order. */
    public static ByteBufFactory getInstance(ByteOrder defaultEndianness) {
        if (defaultEndianness == ByteOrder.BIG_ENDIAN) {
            return INSTANCE_BE;
        } else if (defaultEndianness == ByteOrder.LITTLE_ENDIAN) {
            return INSTANCE_LE;
        } else if (defaultEndianness == null) {
            throw new NullPointerException("defaultEndianness");
        } else {
            // ByteOrder has exactly two instances, so this is unreachable.
            throw new IllegalStateException("Should not reach here");
        }
    }

    // Separate locks so big- and little-endian allocations do not contend.
    private final Object bigEndianLock = new Object();
    private final Object littleEndianLock = new Object();
    // Size of each pre-allocated chunk that small buffers are sliced from.
    private final int preallocatedBufCapacity;
    // Current big-endian chunk and the offset of its first unused byte.
    private ByteBuf preallocatedBEBuf;
    private int preallocatedBEBufPos;
    // Current little-endian chunk and the offset of its first unused byte.
    private ByteBuf preallocatedLEBuf;
    private int preallocatedLEBufPos;

    /**
     * Creates a new factory whose default {@link ByteOrder} is
     * {@link ByteOrder#BIG_ENDIAN}.
     */
    public DirectByteBufFactory() {
        this(ByteOrder.BIG_ENDIAN);
    }

    /**
     * Creates a new factory whose default {@link ByteOrder} is
     * {@link ByteOrder#BIG_ENDIAN}.
     */
    public DirectByteBufFactory(int preallocatedBufferCapacity) {
        this(ByteOrder.BIG_ENDIAN, preallocatedBufferCapacity);
    }

    /**
     * Creates a new factory with the specified default {@link ByteOrder}.
     *
     * @param defaultOrder the default {@link ByteOrder} of this factory
     */
    public DirectByteBufFactory(ByteOrder defaultOrder) {
        // 1 MiB default chunk size.
        this(defaultOrder, 1048576);
    }

    /**
     * Creates a new factory with the specified default {@link ByteOrder}.
     *
     * @param defaultOrder the default {@link ByteOrder} of this factory
     */
    public DirectByteBufFactory(ByteOrder defaultOrder, int preallocatedBufferCapacity) {
        super(defaultOrder);
        if (preallocatedBufferCapacity <= 0) {
            throw new IllegalArgumentException(
                    "preallocatedBufCapacity must be greater than 0: " + preallocatedBufferCapacity);
        }

        preallocatedBufCapacity = preallocatedBufferCapacity;
    }

    @Override
    public ByteBuf getBuffer(ByteOrder order, int capacity) {
        if (order == null) {
            throw new NullPointerException("order");
        }
        if (capacity < 0) {
            throw new IllegalArgumentException("capacity: " + capacity);
        }
        if (capacity == 0) {
            return Unpooled.EMPTY_BUFFER;
        }
        // Requests at least as large as a whole chunk bypass the pool.
        if (capacity >= preallocatedBufCapacity) {
            return Unpooled.directBuffer(capacity).order(order);
        }

        // Small requests are served as slices of the shared chunk.
        ByteBuf slice;
        if (order == ByteOrder.BIG_ENDIAN) {
            slice = allocateBigEndianBuffer(capacity);
        } else {
            slice = allocateLittleEndianBuffer(capacity);
        }
        slice.clear();
        return slice;
    }

    @Override
    public ByteBuf getBuffer(ByteOrder order, byte[] array, int offset, int length) {
        if (array == null) {
            throw new NullPointerException("array");
        }
        if (offset < 0) {
            throw new IndexOutOfBoundsException("offset: " + offset);
        }
        if (length == 0) {
            return Unpooled.EMPTY_BUFFER;
        }
        if (offset + length > array.length) {
            throw new IndexOutOfBoundsException("length: " + length);
        }

        // Copy the array content into a pooled buffer.
        ByteBuf buf = getBuffer(order, length);
        buf.writeBytes(array, offset, length);
        return buf;
    }

    @Override
    public ByteBuf getBuffer(ByteBuffer nioBuffer) {
        // Writable direct buffers can be wrapped without copying.
        if (!nioBuffer.isReadOnly() && nioBuffer.isDirect()) {
            return Unpooled.wrappedBuffer(nioBuffer);
        }

        // Otherwise copy, restoring the source position afterwards so the
        // caller's buffer is left untouched.
        ByteBuf buf = getBuffer(nioBuffer.order(), nioBuffer.remaining());
        int pos = nioBuffer.position();
        buf.writeBytes(nioBuffer);
        nioBuffer.position(pos);
        return buf;
    }

    // Slices 'capacity' bytes off the current big-endian chunk, allocating a
    // fresh chunk when there is none or the current one has too little room.
    private ByteBuf allocateBigEndianBuffer(int capacity) {
        ByteBuf slice;
        synchronized (bigEndianLock) {
            if (preallocatedBEBuf == null) {
                preallocatedBEBuf = Unpooled.directBuffer(preallocatedBufCapacity);
                slice = preallocatedBEBuf.slice(0, capacity);
                preallocatedBEBufPos = capacity;
            } else if (preallocatedBEBuf.capacity() - preallocatedBEBufPos >= capacity) {
                slice = preallocatedBEBuf.slice(preallocatedBEBufPos, capacity);
                preallocatedBEBufPos += capacity;
            } else {
                preallocatedBEBuf = Unpooled.directBuffer(preallocatedBufCapacity);
                slice = preallocatedBEBuf.slice(0, capacity);
                preallocatedBEBufPos = capacity;
            }
        }
        return slice;
    }

    // Little-endian counterpart of allocateBigEndianBuffer.
    private ByteBuf allocateLittleEndianBuffer(int capacity) {
        ByteBuf slice;
        synchronized (littleEndianLock) {
            if (preallocatedLEBuf == null) {
                preallocatedLEBuf = Unpooled.directBuffer(
                        preallocatedBufCapacity).order(ByteOrder.LITTLE_ENDIAN);
                slice = preallocatedLEBuf.slice(0, capacity);
                preallocatedLEBufPos = capacity;
            } else if (preallocatedLEBuf.capacity() - preallocatedLEBufPos >= capacity) {
                slice = preallocatedLEBuf.slice(preallocatedLEBufPos, capacity);
                preallocatedLEBufPos += capacity;
            } else {
                preallocatedLEBuf = Unpooled.directBuffer(
                        preallocatedBufCapacity).order(ByteOrder.LITTLE_ENDIAN);
                slice = preallocatedLEBuf.slice(0, capacity);
                preallocatedLEBufPos = capacity;
            }
        }
        return slice;
    }
}

View File

@ -30,18 +30,21 @@ import java.nio.channels.ScatteringByteChannel;
*/
public class DuplicatedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
private final ByteBuf buffer;
private final Unsafe unsafe = new DuplicatedUnsafe();
final ByteBuf buffer;
public DuplicatedByteBuf(ByteBuf buffer) {
super(buffer.order());
super(buffer.order(), buffer.maxCapacity());
if (buffer instanceof DuplicatedByteBuf) {
this.buffer = ((DuplicatedByteBuf) buffer).buffer;
} else {
this.buffer = buffer;
setIndex(buffer.readerIndex(), buffer.writerIndex());
}
private DuplicatedByteBuf(DuplicatedByteBuf buffer) {
super(buffer.buffer.order());
this.buffer = buffer.buffer;
setIndex(buffer.readerIndex(), buffer.writerIndex());
buffer.unsafe().acquire();
}
@Override
@ -49,11 +52,6 @@ public class DuplicatedByteBuf extends AbstractByteBuf implements WrappedByteBuf
return buffer;
}
@Override
public ByteBufFactory factory() {
return buffer.factory();
}
@Override
public boolean isDirect() {
return buffer.isDirect();
@ -64,6 +62,11 @@ public class DuplicatedByteBuf extends AbstractByteBuf implements WrappedByteBuf
return buffer.capacity();
}
@Override
public void capacity(int newCapacity) {
buffer.capacity(newCapacity);
}
@Override
public boolean hasArray() {
return buffer.hasArray();
@ -207,4 +210,37 @@ public class DuplicatedByteBuf extends AbstractByteBuf implements WrappedByteBuf
public ByteBuffer nioBuffer(int index, int length) {
return buffer.nioBuffer(index, length);
}
@Override
public Unsafe unsafe() {
return unsafe;
}
private final class DuplicatedUnsafe implements Unsafe {
@Override
public ByteBuffer nioBuffer() {
return buffer.unsafe().nioBuffer();
}
@Override
public ByteBuf newBuffer(int initialCapacity) {
return buffer.unsafe().newBuffer(initialCapacity);
}
@Override
public void discardSomeReadBytes() {
throw new UnsupportedOperationException();
}
@Override
public void acquire() {
buffer.unsafe().acquire();
}
@Override
public void release() {
buffer.unsafe().release();
}
}
}

View File

@ -1,320 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
/**
* A dynamic capacity buffer which increases its capacity as needed. It is
* recommended to use {@link Unpooled#dynamicBuffer(int)} instead of
* calling the constructor explicitly.
*/
public class DynamicByteBuf extends AbstractByteBuf {

    // Factory used both for the initial allocation and for each growth step.
    private final ByteBufFactory factory;
    // Current backing buffer; replaced wholesale when the buffer grows.
    private ByteBuf buffer;

    /** Creates a big-endian heap-backed dynamic buffer. */
    public DynamicByteBuf(int estimatedLength) {
        this(estimatedLength, HeapByteBufFactory.getInstance(ByteOrder.BIG_ENDIAN));
    }

    /**
     * Creates a dynamic buffer whose storage is obtained from the given
     * factory, starting at {@code estimatedLength} bytes.
     */
    public DynamicByteBuf(int estimatedLength, ByteBufFactory factory) {
        super(ByteOrder.BIG_ENDIAN);
        if (estimatedLength < 0) {
            throw new IllegalArgumentException("estimatedLength: " + estimatedLength);
        }
        if (factory == null) {
            throw new NullPointerException("factory");
        }
        this.factory = factory;
        buffer = factory.getBuffer(ByteOrder.BIG_ENDIAN, estimatedLength);
    }

    /**
     * Grows the backing buffer (doubling its capacity) until at least
     * {@code minWritableBytes} can be written, copying the existing content
     * into the replacement buffer.
     */
    @Override
    public void ensureWritableBytes(int minWritableBytes) {
        if (minWritableBytes <= writableBytes()) {
            return;
        }

        int newCapacity;
        if (capacity() == 0) {
            newCapacity = 1;
        } else {
            newCapacity = capacity();
        }
        int minNewCapacity = writerIndex() + minWritableBytes;
        while (newCapacity < minNewCapacity) {
            newCapacity <<= 1;

            // Check if we exceeded the maximum size of 2gb if this is the case then
            // newCapacity == 0
            //
            // https://github.com/netty/netty/issues/258
            if (newCapacity == 0) {
                throw new IllegalStateException("buffer capacity over 2GiB");
            }
        }

        ByteBuf newBuffer = factory().getBuffer(order(), newCapacity);
        newBuffer.writeBytes(buffer, 0, writerIndex());
        buffer = newBuffer;
    }

    @Override
    public ByteBufFactory factory() {
        return factory;
    }

    // ---- Simple delegation to the current backing buffer ----

    @Override
    public boolean isDirect() {
        return buffer.isDirect();
    }

    @Override
    public int capacity() {
        return buffer.capacity();
    }

    @Override
    public boolean hasArray() {
        return buffer.hasArray();
    }

    @Override
    public byte[] array() {
        return buffer.array();
    }

    @Override
    public int arrayOffset() {
        return buffer.arrayOffset();
    }

    @Override
    public byte getByte(int index) {
        return buffer.getByte(index);
    }

    @Override
    public short getShort(int index) {
        return buffer.getShort(index);
    }

    @Override
    public int getUnsignedMedium(int index) {
        return buffer.getUnsignedMedium(index);
    }

    @Override
    public int getInt(int index) {
        return buffer.getInt(index);
    }

    @Override
    public long getLong(int index) {
        return buffer.getLong(index);
    }

    @Override
    public void getBytes(int index, byte[] dst, int dstIndex, int length) {
        buffer.getBytes(index, dst, dstIndex, length);
    }

    @Override
    public void getBytes(int index, ByteBuf dst, int dstIndex, int length) {
        buffer.getBytes(index, dst, dstIndex, length);
    }

    @Override
    public void getBytes(int index, ByteBuffer dst) {
        buffer.getBytes(index, dst);
    }

    @Override
    public int getBytes(int index, GatheringByteChannel out, int length)
            throws IOException {
        return buffer.getBytes(index, out, length);
    }

    @Override
    public void getBytes(int index, OutputStream out, int length)
            throws IOException {
        buffer.getBytes(index, out, length);
    }

    @Override
    public void setByte(int index, int value) {
        buffer.setByte(index, value);
    }

    @Override
    public void setShort(int index, int value) {
        buffer.setShort(index, value);
    }

    @Override
    public void setMedium(int index, int value) {
        buffer.setMedium(index, value);
    }

    @Override
    public void setInt(int index, int value) {
        buffer.setInt(index, value);
    }

    @Override
    public void setLong(int index, long value) {
        buffer.setLong(index, value);
    }

    @Override
    public void setBytes(int index, byte[] src, int srcIndex, int length) {
        buffer.setBytes(index, src, srcIndex, length);
    }

    @Override
    public void setBytes(int index, ByteBuf src, int srcIndex, int length) {
        buffer.setBytes(index, src, srcIndex, length);
    }

    @Override
    public void setBytes(int index, ByteBuffer src) {
        buffer.setBytes(index, src);
    }

    @Override
    public int setBytes(int index, InputStream in, int length)
            throws IOException {
        return buffer.setBytes(index, in, length);
    }

    @Override
    public int setBytes(int index, ScatteringByteChannel in, int length)
            throws IOException {
        return buffer.setBytes(index, in, length);
    }

    // ---- Write operations: grow the buffer first, then delegate ----

    @Override
    public void writeByte(int value) {
        ensureWritableBytes(1);
        super.writeByte(value);
    }

    @Override
    public void writeShort(int value) {
        ensureWritableBytes(2);
        super.writeShort(value);
    }

    @Override
    public void writeMedium(int value) {
        ensureWritableBytes(3);
        super.writeMedium(value);
    }

    @Override
    public void writeInt(int value) {
        ensureWritableBytes(4);
        super.writeInt(value);
    }

    @Override
    public void writeLong(long value) {
        ensureWritableBytes(8);
        super.writeLong(value);
    }

    @Override
    public void writeBytes(byte[] src, int srcIndex, int length) {
        ensureWritableBytes(length);
        super.writeBytes(src, srcIndex, length);
    }

    @Override
    public void writeBytes(ByteBuf src, int srcIndex, int length) {
        ensureWritableBytes(length);
        super.writeBytes(src, srcIndex, length);
    }

    @Override
    public void writeBytes(ByteBuffer src) {
        ensureWritableBytes(src.remaining());
        super.writeBytes(src);
    }

    @Override
    public int writeBytes(InputStream in, int length) throws IOException {
        ensureWritableBytes(length);
        return super.writeBytes(in, length);
    }

    @Override
    public int writeBytes(ScatteringByteChannel in, int length)
            throws IOException {
        ensureWritableBytes(length);
        return super.writeBytes(in, length);
    }

    @Override
    public void writeZero(int length) {
        ensureWritableBytes(length);
        super.writeZero(length);
    }

    @Override
    public ByteBuf duplicate() {
        return new DuplicatedByteBuf(this);
    }

    /**
     * Copies the given range into a new dynamic buffer. The copy's estimated
     * length is at least 64 bytes to avoid tiny initial allocations.
     */
    @Override
    public ByteBuf copy(int index, int length) {
        DynamicByteBuf copiedBuffer = new DynamicByteBuf(Math.max(length, 64), factory());
        copiedBuffer.buffer = buffer.copy(index, length);
        copiedBuffer.setIndex(0, length);
        return copiedBuffer;
    }

    @Override
    public ByteBuf slice(int index, int length) {
        if (index == 0) {
            if (length == 0) {
                return Unpooled.EMPTY_BUFFER;
            }
            // A slice starting at 0 only needs the length capped.
            return new TruncatedByteBuf(this, length);
        } else {
            if (length == 0) {
                return Unpooled.EMPTY_BUFFER;
            }
            return new SlicedByteBuf(this, index, length);
        }
    }

    @Override
    public boolean hasNioBuffer() {
        return buffer.hasNioBuffer();
    }

    @Override
    public ByteBuffer nioBuffer(int index, int length) {
        return buffer.nioBuffer(index, length);
    }
}

View File

@ -29,46 +29,48 @@ import java.nio.channels.ScatteringByteChannel;
*/
public class HeapByteBuf extends AbstractByteBuf {
/**
* The underlying heap byte array that this buffer is wrapping.
*/
protected final byte[] array;
private final Unsafe unsafe = new HeapUnsafe();
protected final ByteBuffer nioBuf;
private byte[] array;
private ByteBuffer nioBuf;
/**
* Creates a new heap buffer with a newly allocated byte array.
*
* @param length the length of the new byte array
* @param initialCapacity the initial capacity of the underlying byte array
* @param maxCapacity the max capacity of the underlying byte array
*/
public HeapByteBuf(int length) {
this(new byte[length], 0, 0);
public HeapByteBuf(int initialCapacity, int maxCapacity) {
this(new byte[initialCapacity], 0, 0, maxCapacity);
}
/**
* Creates a new heap buffer with an existing byte array.
*
* @param array the byte array to wrap
* @param initialArray the initial underlying byte array
* @param maxCapacity the max capacity of the underlying byte array
*/
public HeapByteBuf(byte[] array) {
this(array, 0, array.length);
public HeapByteBuf(byte[] initialArray, int maxCapacity) {
this(initialArray, 0, initialArray.length, maxCapacity);
}
/**
* Creates a new heap buffer with an existing byte array.
*
* @param array the byte array to wrap
* @param readerIndex the initial reader index of this buffer
* @param writerIndex the initial writer index of this buffer
*/
protected HeapByteBuf(byte[] array, int readerIndex, int writerIndex) {
super(ByteOrder.BIG_ENDIAN);
if (array == null) {
throw new NullPointerException("array");
private HeapByteBuf(byte[] initialArray, int readerIndex, int writerIndex, int maxCapacity) {
super(ByteOrder.BIG_ENDIAN, maxCapacity);
if (initialArray == null) {
throw new NullPointerException("initialArray");
}
this.array = array;
if (initialArray.length > maxCapacity) {
throw new IllegalArgumentException(String.format(
"initialCapacity(%d) > maxCapacity(%d)", initialArray.length, maxCapacity));
}
setArray(initialArray);
setIndex(readerIndex, writerIndex);
nioBuf = ByteBuffer.wrap(array);
}
private void setArray(byte[] initialArray) {
array = initialArray;
nioBuf = ByteBuffer.wrap(initialArray);
}
@Override
@ -81,6 +83,33 @@ public class HeapByteBuf extends AbstractByteBuf {
return array.length;
}
@Override
public void capacity(int newCapacity) {
if (newCapacity < 0 || newCapacity > maxCapacity()) {
throw new IllegalArgumentException("newCapacity: " + newCapacity);
}
int oldCapacity = array.length;
if (newCapacity > oldCapacity) {
byte[] newArray = new byte[newCapacity];
System.arraycopy(array, readerIndex(), newArray, readerIndex(), readableBytes());
setArray(newArray);
} else if (newCapacity < oldCapacity) {
byte[] newArray = new byte[newCapacity];
int readerIndex = readerIndex();
if (readerIndex < newCapacity) {
int writerIndex = writerIndex();
if (writerIndex > newCapacity) {
writerIndex(writerIndex = newCapacity);
}
System.arraycopy(array, readerIndex, newArray, readerIndex, writerIndex - readerIndex);
} else {
setIndex(newCapacity, newCapacity);
}
setArray(newArray);
}
}
@Override
public boolean hasArray() {
return true;
@ -170,27 +199,6 @@ public class HeapByteBuf extends AbstractByteBuf {
}
}
@Override
public ByteBuf slice(int index, int length) {
if (index == 0) {
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
if (length == array.length) {
ByteBuf slice = duplicate();
slice.setIndex(0, length);
return slice;
} else {
return new TruncatedByteBuf(this, length);
}
} else {
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
return new SlicedByteBuf(this, index, length);
}
}
@Override
public boolean hasNioBuffer() {
return true;
@ -198,12 +206,7 @@ public class HeapByteBuf extends AbstractByteBuf {
@Override
public ByteBuffer nioBuffer(int index, int length) {
return ByteBuffer.wrap(array, index, length).order(order());
}
@Override
public ByteBufFactory factory() {
return HeapByteBufFactory.getInstance(ByteOrder.BIG_ENDIAN);
return ByteBuffer.wrap(array, index, length);
}
@Override
@ -271,11 +274,6 @@ public class HeapByteBuf extends AbstractByteBuf {
array[index + 7] = (byte) (value >>> 0);
}
@Override
public ByteBuf duplicate() {
return new HeapByteBuf(array, readerIndex(), writerIndex());
}
@Override
public ByteBuf copy(int index, int length) {
if (index < 0 || length < 0 || index + length > array.length) {
@ -285,6 +283,56 @@ public class HeapByteBuf extends AbstractByteBuf {
byte[] copiedArray = new byte[length];
System.arraycopy(array, index, copiedArray, 0, length);
return new HeapByteBuf(copiedArray);
return new HeapByteBuf(copiedArray, maxCapacity());
}
@Override
public Unsafe unsafe() {
return unsafe;
}
private class HeapUnsafe implements Unsafe {
@Override
public ByteBuffer nioBuffer() {
return nioBuf;
}
@Override
public ByteBuf newBuffer(int initialCapacity) {
return new HeapByteBuf(initialCapacity, Math.max(initialCapacity, maxCapacity()));
}
@Override
public void discardSomeReadBytes() {
final int readerIndex = readerIndex();
if (readerIndex == writerIndex()) {
discardReadBytes();
return;
}
if (readerIndex > 0 && readerIndex >= capacity() >>> 1) {
discardReadBytes();
}
}
@Override
public void acquire() {
if (refCnt <= 0) {
throw new IllegalStateException();
}
refCnt ++;
}
@Override
public void release() {
if (refCnt <= 0) {
throw new IllegalStateException();
}
refCnt --;
if (refCnt == 0) {
array = null;
nioBuf = null;
}
}
}
}

View File

@ -1,89 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* A {@link ByteBufFactory} which merely allocates a heap buffer with
* the specified capacity. {@link HeapByteBufFactory} should perform
* very well in most situations because it relies on the JVM garbage collector,
* which is highly optimized for heap allocation.
*/
public class HeapByteBufFactory extends AbstractByteBufFactory {
private static final HeapByteBufFactory INSTANCE_BE =
new HeapByteBufFactory(ByteOrder.BIG_ENDIAN);
private static final HeapByteBufFactory INSTANCE_LE =
new HeapByteBufFactory(ByteOrder.LITTLE_ENDIAN);
public static ByteBufFactory getInstance() {
return INSTANCE_BE;
}
public static ByteBufFactory getInstance(ByteOrder endianness) {
if (endianness == ByteOrder.BIG_ENDIAN) {
return INSTANCE_BE;
} else if (endianness == ByteOrder.LITTLE_ENDIAN) {
return INSTANCE_LE;
} else if (endianness == null) {
throw new NullPointerException("endianness");
} else {
throw new IllegalStateException("Should not reach here");
}
}
/**
* Creates a new factory whose default {@link ByteOrder} is
* {@link ByteOrder#BIG_ENDIAN}.
*/
public HeapByteBufFactory() {
}
/**
* Creates a new factory with the specified default {@link ByteOrder}.
*
* @param defaultOrder the default {@link ByteOrder} of this factory
*/
public HeapByteBufFactory(ByteOrder defaultOrder) {
super(defaultOrder);
}
@Override
public ByteBuf getBuffer(ByteOrder order, int capacity) {
return Unpooled.buffer(capacity).order(order);
}
@Override
public ByteBuf getBuffer(ByteOrder order, byte[] array, int offset, int length) {
return Unpooled.wrappedBuffer(array, offset, length).order(order);
}
@Override
public ByteBuf getBuffer(ByteBuffer nioBuffer) {
if (nioBuffer.hasArray()) {
return Unpooled.wrappedBuffer(nioBuffer);
}
ByteBuf buf = getBuffer(nioBuffer.order(), nioBuffer.remaining());
int pos = nioBuffer.position();
buf.writeBytes(nioBuffer);
nioBuffer.position(pos);
return buf;
}
}

View File

@ -1,320 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
/**
* A NIO {@link ByteBuffer} based buffer. It is recommended to use {@link Unpooled#directBuffer(int)}
* and {@link Unpooled#wrappedBuffer(ByteBuffer)} instead of calling the
* constructor explicitly.
*/
public class NioBufferBackedByteBuf extends AbstractByteBuf {
private final ByteBuffer buffer;
private final ByteBuffer tmpBuf;
private final int capacity;
/**
* Creates a new buffer which wraps the specified buffer's slice.
*/
public NioBufferBackedByteBuf(ByteBuffer buffer) {
super(buffer.order());
this.buffer = buffer.slice().order(order());
tmpBuf = this.buffer.duplicate();
capacity = buffer.remaining();
writerIndex(capacity);
}
private NioBufferBackedByteBuf(NioBufferBackedByteBuf buffer) {
super(buffer.order());
this.buffer = buffer.buffer;
tmpBuf = this.buffer.duplicate();
capacity = buffer.capacity;
setIndex(buffer.readerIndex(), buffer.writerIndex());
}
@Override
public ByteBufFactory factory() {
if (buffer.isDirect()) {
return DirectByteBufFactory.getInstance(order());
} else {
return HeapByteBufFactory.getInstance(order());
}
}
@Override
public boolean isDirect() {
return buffer.isDirect();
}
@Override
public int capacity() {
return capacity;
}
@Override
public boolean hasArray() {
return buffer.hasArray();
}
@Override
public byte[] array() {
return buffer.array();
}
@Override
public int arrayOffset() {
return buffer.arrayOffset();
}
@Override
public byte getByte(int index) {
return buffer.get(index);
}
@Override
public short getShort(int index) {
return buffer.getShort(index);
}
@Override
public int getUnsignedMedium(int index) {
return (getByte(index) & 0xff) << 16 |
(getByte(index + 1) & 0xff) << 8 |
(getByte(index + 2) & 0xff) << 0;
}
@Override
public int getInt(int index) {
return buffer.getInt(index);
}
@Override
public long getLong(int index) {
return buffer.getLong(index);
}
@Override
public void getBytes(int index, ByteBuf dst, int dstIndex, int length) {
if (dst instanceof NioBufferBackedByteBuf) {
NioBufferBackedByteBuf bbdst = (NioBufferBackedByteBuf) dst;
ByteBuffer data = bbdst.tmpBuf;
data.clear().position(dstIndex).limit(dstIndex + length);
getBytes(index, data);
} else if (buffer.hasArray()) {
dst.setBytes(dstIndex, buffer.array(), index + buffer.arrayOffset(), length);
} else {
dst.setBytes(dstIndex, this, index, length);
}
}
@Override
public void getBytes(int index, byte[] dst, int dstIndex, int length) {
try {
tmpBuf.clear().position(index).limit(index + length);
} catch (IllegalArgumentException e) {
throw new IndexOutOfBoundsException("Too many bytes to read - Need "
+ (index + length) + ", maximum is " + buffer.limit());
}
tmpBuf.get(dst, dstIndex, length);
}
@Override
public void getBytes(int index, ByteBuffer dst) {
int bytesToCopy = Math.min(capacity() - index, dst.remaining());
try {
tmpBuf.clear().position(index).limit(index + bytesToCopy);
} catch (IllegalArgumentException e) {
throw new IndexOutOfBoundsException("Too many bytes to read - Need "
+ (index + bytesToCopy) + ", maximum is " + buffer.limit());
}
dst.put(tmpBuf);
}
@Override
public void setByte(int index, int value) {
buffer.put(index, (byte) value);
}
@Override
public void setShort(int index, int value) {
buffer.putShort(index, (short) value);
}
@Override
public void setMedium(int index, int value) {
setByte(index, (byte) (value >>> 16));
setByte(index + 1, (byte) (value >>> 8));
setByte(index + 2, (byte) (value >>> 0));
}
@Override
public void setInt(int index, int value) {
buffer.putInt(index, value);
}
@Override
public void setLong(int index, long value) {
buffer.putLong(index, value);
}
@Override
public void setBytes(int index, ByteBuf src, int srcIndex, int length) {
if (src instanceof NioBufferBackedByteBuf) {
NioBufferBackedByteBuf bbsrc = (NioBufferBackedByteBuf) src;
ByteBuffer data = bbsrc.tmpBuf;
data.clear().position(srcIndex).limit(srcIndex + length);
setBytes(index, data);
} else if (buffer.hasArray()) {
src.getBytes(srcIndex, buffer.array(), index + buffer.arrayOffset(), length);
} else {
src.getBytes(srcIndex, this, index, length);
}
}
@Override
public void setBytes(int index, byte[] src, int srcIndex, int length) {
tmpBuf.clear().position(index).limit(index + length);
tmpBuf.put(src, srcIndex, length);
}
@Override
public void setBytes(int index, ByteBuffer src) {
if (src == tmpBuf) {
src = src.duplicate();
}
tmpBuf.clear().position(index).limit(index + src.remaining());
tmpBuf.put(src);
}
@Override
public void getBytes(int index, OutputStream out, int length) throws IOException {
if (length == 0) {
return;
}
if (buffer.hasArray()) {
out.write(
buffer.array(),
index + buffer.arrayOffset(),
length);
} else {
byte[] tmp = new byte[length];
tmpBuf.clear().position(index);
tmpBuf.get(tmp);
out.write(tmp);
}
}
@Override
public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
if (length == 0) {
return 0;
}
tmpBuf.clear().position(index).limit(index + length);
return out.write(tmpBuf);
}
@Override
public int setBytes(int index, InputStream in, int length)
throws IOException {
if (buffer.hasArray()) {
return in.read(buffer.array(), buffer.arrayOffset() + index, length);
} else {
byte[] tmp = new byte[length];
int readBytes = in.read(tmp);
tmpBuf.clear().position(index);
tmpBuf.put(tmp);
return readBytes;
}
}
@Override
public int setBytes(int index, ScatteringByteChannel in, int length)
throws IOException {
tmpBuf.clear().position(index).limit(index + length);
try {
return in.read(tmpBuf);
} catch (ClosedChannelException e) {
return -1;
}
}
@Override
public boolean hasNioBuffer() {
return true;
}
@Override
public ByteBuffer nioBuffer(int index, int length) {
if (index == 0 && length == capacity()) {
return buffer.duplicate().order(order());
} else {
return ((ByteBuffer) tmpBuf.clear().position(
index).limit(index + length)).slice().order(order());
}
}
@Override
public ByteBuf slice(int index, int length) {
if (index == 0 && length == capacity()) {
ByteBuf slice = duplicate();
slice.setIndex(0, length);
return slice;
} else {
if (index >= 0 && length == 0) {
return Unpooled.EMPTY_BUFFER;
}
return new NioBufferBackedByteBuf(
((ByteBuffer) tmpBuf.clear().position(
index).limit(index + length)).order(order()));
}
}
@Override
public ByteBuf duplicate() {
return new NioBufferBackedByteBuf(this);
}
@Override
public ByteBuf copy(int index, int length) {
ByteBuffer src;
try {
src = (ByteBuffer) tmpBuf.clear().position(index).limit(index + length);
} catch (IllegalArgumentException e) {
throw new IndexOutOfBoundsException("Too many bytes to read - Need "
+ (index + length));
}
ByteBuffer dst = src.isDirect() ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
dst.put(src);
dst.order(order());
dst.clear();
return new NioBufferBackedByteBuf(dst);
}
}

View File

@ -33,13 +33,13 @@ public class ReadOnlyByteBuf extends AbstractByteBuf implements WrappedByteBuf {
private final ByteBuf buffer;
public ReadOnlyByteBuf(ByteBuf buffer) {
super(buffer.order());
super(buffer.order(), buffer.maxCapacity());
this.buffer = buffer;
setIndex(buffer.readerIndex(), buffer.writerIndex());
}
private ReadOnlyByteBuf(ReadOnlyByteBuf buffer) {
super(buffer.buffer.order());
super(buffer.buffer.order(), buffer.maxCapacity());
this.buffer = buffer.buffer;
setIndex(buffer.readerIndex(), buffer.writerIndex());
}
@ -49,11 +49,6 @@ public class ReadOnlyByteBuf extends AbstractByteBuf implements WrappedByteBuf {
return buffer;
}
@Override
public ByteBufFactory factory() {
return buffer.factory();
}
@Override
public boolean isDirect() {
return buffer.isDirect();
@ -212,4 +207,14 @@ public class ReadOnlyByteBuf extends AbstractByteBuf implements WrappedByteBuf {
public int capacity() {
return buffer.capacity();
}
@Override
public void capacity(int newCapacity) {
throw new ReadOnlyBufferException();
}
@Override
public Unsafe unsafe() {
return buffer.unsafe();
}
}

View File

@ -31,12 +31,13 @@ import java.nio.channels.ScatteringByteChannel;
*/
public class SlicedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
private final Unsafe unsafe = new SlicedUnsafe();
private final ByteBuf buffer;
private final int adjustment;
private final int length;
public SlicedByteBuf(ByteBuf buffer, int index, int length) {
super(buffer.order());
super(buffer.order(), length);
if (index < 0 || index > buffer.capacity()) {
throw new IndexOutOfBoundsException("Invalid index of " + index
+ ", maximum is " + buffer.capacity());
@ -47,10 +48,21 @@ public class SlicedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
+ (index + length) + ", maximum is " + buffer.capacity());
}
if (buffer instanceof SlicedByteBuf) {
this.buffer = ((SlicedByteBuf) buffer).buffer;
adjustment = ((SlicedByteBuf) buffer).adjustment + index;
} else if (buffer instanceof DuplicatedByteBuf) {
this.buffer = ((DuplicatedByteBuf) buffer).buffer;
adjustment = index;
} else {
this.buffer = buffer;
adjustment = index;
}
this.length = length;
writerIndex(length);
buffer.unsafe().acquire();
}
@Override
@ -58,11 +70,6 @@ public class SlicedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
return buffer;
}
@Override
public ByteBufFactory factory() {
return buffer.factory();
}
@Override
public boolean isDirect() {
return buffer.isDirect();
@ -73,6 +80,11 @@ public class SlicedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
return length;
}
@Override
public void capacity(int newCapacity) {
throw new UnsupportedOperationException("sliced buffer");
}
@Override
public boolean hasArray() {
return buffer.hasArray();
@ -265,4 +277,37 @@ public class SlicedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
+ (startIndex + length) + ", maximum is " + capacity());
}
}
@Override
public Unsafe unsafe() {
return unsafe;
}
private final class SlicedUnsafe implements Unsafe {
@Override
public ByteBuffer nioBuffer() {
return buffer.nioBuffer(adjustment, length);
}
@Override
public ByteBuf newBuffer(int initialCapacity) {
return buffer.unsafe().newBuffer(initialCapacity);
}
@Override
public void discardSomeReadBytes() {
throw new UnsupportedOperationException();
}
@Override
public void acquire() {
buffer.unsafe().acquire();
}
@Override
public void release() {
buffer.unsafe().release();
}
}
}

View File

@ -73,13 +73,18 @@ public class SwappedByteBuf implements WrappedByteBuf {
}
@Override
public ByteBufFactory factory() {
return buf.factory();
public int capacity() {
return buf.capacity();
}
@Override
public int capacity() {
return buf.capacity();
public void capacity(int newCapacity) {
buf.capacity(newCapacity);
}
@Override
public int maxCapacity() {
return buf.maxCapacity();
}
@Override
@ -677,6 +682,11 @@ public class SwappedByteBuf implements WrappedByteBuf {
return buf.toString(index, length, charset);
}
@Override
public Unsafe unsafe() {
return buf.unsafe();
}
@Override
public int hashCode() {
return buf.hashCode();

View File

@ -1,258 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
/**
* A derived buffer which hides its parent's tail data beyond a certain index.
* It is recommended to use {@link ByteBuf#slice()} and
* {@link ByteBuf#slice(int, int)} instead of calling the constructor
* explicitly.
*/
public class TruncatedByteBuf extends AbstractByteBuf implements WrappedByteBuf {
private final ByteBuf buffer;
private final int length;
public TruncatedByteBuf(ByteBuf buffer, int length) {
super(buffer.order());
if (length > buffer.capacity()) {
throw new IndexOutOfBoundsException("Length is too large, got "
+ length + " but can't go higher than " + buffer.capacity());
}
this.buffer = buffer;
this.length = length;
writerIndex(length);
}
@Override
public ByteBuf unwrap() {
return buffer;
}
@Override
public ByteBufFactory factory() {
return buffer.factory();
}
@Override
public boolean isDirect() {
return buffer.isDirect();
}
@Override
public int capacity() {
return length;
}
@Override
public boolean hasArray() {
return buffer.hasArray();
}
@Override
public byte[] array() {
return buffer.array();
}
@Override
public int arrayOffset() {
return buffer.arrayOffset();
}
@Override
public byte getByte(int index) {
checkIndex(index);
return buffer.getByte(index);
}
@Override
public short getShort(int index) {
checkIndex(index, 2);
return buffer.getShort(index);
}
@Override
public int getUnsignedMedium(int index) {
checkIndex(index, 3);
return buffer.getUnsignedMedium(index);
}
@Override
public int getInt(int index) {
checkIndex(index, 4);
return buffer.getInt(index);
}
@Override
public long getLong(int index) {
checkIndex(index, 8);
return buffer.getLong(index);
}
@Override
public ByteBuf duplicate() {
ByteBuf duplicate = new TruncatedByteBuf(buffer, length);
duplicate.setIndex(readerIndex(), writerIndex());
return duplicate;
}
@Override
public ByteBuf copy(int index, int length) {
checkIndex(index, length);
return buffer.copy(index, length);
}
@Override
public ByteBuf slice(int index, int length) {
checkIndex(index, length);
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
return buffer.slice(index, length);
}
@Override
public void getBytes(int index, ByteBuf dst, int dstIndex, int length) {
checkIndex(index, length);
buffer.getBytes(index, dst, dstIndex, length);
}
@Override
public void getBytes(int index, byte[] dst, int dstIndex, int length) {
checkIndex(index, length);
buffer.getBytes(index, dst, dstIndex, length);
}
@Override
public void getBytes(int index, ByteBuffer dst) {
checkIndex(index, dst.remaining());
buffer.getBytes(index, dst);
}
@Override
public void setByte(int index, int value) {
checkIndex(index);
buffer.setByte(index, value);
}
@Override
public void setShort(int index, int value) {
checkIndex(index, 2);
buffer.setShort(index, value);
}
@Override
public void setMedium(int index, int value) {
checkIndex(index, 3);
buffer.setMedium(index, value);
}
@Override
public void setInt(int index, int value) {
checkIndex(index, 4);
buffer.setInt(index, value);
}
@Override
public void setLong(int index, long value) {
checkIndex(index, 8);
buffer.setLong(index, value);
}
@Override
public void setBytes(int index, byte[] src, int srcIndex, int length) {
checkIndex(index, length);
buffer.setBytes(index, src, srcIndex, length);
}
@Override
public void setBytes(int index, ByteBuf src, int srcIndex, int length) {
checkIndex(index, length);
buffer.setBytes(index, src, srcIndex, length);
}
@Override
public void setBytes(int index, ByteBuffer src) {
checkIndex(index, src.remaining());
buffer.setBytes(index, src);
}
@Override
public void getBytes(int index, OutputStream out, int length)
throws IOException {
checkIndex(index, length);
buffer.getBytes(index, out, length);
}
@Override
public int getBytes(int index, GatheringByteChannel out, int length)
throws IOException {
checkIndex(index, length);
return buffer.getBytes(index, out, length);
}
@Override
public int setBytes(int index, InputStream in, int length)
throws IOException {
checkIndex(index, length);
return buffer.setBytes(index, in, length);
}
@Override
public int setBytes(int index, ScatteringByteChannel in, int length)
throws IOException {
checkIndex(index, length);
return buffer.setBytes(index, in, length);
}
@Override
public boolean hasNioBuffer() {
return buffer.hasNioBuffer();
}
@Override
public ByteBuffer nioBuffer(int index, int length) {
checkIndex(index, length);
return buffer.nioBuffer(index, length);
}
private void checkIndex(int index) {
if (index < 0 || index >= capacity()) {
throw new IndexOutOfBoundsException("Invalid index of " + index
+ ", maximum is " + capacity());
}
}
private void checkIndex(int index, int length) {
if (length < 0) {
throw new IllegalArgumentException(
"length is negative: " + length);
}
if (index + length > capacity()) {
throw new IndexOutOfBoundsException("Invalid index of "
+ (index + length) + ", maximum is " + capacity());
}
}
}

View File

@ -36,7 +36,6 @@ import java.util.Queue;
*
* {@link ByteBuf} heapBuffer = buffer(128);
* {@link ByteBuf} directBuffer = directBuffer(256);
* {@link ByteBuf} dynamicBuffer = dynamicBuffer(512);
* {@link ByteBuf} wrappedBuffer = wrappedBuffer(new byte[128], new byte[256]);
* {@link ByteBuf} copiedBuffe r = copiedBuffer({@link ByteBuffer}.allocate(128));
* </pre>
@ -48,9 +47,6 @@ import java.util.Queue;
* <ul>
* <li>{@link #buffer(int)} allocates a new fixed-capacity heap buffer.</li>
* <li>{@link #directBuffer(int)} allocates a new fixed-capacity direct buffer.</li>
* <li>{@link #dynamicBuffer(int)} allocates a new dynamic-capacity heap
* buffer, whose capacity increases automatically as needed by a write
* operation.</li>
* </ul>
*
* <h3>Creating a wrapped buffer</h3>
@ -94,7 +90,7 @@ public final class Unpooled {
/**
* A buffer whose capacity is {@code 0}.
*/
public static final ByteBuf EMPTY_BUFFER = new HeapByteBuf(0) {
public static final ByteBuf EMPTY_BUFFER = new HeapByteBuf(0, 0) {
@Override
public ByteBuf order(ByteOrder endianness) {
if (endianness == null) {
@ -119,16 +115,50 @@ public final class Unpooled {
return new QueueBackedMessageBuf<T>(queue);
}
/**
* Creates a new big-endian Java heap buffer with reasonably small initial capacity, which
* expands its capacity boundlessly on demand.
*/
public static ByteBuf buffer() {
return buffer(256, Integer.MAX_VALUE);
}
/**
* Creates a new big-endian direct buffer with resaonably small initial capacity, which
* expands its capacity boundlessly on demand.
*/
public static ByteBuf directBuffer() {
return directBuffer(256, Integer.MAX_VALUE);
}
/**
* Creates a new big-endian Java heap buffer with the specified {@code capacity}, which
* expands its capacity boundlessly on demand. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf buffer(int initialCapacity) {
return buffer(initialCapacity, Integer.MAX_VALUE);
}
/**
* Creates a new big-endian direct buffer with the specified {@code capacity}, which
* expands its capacity boundlessly on demand. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf directBuffer(int initialCapacity) {
return directBuffer(initialCapacity, Integer.MAX_VALUE);
}
/**
* Creates a new big-endian Java heap buffer with the specified
* {@code capacity}. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf buffer(int capacity) {
if (capacity == 0) {
public static ByteBuf buffer(int initialCapacity, int maxCapacity) {
if (initialCapacity == 0 && maxCapacity == 0) {
return EMPTY_BUFFER;
}
return new HeapByteBuf(capacity);
return new HeapByteBuf(initialCapacity, maxCapacity);
}
/**
@ -136,60 +166,11 @@ public final class Unpooled {
* {@code capacity}. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf directBuffer(int capacity) {
if (capacity == 0) {
public static ByteBuf directBuffer(int initialCapacity, int maxCapacity) {
if (initialCapacity == 0 && maxCapacity == 0) {
return EMPTY_BUFFER;
}
ByteBuf buffer = new NioBufferBackedByteBuf(ByteBuffer.allocateDirect(capacity));
buffer.clear();
return buffer;
}
/**
* Creates a new big-endian dynamic buffer whose estimated data length is
* {@code 256} bytes. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf dynamicBuffer() {
return dynamicBuffer(256);
}
/**
* Creates a new big-endian dynamic buffer whose estimated data length is
* {@code 256} bytes. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf dynamicBuffer(ByteBufFactory factory) {
if (factory == null) {
throw new NullPointerException("factory");
}
return new DynamicByteBuf(256, factory);
}
/**
* Creates a new big-endian dynamic buffer with the specified estimated
* data length. More accurate estimation yields less unexpected
* reallocation overhead. The new buffer's {@code readerIndex} and
* {@code writerIndex} are {@code 0}.
*/
public static ByteBuf dynamicBuffer(int estimatedLength) {
return new DynamicByteBuf(estimatedLength);
}
/**
* Creates a new big-endian dynamic buffer with the specified estimated
* data length using the specified factory. More accurate estimation yields
* less unexpected reallocation overhead. The new buffer's {@code readerIndex}
* and {@code writerIndex} are {@code 0}.
*/
public static ByteBuf dynamicBuffer(int estimatedLength, ByteBufFactory factory) {
if (factory == null) {
throw new NullPointerException("factory");
}
return new DynamicByteBuf(estimatedLength, factory);
return new DirectByteBuf(initialCapacity, maxCapacity);
}
/**
@ -201,7 +182,7 @@ public final class Unpooled {
if (array.length == 0) {
return EMPTY_BUFFER;
}
return new HeapByteBuf(array);
return new HeapByteBuf(array, array.length);
}
/**
@ -210,24 +191,16 @@ public final class Unpooled {
* content will be visible to the returned buffer.
*/
public static ByteBuf wrappedBuffer(byte[] array, int offset, int length) {
if (offset == 0) {
if (length == array.length) {
if (length == 0) {
return EMPTY_BUFFER;
}
if (offset == 0 && length == array.length) {
return wrappedBuffer(array);
} else {
if (length == 0) {
return EMPTY_BUFFER;
} else {
return new TruncatedByteBuf(wrappedBuffer(array), length);
}
}
} else {
if (length == 0) {
return EMPTY_BUFFER;
} else {
return new SlicedByteBuf(wrappedBuffer(array), offset, length);
}
}
}
/**
* Creates a new buffer which wraps the specified NIO buffer's current
@ -244,7 +217,7 @@ public final class Unpooled {
buffer.arrayOffset() + buffer.position(),
buffer.remaining()).order(buffer.order());
} else {
return new NioBufferBackedByteBuf(buffer);
return new DirectByteBuf(buffer, buffer.remaining());
}
}
@ -267,6 +240,33 @@ public final class Unpooled {
* content will be visible to the returned buffer.
*/
public static ByteBuf wrappedBuffer(byte[]... arrays) {
return wrappedBuffer(16, arrays);
}
/**
* Creates a new big-endian composite buffer which wraps the readable bytes of the
* specified buffers without copying them. A modification on the content
* of the specified buffers will be visible to the returned buffer.
*/
public static ByteBuf wrappedBuffer(ByteBuf... buffers) {
return wrappedBuffer(16, buffers);
}
/**
* Creates a new big-endian composite buffer which wraps the slices of the specified
* NIO buffers without copying them. A modification on the content of the
* specified buffers will be visible to the returned buffer.
*/
public static ByteBuf wrappedBuffer(ByteBuffer... buffers) {
return wrappedBuffer(16, buffers);
}
/**
* Creates a new big-endian composite buffer which wraps the specified
* arrays without copying them. A modification on the specified arrays'
* content will be visible to the returned buffer.
*/
public static ByteBuf wrappedBuffer(int maxNumComponents, byte[]... arrays) {
switch (arrays.length) {
case 0:
break;
@ -286,119 +286,87 @@ public final class Unpooled {
components.add(wrappedBuffer(a));
}
}
return compositeBuffer(BIG_ENDIAN, components);
if (!components.isEmpty()) {
return new DefaultCompositeByteBuf(maxNumComponents, components);
}
}
return EMPTY_BUFFER;
}
/**
* Creates a new composite buffer which wraps the specified
* components without copying them. A modification on the specified components'
* content will be visible to the returned buffer.
*/
private static ByteBuf compositeBuffer(ByteOrder endianness, List<ByteBuf> components) {
switch (components.size()) {
case 0:
return EMPTY_BUFFER;
case 1:
return components.get(0);
default:
return new CompositeByteBuf(endianness, components);
}
}
/**
* Creates a new composite buffer which wraps the readable bytes of the
* Creates a new big-endian composite buffer which wraps the readable bytes of the
* specified buffers without copying them. A modification on the content
* of the specified buffers will be visible to the returned buffer.
*
* @throws IllegalArgumentException
* if the specified buffers' endianness are different from each
* other
*/
public static ByteBuf wrappedBuffer(ByteBuf... buffers) {
public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuf... buffers) {
switch (buffers.length) {
case 0:
break;
case 1:
if (buffers[0].readable()) {
return wrappedBuffer(buffers[0]);
return wrappedBuffer(buffers[0].order(BIG_ENDIAN));
}
break;
default:
ByteOrder order = null;
final List<ByteBuf> components = new ArrayList<ByteBuf>(buffers.length);
for (ByteBuf c: buffers) {
if (c == null) {
break;
}
if (c.readable()) {
if (order != null) {
if (!order.equals(c.order())) {
throw new IllegalArgumentException("inconsistent byte order");
}
} else {
order = c.order();
}
if (c instanceof CompositeByteBuf) {
// Expand nested composition.
components.addAll(
((CompositeByteBuf) c).decompose(
c.readerIndex(), c.readableBytes()));
} else {
// An ordinary buffer (non-composite)
components.add(c.slice());
for (ByteBuf b: buffers) {
if (b.readable()) {
return new DefaultCompositeByteBuf(maxNumComponents, buffers);
}
}
}
return compositeBuffer(order, components);
}
return EMPTY_BUFFER;
}
/**
* Creates a new composite buffer which wraps the slices of the specified
* Creates a new big-endian composite buffer which wraps the slices of the specified
* NIO buffers without copying them. A modification on the content of the
* specified buffers will be visible to the returned buffer.
*
* @throws IllegalArgumentException
* if the specified buffers' endianness are different from each
* other
*/
public static ByteBuf wrappedBuffer(ByteBuffer... buffers) {
public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuffer... buffers) {
switch (buffers.length) {
case 0:
break;
case 1:
if (buffers[0].hasRemaining()) {
return wrappedBuffer(buffers[0]);
return wrappedBuffer(buffers[0].order(BIG_ENDIAN));
}
break;
default:
ByteOrder order = null;
// Get the list of the component, while guessing the byte order.
final List<ByteBuf> components = new ArrayList<ByteBuf>(buffers.length);
for (ByteBuffer b: buffers) {
if (b == null) {
break;
}
if (b.hasRemaining()) {
if (order != null) {
if (!order.equals(b.order())) {
throw new IllegalArgumentException("inconsistent byte order");
}
} else {
order = b.order();
}
components.add(wrappedBuffer(b));
if (b.remaining() > 0) {
components.add(wrappedBuffer(b.order(BIG_ENDIAN)));
}
}
return compositeBuffer(order, components);
if (!components.isEmpty()) {
return new DefaultCompositeByteBuf(maxNumComponents, components);
}
}
return EMPTY_BUFFER;
}
/**
* Returns a new big-endian composite buffer with no components.
*/
public static CompositeByteBuf compositeBuffer() {
return compositeBuffer(16);
}
/**
* Returns a new big-endian composite buffer with no components.
*/
public static CompositeByteBuf compositeBuffer(int maxNumComponents) {
return new DefaultCompositeByteBuf(maxNumComponents);
}
/**
* Creates a new big-endian buffer whose content is a copy of the
* specified {@code array}. The new buffer's {@code readerIndex} and
@ -408,7 +376,7 @@ public final class Unpooled {
if (array.length == 0) {
return EMPTY_BUFFER;
}
return new HeapByteBuf(array.clone());
return wrappedBuffer(array.clone());
}
/**

View File

@ -51,7 +51,6 @@ public abstract class AbstractChannelBufferTest {
return true;
}
@Before
public void init() {
buffer = newBuffer(CAPACITY);

View File

@ -46,31 +46,34 @@ public abstract class AbstractCompositeChannelBufferTest extends
@Override
protected ByteBuf newBuffer(int length) {
buffers = new ArrayList<ByteBuf>();
for (int i = 0; i < length; i += 10) {
for (int i = 0; i < length + 45; i += 45) {
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[1]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[1]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[2]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[2]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[3]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[3]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[4]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[4]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[5]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[5]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[6]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[6]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[7]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[7]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[8]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[8]));
buffers.add(Unpooled.EMPTY_BUFFER);
buffers.add(Unpooled.wrappedBuffer(new byte[9]).order(order));
buffers.add(Unpooled.wrappedBuffer(new byte[9]));
buffers.add(Unpooled.EMPTY_BUFFER);
}
buffer = Unpooled.wrappedBuffer(buffers.toArray(new ByteBuf[buffers.size()]));
buffer.writerIndex(length);
buffer = Unpooled.wrappedBuffer(buffer);
buffer = Unpooled.wrappedBuffer(
Integer.MAX_VALUE, buffers.toArray(new ByteBuf[buffers.size()])).order(order);
// Truncate to the requested capacity.
buffer.capacity(length);
assertEquals(length, buffer.capacity());
assertEquals(length, buffer.readableBytes());
assertFalse(buffer.writable());
@ -90,6 +93,29 @@ public abstract class AbstractCompositeChannelBufferTest extends
return false;
}
/**
* Tests the "getBufferFor" method
*/
@Test
public void testComponentAtOffset() {
CompositeByteBuf buf = (CompositeByteBuf) Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4, 5 }, new byte[] {4, 5, 6, 7, 8, 9, 26});
//Ensure that a random place will be fine
assertEquals(buf.componentAtOffset(2).capacity(), 5);
//Loop through each byte
byte index = 0;
while (index < buf.capacity()) {
ByteBuf _buf = buf.componentAtOffset(index++);
assertNotNull(_buf);
assertTrue(_buf.capacity() > 0);
assertNotNull(_buf.getByte(0));
assertNotNull(_buf.getByte(_buf.readableBytes() - 1));
}
}
@Test
public void testDiscardReadBytes3() {
ByteBuf a, b;
@ -129,10 +155,57 @@ public abstract class AbstractCompositeChannelBufferTest extends
assertTrue(ByteBufUtil.equals(a, b));
}
@Test
public void testAutoConsolidation() {
CompositeByteBuf buf = compositeBuffer(2);
buf.addComponent(wrappedBuffer(new byte[] { 1 }));
assertEquals(1, buf.numComponents());
buf.addComponent(wrappedBuffer(new byte[] { 2, 3 }));
assertEquals(2, buf.numComponents());
buf.addComponent(wrappedBuffer(new byte[] { 4, 5, 6 }));
assertEquals(1, buf.numComponents());
assertTrue(buf.hasArray());
assertNotNull(buf.array());
assertEquals(0, buf.arrayOffset());
}
@Test
public void testFullConsolidation() {
CompositeByteBuf buf = compositeBuffer(Integer.MAX_VALUE);
buf.addComponent(wrappedBuffer(new byte[] { 1 }));
buf.addComponent(wrappedBuffer(new byte[] { 2, 3 }));
buf.addComponent(wrappedBuffer(new byte[] { 4, 5, 6 }));
buf.consolidate();
assertEquals(1, buf.numComponents());
assertTrue(buf.hasArray());
assertNotNull(buf.array());
assertEquals(0, buf.arrayOffset());
}
@Test
public void testRangedConsolidation() {
CompositeByteBuf buf = compositeBuffer(Integer.MAX_VALUE);
buf.addComponent(wrappedBuffer(new byte[] { 1 }));
buf.addComponent(wrappedBuffer(new byte[] { 2, 3 }));
buf.addComponent(wrappedBuffer(new byte[] { 4, 5, 6 }));
buf.addComponent(wrappedBuffer(new byte[] { 7, 8, 9, 10 }));
buf.consolidate(1, 2);
assertEquals(3, buf.numComponents());
assertEquals(wrappedBuffer(new byte[] { 1 }), buf.component(0));
assertEquals(wrappedBuffer(new byte[] { 2, 3, 4, 5, 6 }), buf.component(1));
assertEquals(wrappedBuffer(new byte[] { 7, 8, 9, 10 }), buf.component(2));
}
@Test
public void testCompositeWrappedBuffer() {
ByteBuf header = dynamicBuffer(12).order(order);
ByteBuf payload = dynamicBuffer(512).order(order);
ByteBuf header = buffer(12).order(order);
ByteBuf payload = buffer(512).order(order);
header.writeBytes(new byte[12]);
payload.writeBytes(new byte[512]);
@ -206,6 +279,7 @@ public abstract class AbstractCompositeChannelBufferTest extends
wrappedBuffer(new byte[] { 0, 1, 2, 3, 4, 6, 7, 8, 5, 9, 10, 11 }, 6, 5).order(order));
assertFalse(ByteBufUtil.equals(a, b));
}
@Test
public void testWrappedBuffer() {

View File

@ -40,6 +40,6 @@ public class BigEndianHeapChannelBufferTest extends AbstractChannelBufferTest {
@Test(expected = NullPointerException.class)
public void shouldNotAllowNullInConstructor() {
new HeapByteBuf(null);
new HeapByteBuf(null, 0);
}
}

View File

@ -1,44 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import java.nio.ByteBuffer;
import org.junit.Test;
/**
* Tests ByteBuffer backed heap channel buffers
*/
public class ByteBufferBackedHeapChannelBufferTest extends AbstractChannelBufferTest {
private ByteBuf buffer;
@Override
protected ByteBuf newBuffer(int length) {
buffer = new NioBufferBackedByteBuf(ByteBuffer.allocate(length));
return buffer;
}
@Override
protected ByteBuf[] components() {
return new ByteBuf[] { buffer };
}
@Test(expected = NullPointerException.class)
public void shouldNotAllowNullInConstructor() {
new NioBufferBackedByteBuf(null);
}
}

View File

@ -29,7 +29,7 @@ public class ChannelBufferStreamTest {
@Test
public void testAll() throws Exception {
ByteBuf buf = Unpooled.dynamicBuffer();
ByteBuf buf = Unpooled.buffer(0, 65536);
try {
new ByteBufOutputStream(null);

View File

@ -37,8 +37,8 @@ public class ChannelBuffersTest {
@Test
public void testCompositeWrappedBuffer() {
ByteBuf header = dynamicBuffer(12);
ByteBuf payload = dynamicBuffer(512);
ByteBuf header = buffer(12);
ByteBuf payload = buffer(512);
header.writeBytes(new byte[12]);
payload.writeBytes(new byte[512]);
@ -156,10 +156,6 @@ public class ChannelBuffersTest {
@Test
public void shouldReturnEmptyBufferWhenLengthIsZero() {
assertSame(EMPTY_BUFFER, buffer(0));
assertSame(EMPTY_BUFFER, buffer(0).order(LITTLE_ENDIAN));
assertSame(EMPTY_BUFFER, directBuffer(0));
assertSame(EMPTY_BUFFER, wrappedBuffer(new byte[0]));
assertSame(EMPTY_BUFFER, wrappedBuffer(new byte[0]).order(LITTLE_ENDIAN));
assertSame(EMPTY_BUFFER, wrappedBuffer(new byte[8], 0, 0));

View File

@ -1,61 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import static org.junit.Assert.*;
import org.junit.Test;
/**
* Tests dynamic channel buffers
*/
public class DynamicChannelBufferTest extends AbstractChannelBufferTest {
private ByteBuf buffer;
@Override
protected ByteBuf newBuffer(int length) {
buffer = Unpooled.dynamicBuffer(length);
assertEquals(0, buffer.readerIndex());
assertEquals(0, buffer.writerIndex());
assertEquals(length, buffer.capacity());
return buffer;
}
@Override
protected ByteBuf[] components() {
return new ByteBuf[] { buffer };
}
@Test
public void shouldNotFailOnInitialIndexUpdate() {
new DynamicByteBuf(10).setIndex(0, 10);
}
@Test
public void shouldNotFailOnInitialIndexUpdate2() {
new DynamicByteBuf(10).writerIndex(10);
}
@Test
public void shouldNotFailOnInitialIndexUpdate3() {
ByteBuf buf = new DynamicByteBuf(10);
buf.writerIndex(10);
buf.readerIndex(10);
}
}

View File

@ -76,6 +76,7 @@ public class ReadOnlyChannelBufferTest {
public void shouldForwardReadCallsBlindly() throws Exception {
ByteBuf buf = createStrictMock(ByteBuf.class);
expect(buf.order()).andReturn(BIG_ENDIAN).anyTimes();
expect(buf.maxCapacity()).andReturn(65536).anyTimes();
expect(buf.readerIndex()).andReturn(0).anyTimes();
expect(buf.writerIndex()).andReturn(0).anyTimes();
expect(buf.capacity()).andReturn(0).anyTimes();

View File

@ -1,46 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer;
import static org.junit.Assert.*;
import org.junit.Test;
/**
* Tests truncated channel buffers
*/
public class TruncatedChannelBufferTest extends AbstractChannelBufferTest {
private ByteBuf buffer;
@Override
protected ByteBuf newBuffer(int length) {
buffer = Unpooled.wrappedBuffer(
new byte[length * 2], 0, length);
assertEquals(length, buffer.writerIndex());
return buffer;
}
@Override
protected ByteBuf[] components() {
return new ByteBuf[] { buffer };
}
@Test(expected = NullPointerException.class)
public void shouldNotAllowNullInConstructor() {
new TruncatedByteBuf(null, 0);
}
}

View File

@ -20,7 +20,7 @@
<parent>
<groupId>io.netty</groupId>
<artifactId>netty-parent</artifactId>
<version>4.0.0.Alpha1-SNAPSHOT</version>
<version>4.0.0.Alpha2-SNAPSHOT</version>
</parent>
<artifactId>netty-codec-http</artifactId>

View File

@ -0,0 +1,120 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import static io.netty.handler.codec.http.CookieEncoderUtil.*;
/**
* Encodes client-side {@link Cookie}s into an HTTP header value. This encoder can encode
* the HTTP cookie version 0, 1, and 2.
* <pre>
* // Example
* {@link HttpRequest} req = ...;
* res.setHeader("Cookie", {@link ClientCookieEncoder}.encode("JSESSIONID", "1234"));
* </pre>
*
* @see CookieDecoder
*
* @apiviz.stereotype utility
* @apiviz.has io.netty.handler.codec.http.Cookie oneway - - encodes
*/
public final class ClientCookieEncoder {
/**
* Encodes the specified cookie into an HTTP header value.
*/
public static String encode(String name, String value) {
return encode(new DefaultCookie(name, value));
}
public static String encode(Cookie cookie) {
if (cookie == null) {
throw new NullPointerException("cookie");
}
StringBuilder buf = new StringBuilder();
encode(buf, cookie);
return stripTrailingSeparator(buf);
}
public static String encode(Cookie... cookies) {
if (cookies == null) {
throw new NullPointerException("cookies");
}
StringBuilder buf = new StringBuilder();
for (Cookie c: cookies) {
if (c == null) {
break;
}
encode(buf, c);
}
return stripTrailingSeparator(buf);
}
public static String encode(Iterable<Cookie> cookies) {
if (cookies == null) {
throw new NullPointerException("cookies");
}
StringBuilder buf = new StringBuilder();
for (Cookie c: cookies) {
if (c == null) {
break;
}
encode(buf, c);
}
return stripTrailingSeparator(buf);
}
private static void encode(StringBuilder buf, Cookie c) {
if (c.getVersion() >= 1) {
add(buf, '$' + CookieHeaderNames.VERSION, 1);
}
add(buf, c.getName(), c.getValue());
if (c.getPath() != null) {
add(buf, '$' + CookieHeaderNames.PATH, c.getPath());
}
if (c.getDomain() != null) {
add(buf, '$' + CookieHeaderNames.DOMAIN, c.getDomain());
}
if (c.getVersion() >= 1) {
if (!c.getPorts().isEmpty()) {
buf.append('$');
buf.append(CookieHeaderNames.PORT);
buf.append((char) HttpConstants.EQUALS);
buf.append((char) HttpConstants.DOUBLE_QUOTE);
for (int port: c.getPorts()) {
buf.append(port);
buf.append((char) HttpConstants.COMMA);
}
buf.setCharAt(buf.length() - 1, (char) HttpConstants.DOUBLE_QUOTE);
buf.append((char) HttpConstants.SEMICOLON);
buf.append((char) HttpConstants.SP);
}
}
}
private ClientCookieEncoder() {
// Unused
}
}

View File

@ -18,134 +18,191 @@ package io.netty.handler.codec.http;
import java.util.Set;
/**
* An HTTP <a href="http://en.wikipedia.org/wiki/HTTP_cookie">Cookie</a>.
* An interface defining an
* <a href="http://en.wikipedia.org/wiki/HTTP_cookie">HTTP cookie</a>.
*/
public interface Cookie extends Comparable<Cookie> {
/**
* Returns the name of this cookie.
* Returns the name of this {@link Cookie}.
*
* @return The name of this {@link Cookie}
*/
String getName();
/**
* Returns the value of this cookie.
* Returns the value of this {@link Cookie}.
*
* @return The value of this {@link Cookie}
*/
String getValue();
/**
* Sets the value of this cookie.
* Sets the value of this {@link Cookie}.
*
* @param value The value to set
*/
void setValue(String value);
/**
* Returns the domain of this cookie.
* Returns the domain of this {@link Cookie}.
*
* @return The domain of this {@link Cookie}
*/
String getDomain();
/**
* Sets the domain of this cookie.
* Sets the domain of this {@link Cookie}.
*
* @param domain The domain to use
*/
void setDomain(String domain);
/**
* Returns the path of this cookie.
* Returns the path of this {@link Cookie}.
*
* @return The {@link Cookie}'s path
*/
String getPath();
/**
* Sets the path of this cookie.
* Sets the path of this {@link Cookie}.
*
* @param path The path to use for this {@link Cookie}
*/
void setPath(String path);
/**
* Returns the comment of this cookie.
* Returns the comment of this {@link Cookie}.
*
* @return The comment of this {@link Cookie}
*/
String getComment();
/**
* Sets the comment of this cookie.
* Sets the comment of this {@link Cookie}.
*
* @param comment The comment to use
*/
void setComment(String comment);
/**
* Returns the max age of this cookie in seconds.
* Returns the maximum age of this {@link Cookie} in seconds or {@link Long#MIN_VALUE} if unspecified
*
* @return The maximum age of this {@link Cookie}
*/
long getMaxAge();
/**
* Sets the max age of this cookie in seconds. If {@code 0} is specified,
* this cookie will be removed by browser because it will be expired
* immediately. If {@code -1} is specified, this cookie will be removed
* when a user terminates browser.
* Sets the maximum age of this {@link Cookie} in seconds.
* If an age of {@code 0} is specified, this {@link Cookie} will be
* automatically removed by browser because it will expire immediately.
* If {@link Long#MIN_VALUE} is specified, this {@link Cookie} will be removed when the
* browser is closed.
*
* @param maxAge The maximum age of this {@link Cookie} in seconds
*/
void setMaxAge(long maxAge);
/**
* Returns the version of this cookie.
* Returns the version of this {@link Cookie}.
*
* @return The version of this {@link Cookie}
*/
int getVersion();
/**
* Sets the version of this cookie.
* Sets the version of this {@link Cookie}.
*
* @param version The new version to use
*/
void setVersion(int version);
/**
* Returns the secure flag of this cookie.
* Checks to see if this {@link Cookie} is secure
*
* @return True if this {@link Cookie} is secure, otherwise false
*/
boolean isSecure();
/**
* Sets the secure flag of this cookie.
* Sets the security status of this {@link Cookie}
*
* @param secure True if this {@link Cookie} is to be secure, otherwise false
*/
void setSecure(boolean secure);
/**
* Returns if this cookie cannot be accessed through client side script.
* This flag works only if the browser supports it. For more information,
* see <a href="http://www.owasp.org/index.php/HTTPOnly">here</a>.
* Checks to see if this {@link Cookie} can only be accessed via HTTP.
* If this returns true, the {@link Cookie} cannot be accessed through
* client side script - But only if the browser supports it.
* For more information, please look <a href="http://www.owasp.org/index.php/HTTPOnly">here</a>
*
* @return True if this {@link Cookie} is HTTP-only or false if it isn't
*/
boolean isHttpOnly();
/**
* Sets if this cookie cannot be accessed through client side script.
* This flag works only if the browser supports it. For more information,
* see <a href="http://www.owasp.org/index.php/HTTPOnly">here</a>.
* Determines if this {@link Cookie} is HTTP only.
* If set to true, this {@link Cookie} cannot be accessed by a client
* side script. However, this works only if the browser supports it.
* For for information, please look
* <a href="http://www.owasp.org/index.php/HTTPOnly">here</a>.
*
* @param httpOnly True if the {@link Cookie} is HTTP only, otherwise false.
*/
void setHttpOnly(boolean httpOnly);
/**
* Returns the comment URL of this cookie.
* Returns the comment URL of this {@link Cookie}.
*
* @return The comment URL of this {@link Cookie}
*/
String getCommentUrl();
/**
* Sets the comment URL of this cookie.
* Sets the comment URL of this {@link Cookie}.
*
* @param commentUrl The comment URL to use
*/
void setCommentUrl(String commentUrl);
/**
* Returns the discard flag of this cookie.
* Checks to see if this {@link Cookie} is to be discarded by the browser
* at the end of the current session.
*
* @return True if this {@link Cookie} is to be discarded, otherwise false
*/
boolean isDiscard();
/**
* Sets the discard flag of this cookie.
* Sets the discard flag of this {@link Cookie}.
* If set to true, this {@link Cookie} will be discarded by the browser
* at the end of the current session
*
* @param discard True if the {@link Cookie} is to be discarded
*/
void setDiscard(boolean discard);
/**
* Returns the ports of this cookie.
* Returns the ports that this {@link Cookie} can be accessed on.
*
* @return The {@link Set} of ports that this {@link Cookie} can use
*/
Set<Integer> getPorts();
/**
* Sets the ports of this cookie.
* Sets the ports that this {@link Cookie} can be accessed on.
*
* @param ports The ports that this {@link Cookie} can be accessed on
*/
void setPorts(int... ports);
/**
* Sets the ports of this cookie.
* Sets the ports that this {@link Cookie} can be accessed on.
*
* @param ports The {@link Iterable} collection of ports that this
* {@link Cookie} can be accessed on.
*/
void setPorts(Iterable<Integer> ports);
}

View File

@ -21,8 +21,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Decodes an HTTP header value into {@link Cookie}s. This decoder can decode
@ -34,46 +32,22 @@ import java.util.regex.Pattern;
* Set&lt;{@link Cookie}&gt; cookies = new {@link CookieDecoder}().decode(value);
* </pre>
*
* @see CookieEncoder
* @see ClientCookieEncoder
* @see ServerCookieEncoder
*
* @apiviz.stereotype utility
* @apiviz.has io.netty.handler.codec.http.Cookie oneway - - decodes
*/
public class CookieDecoder {
private static final Pattern PATTERN =
Pattern.compile(
// See: https://github.com/netty/netty/pull/96
//"(?:\\s|[;,])*\\$*([^;=]+)(?:=(?:[\"']((?:\\\\.|[^\"])*)[\"']|([^;,]*)))?(\\s*(?:[;,]+\\s*|$))"
"(?:\\s|[;,])*\\$*([^;=]+)(?:=(?:[\"']((?:\\\\.|[^\"])*)[\"']|([^;]*)))?(\\s*(?:[;,]+\\s*|$))"
);
public final class CookieDecoder {
private static final String COMMA = ",";
private final boolean lenient;
/**
* Creates a new decoder with strict parsing.
*/
public CookieDecoder() {
this(false);
}
/**
* Creates a new decoder.
*
* @param lenient ignores cookies with the name 'HTTPOnly' instead of throwing an exception
*/
public CookieDecoder(boolean lenient) {
this.lenient = lenient;
}
/**
* Decodes the specified HTTP header value into {@link Cookie}s.
*
* @return the decoded {@link Cookie}s
*/
public Set<Cookie> decode(String header) {
public static Set<Cookie> decode(String header) {
List<String> names = new ArrayList<String>(8);
List<String> values = new ArrayList<String>(8);
extractKeyValuePairs(header, names, values);
@ -106,11 +80,6 @@ public class CookieDecoder {
Set<Cookie> cookies = new TreeSet<Cookie>();
for (; i < names.size(); i ++) {
String name = names.get(i);
// Not all user agents understand the HttpOnly attribute
if (lenient && CookieHeaderNames.HTTPONLY.equalsIgnoreCase(name)) {
continue;
}
String value = values.get(i);
if (value == null) {
value = "";
@ -199,58 +168,130 @@ public class CookieDecoder {
}
private static void extractKeyValuePairs(
String header, List<String> names, List<String> values) {
Matcher m = PATTERN.matcher(header);
int pos = 0;
String name = null;
String value = null;
String separator = null;
while (m.find(pos)) {
pos = m.end();
final String header, final List<String> names, final List<String> values) {
// Extract name and value pair from the match.
String newName = m.group(1);
String newValue = m.group(3);
if (newValue == null) {
newValue = decodeValue(m.group(2));
final int headerLen = header.length();
loop: for (int i = 0;;) {
// Skip spaces and separators.
for (;;) {
if (i == headerLen) {
break loop;
}
String newSeparator = m.group(4);
if (name == null) {
name = newName;
value = newValue == null? "" : newValue;
separator = newSeparator;
switch (header.charAt(i)) {
case '\t': case '\n': case 0x0b: case '\f': case '\r':
case ' ': case ',': case ';':
i ++;
continue;
}
break;
}
if (newValue == null &&
!CookieHeaderNames.DISCARD.equalsIgnoreCase(newName) &&
!CookieHeaderNames.SECURE.equalsIgnoreCase(newName) &&
!CookieHeaderNames.HTTPONLY.equalsIgnoreCase(newName)) {
value = value + separator + newName;
separator = newSeparator;
// Skip '$'.
for (;;) {
if (i == headerLen) {
break loop;
}
if (header.charAt(i) == '$') {
i ++;
continue;
}
names.add(name);
values.add(value);
name = newName;
value = newValue;
separator = newSeparator;
break;
}
String name;
String value;
if (i == headerLen) {
name = null;
value = null;
} else {
int newNameStart = i;
keyValLoop: for (;;) {
switch (header.charAt(i)) {
case ';':
// NAME; (no value till ';')
name = header.substring(newNameStart, i);
value = null;
break keyValLoop;
case '=':
// NAME=VALUE
name = header.substring(newNameStart, i);
i ++;
if (i == headerLen) {
// NAME= (empty value, i.e. nothing after '=')
value = "";
break keyValLoop;
}
int newValueStart = i;
char c = header.charAt(i);
if (c == '"' || c == '\'') {
// NAME="VALUE" or NAME='VALUE'
StringBuilder newValueBuf = new StringBuilder(header.length() - i);
final char q = c;
boolean hadBackslash = false;
i ++;
for (;;) {
if (i == headerLen) {
value = newValueBuf.toString();
break keyValLoop;
}
if (hadBackslash) {
hadBackslash = false;
c = header.charAt(i ++);
switch (c) {
case '\\': case '"': case '\'':
// Escape last backslash.
newValueBuf.setCharAt(newValueBuf.length() - 1, c);
break;
default:
// Do not escape last backslash.
newValueBuf.append(c);
}
} else {
c = header.charAt(i ++);
if (c == q) {
value = newValueBuf.toString();
break keyValLoop;
}
newValueBuf.append(c);
if (c == '\\') {
hadBackslash = true;
}
}
}
} else {
// NAME=VALUE;
int semiPos = header.indexOf(';', i);
if (semiPos > 0) {
value = header.substring(newValueStart, semiPos);
i = semiPos;
} else {
value = header.substring(newValueStart);
i = headerLen;
}
}
break keyValLoop;
default:
i ++;
}
if (i == headerLen) {
// NAME (no value till the end of string)
name = header.substring(newNameStart);
value = null;
break;
}
}
}
// The last entry
if (name != null) {
names.add(name);
values.add(value);
}
}
private static String decodeValue(String value) {
if (value == null) {
return value;
}
return value.replace("\\\"", "\"").replace("\\\\", "\\");
private CookieDecoder() {
// Unused
}
}

View File

@ -1,264 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import java.util.Date;
import java.util.Set;
import java.util.TreeSet;
/**
* Encodes {@link Cookie}s into an HTTP header value. This encoder can encode
* the HTTP cookie version 0, 1, and 2.
* <p>
* This encoder is stateful. It maintains an internal data structure that
* holds the {@link Cookie}s added by the {@link #addCookie(String, String)}
* method. Once {@link #encode()} is called, all added {@link Cookie}s are
* encoded into an HTTP header value and all {@link Cookie}s in the internal
* data structure are removed so that the encoder can start over.
* <pre>
* // Client-side example
* {@link HttpRequest} req = ...;
* {@link CookieEncoder} encoder = new {@link CookieEncoder}(false);
* encoder.addCookie("JSESSIONID", "1234");
* res.setHeader("Cookie", encoder.encode());
*
* // Server-side example
* {@link HttpResponse} res = ...;
* {@link CookieEncoder} encoder = new {@link CookieEncoder}(true);
* encoder.addCookie("JSESSIONID", "1234");
* res.setHeader("Set-Cookie", encoder.encode());
* </pre>
*
* @see CookieDecoder
*
* @apiviz.stereotype utility
* @apiviz.has io.netty.handler.codec.http.Cookie oneway - - encodes
*/
public class CookieEncoder {
    // TreeSet keeps the cookies sorted so the encoded header is deterministic.
    private final Set<Cookie> cookies = new TreeSet<Cookie>();
    // true  - encode server-side ("Set-Cookie") header values
    // false - encode client-side ("Cookie") header values
    private final boolean server;
    /**
     * Creates a new encoder.
     *
     * @param server {@code true} if and only if this encoder is supposed to
     *        encode server-side cookies ({@code "Set-Cookie"}).  {@code false}
     *        if and only if this encoder is supposed to encode client-side
     *        cookies ({@code "Cookie"}).
     */
    public CookieEncoder(boolean server) {
        this.server = server;
    }
    /**
     * Adds a new {@link Cookie} created with the specified name and value to
     * this encoder.
     */
    public void addCookie(String name, String value) {
        cookies.add(new DefaultCookie(name, value));
    }
    /**
     * Adds the specified {@link Cookie} to this encoder.
     */
    public void addCookie(Cookie cookie) {
        cookies.add(cookie);
    }
    /**
     * Encodes the {@link Cookie}s which were added by {@link #addCookie(Cookie)}
     * so far into an HTTP header value.  If no {@link Cookie}s were added,
     * an empty string is returned.
     *
     * <strong>Be aware that calling this method will clear the {@link Cookie}s you added to
     * this encoder.</strong>
     */
    public String encode() {
        String answer;
        if (server) {
            answer = encodeServerSide();
        } else {
            answer = encodeClientSide();
        }
        // The encoder is stateful: encoding consumes the accumulated cookies
        // so that the same instance can be reused for the next header value.
        cookies.clear();
        return answer;
    }
    // Encodes all added cookies into a single header value, with each
    // attribute terminated by ';'.
    // NOTE(review): this concatenates several server-side cookies into one
    // "Set-Cookie" value; RFC 6265 recommends one Set-Cookie header per
    // cookie -- confirm this legacy behavior is intended before reuse.
    private String encodeServerSide() {
        StringBuilder sb = new StringBuilder();
        for (Cookie cookie: cookies) {
            add(sb, cookie.getName(), cookie.getValue());
            if (cookie.getMaxAge() >= 0) {
                if (cookie.getVersion() == 0) {
                    // Version 0 (Netscape) cookies carry an absolute
                    // "Expires" date instead of a relative "Max-Age".
                    addUnquoted(sb, CookieHeaderNames.EXPIRES,
                            new HttpHeaderDateFormat().format(
                                    new Date(System.currentTimeMillis() +
                                             cookie.getMaxAge() * 1000L)));
                } else {
                    add(sb, CookieHeaderNames.MAX_AGE, cookie.getMaxAge());
                }
            }
            if (cookie.getPath() != null) {
                // Version 0 attributes are never quoted.
                if (cookie.getVersion() > 0) {
                    add(sb, CookieHeaderNames.PATH, cookie.getPath());
                } else {
                    addUnquoted(sb, CookieHeaderNames.PATH, cookie.getPath());
                }
            }
            if (cookie.getDomain() != null) {
                if (cookie.getVersion() > 0) {
                    add(sb, CookieHeaderNames.DOMAIN, cookie.getDomain());
                } else {
                    addUnquoted(sb, CookieHeaderNames.DOMAIN, cookie.getDomain());
                }
            }
            // Secure and HttpOnly are boolean attributes: name only, no value.
            if (cookie.isSecure()) {
                sb.append(CookieHeaderNames.SECURE);
                sb.append((char) HttpConstants.SEMICOLON);
            }
            if (cookie.isHttpOnly()) {
                sb.append(CookieHeaderNames.HTTPONLY);
                sb.append((char) HttpConstants.SEMICOLON);
            }
            // The remaining attributes only exist for version 1+ (RFC 2109/2965).
            if (cookie.getVersion() >= 1) {
                if (cookie.getComment() != null) {
                    add(sb, CookieHeaderNames.COMMENT, cookie.getComment());
                }
                add(sb, CookieHeaderNames.VERSION, 1);
                if (cookie.getCommentUrl() != null) {
                    addQuoted(sb, CookieHeaderNames.COMMENTURL, cookie.getCommentUrl());
                }
                if (!cookie.getPorts().isEmpty()) {
                    // Emit Port="p1,p2,..." - the port list is always quoted.
                    sb.append(CookieHeaderNames.PORT);
                    sb.append((char) HttpConstants.EQUALS);
                    sb.append((char) HttpConstants.DOUBLE_QUOTE);
                    for (int port: cookie.getPorts()) {
                        sb.append(port);
                        sb.append((char) HttpConstants.COMMA);
                    }
                    // Replace the trailing comma with the closing quote.
                    sb.setCharAt(sb.length() - 1, (char) HttpConstants.DOUBLE_QUOTE);
                    sb.append((char) HttpConstants.SEMICOLON);
                }
                if (cookie.isDiscard()) {
                    sb.append(CookieHeaderNames.DISCARD);
                    sb.append((char) HttpConstants.SEMICOLON);
                }
            }
        }
        // Drop the trailing ';' separator, if anything was written.
        if (sb.length() > 0) {
            sb.setLength(sb.length() - 1);
        }
        return sb.toString();
    }
    // Encodes all added cookies into a single "Cookie" header value.
    // Version 1+ attributes are prefixed with '$' as per RFC 2109.
    private String encodeClientSide() {
        StringBuilder sb = new StringBuilder();
        for (Cookie cookie: cookies) {
            if (cookie.getVersion() >= 1) {
                add(sb, '$' + CookieHeaderNames.VERSION, 1);
            }
            add(sb, cookie.getName(), cookie.getValue());
            if (cookie.getPath() != null) {
                add(sb, '$' + CookieHeaderNames.PATH, cookie.getPath());
            }
            if (cookie.getDomain() != null) {
                add(sb, '$' + CookieHeaderNames.DOMAIN, cookie.getDomain());
            }
            if (cookie.getVersion() >= 1) {
                if (!cookie.getPorts().isEmpty()) {
                    // Emit $Port="p1,p2,..." - the port list is always quoted.
                    sb.append('$');
                    sb.append(CookieHeaderNames.PORT);
                    sb.append((char) HttpConstants.EQUALS);
                    sb.append((char) HttpConstants.DOUBLE_QUOTE);
                    for (int port: cookie.getPorts()) {
                        sb.append(port);
                        sb.append((char) HttpConstants.COMMA);
                    }
                    // Replace the trailing comma with the closing quote.
                    sb.setCharAt(sb.length() - 1, (char) HttpConstants.DOUBLE_QUOTE);
                    sb.append((char) HttpConstants.SEMICOLON);
                }
            }
        }
        // Drop the trailing ';' separator, if anything was written.
        if (sb.length() > 0) {
            sb.setLength(sb.length() - 1);
        }
        return sb.toString();
    }
    // Appends "name=val;", quoting the value only if it contains a character
    // that is a separator or otherwise unsafe in an unquoted token.
    private static void add(StringBuilder sb, String name, String val) {
        if (val == null) {
            addQuoted(sb, name, "");
            return;
        }
        for (int i = 0; i < val.length(); i ++) {
            char c = val.charAt(i);
            switch (c) {
            case '\t': case ' ': case '"': case '(': case ')': case ',':
            case '/': case ':': case ';': case '<': case '=': case '>':
            case '?': case '@': case '[': case '\\': case ']':
            case '{': case '}':
                addQuoted(sb, name, val);
                return;
            }
        }
        addUnquoted(sb, name, val);
    }
    // Appends "name=val;" with the value written verbatim (no quoting).
    private static void addUnquoted(StringBuilder sb, String name, String val) {
        sb.append(name);
        sb.append((char) HttpConstants.EQUALS);
        sb.append(val);
        sb.append((char) HttpConstants.SEMICOLON);
    }
    // Appends "name="val";" with backslashes and double quotes in the value
    // escaped.  A null value is treated as the empty string.
    private static void addQuoted(StringBuilder sb, String name, String val) {
        if (val == null) {
            val = "";
        }
        sb.append(name);
        sb.append((char) HttpConstants.EQUALS);
        sb.append((char) HttpConstants.DOUBLE_QUOTE);
        sb.append(val.replace("\\", "\\\\").replace("\"", "\\\""));
        sb.append((char) HttpConstants.DOUBLE_QUOTE);
        sb.append((char) HttpConstants.SEMICOLON);
    }
    // Appends "name=val;" for a numeric attribute such as Max-Age or Version.
    private static void add(StringBuilder sb, String name, long val) {
        sb.append(name);
        sb.append((char) HttpConstants.EQUALS);
        sb.append(val);
        sb.append((char) HttpConstants.SEMICOLON);
    }
}

View File

@ -0,0 +1,82 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
/**
 * Package-private helpers shared by the cookie encoders for building
 * {@code "Cookie"} / {@code "Set-Cookie"} header values.  Each name-value
 * pair written by the {@code add*} methods is terminated by the two-character
 * separator {@code "; "}.
 */
final class CookieEncoderUtil {
    /** Characters in a value that force the value to be written quoted. */
    private static final String QUOTE_TRIGGERS = "\t \"(),/:;<=>?@[\\]{}";

    /**
     * Removes the trailing {@code "; "} separator (two characters) from the
     * given buffer, if the buffer is non-empty, and returns the result.
     */
    static String stripTrailingSeparator(StringBuilder buf) {
        int length = buf.length();
        if (length > 0) {
            buf.setLength(length - 2);
        }
        return buf.toString();
    }

    /**
     * Appends {@code name=val; } to the buffer, quoting the value only when
     * it contains a separator or otherwise unsafe character.  A {@code null}
     * value is encoded as an empty quoted string.
     */
    static void add(StringBuilder buf, String name, String val) {
        if (val == null) {
            addQuoted(buf, name, "");
            return;
        }
        for (int i = 0; i < val.length(); i ++) {
            if (QUOTE_TRIGGERS.indexOf(val.charAt(i)) >= 0) {
                addQuoted(buf, name, val);
                return;
            }
        }
        addUnquoted(buf, name, val);
    }

    /**
     * Appends {@code name=val; } to the buffer with the value written
     * verbatim (no quoting, no escaping).
     */
    static void addUnquoted(StringBuilder buf, String name, String val) {
        buf.append(name)
           .append((char) HttpConstants.EQUALS)
           .append(val)
           .append((char) HttpConstants.SEMICOLON)
           .append((char) HttpConstants.SP);
    }

    /**
     * Appends {@code name="val"; } to the buffer, escaping backslashes and
     * double quotes inside the value.  A {@code null} value is treated as
     * the empty string.
     */
    static void addQuoted(StringBuilder buf, String name, String val) {
        String escaped = (val == null ? "" : val)
                .replace("\\", "\\\\")
                .replace("\"", "\\\"");
        buf.append(name)
           .append((char) HttpConstants.EQUALS)
           .append((char) HttpConstants.DOUBLE_QUOTE)
           .append(escaped)
           .append((char) HttpConstants.DOUBLE_QUOTE)
           .append((char) HttpConstants.SEMICOLON)
           .append((char) HttpConstants.SP);
    }

    /**
     * Appends {@code name=val; } to the buffer for a numeric attribute such
     * as {@code Max-Age} or {@code Version}.
     */
    static void add(StringBuilder buf, String name, long val) {
        buf.append(name)
           .append((char) HttpConstants.EQUALS)
           .append(val)
           .append((char) HttpConstants.SEMICOLON)
           .append((char) HttpConstants.SP);
    }

    private CookieEncoderUtil() {
        // Utility class - not instantiable.
    }
}

View File

@ -15,8 +15,6 @@
*/
package io.netty.handler.codec.http;
import io.netty.util.internal.CaseIgnoringComparator;
import java.util.Collections;
import java.util.Set;
import java.util.TreeSet;
@ -28,22 +26,6 @@ import java.util.TreeSet;
*/
public class DefaultCookie implements Cookie {
private static final Set<String> RESERVED_NAMES = new TreeSet<String>(CaseIgnoringComparator.INSTANCE);
static {
RESERVED_NAMES.add("Domain");
RESERVED_NAMES.add("Path");
RESERVED_NAMES.add("Comment");
RESERVED_NAMES.add("CommentURL");
RESERVED_NAMES.add("Discard");
RESERVED_NAMES.add("Port");
RESERVED_NAMES.add("Max-Age");
RESERVED_NAMES.add("Expires");
RESERVED_NAMES.add("Version");
RESERVED_NAMES.add("Secure");
RESERVED_NAMES.add("HTTPOnly");
}
private final String name;
private String value;
private String domain;
@ -53,7 +35,7 @@ public class DefaultCookie implements Cookie {
private boolean discard;
private Set<Integer> ports = Collections.emptySet();
private Set<Integer> unmodifiablePorts = ports;
private long maxAge = -1;
private long maxAge = Long.MIN_VALUE;
private int version;
private boolean secure;
private boolean httpOnly;
@ -87,8 +69,8 @@ public class DefaultCookie implements Cookie {
}
}
if (RESERVED_NAMES.contains(name)) {
throw new IllegalArgumentException("reserved name: " + name);
if (name.charAt(0) == '$') {
throw new IllegalArgumentException("name starting with '$' not allowed: " + name);
}
this.name = name;

View File

@ -17,6 +17,7 @@ package io.netty.handler.codec.http;
import static io.netty.handler.codec.http.HttpHeaders.*;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
@ -47,13 +48,16 @@ import java.util.Map.Entry;
* @apiviz.has io.netty.handler.codec.http.HttpChunk oneway - - filters out
*/
public class HttpChunkAggregator extends MessageToMessageDecoder<Object, HttpMessage> {
public static final int DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS = 1024;
private static final ByteBuf CONTINUE = Unpooled.copiedBuffer(
"HTTP/1.1 100 Continue\r\n\r\n", CharsetUtil.US_ASCII);
private final int maxContentLength;
private HttpMessage currentMessage;
private int maxCumulationBufferComponents = DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS;
private ChannelHandlerContext ctx;
/**
* Creates a new instance.
*
@ -71,6 +75,38 @@ public class HttpChunkAggregator extends MessageToMessageDecoder<Object, HttpMes
this.maxContentLength = maxContentLength;
}
/**
* Returns the maximum number of components in the cumulation buffer. If the number of
* the components in the cumulation buffer exceeds this value, the components of the
* cumulation buffer are consolidated into a single component, involving memory copies.
* The default value of this property is {@link #DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS}.
*/
public final int getMaxCumulationBufferComponents() {
return maxCumulationBufferComponents;
}
/**
* Sets the maximum number of components in the cumulation buffer. If the number of
* the components in the cumulation buffer exceeds this value, the components of the
* cumulation buffer are consolidated into a single component, involving memory copies.
* The default value of this property is {@link #DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS}
* and its minimum allowed value is {@code 2}.
*/
public final void setMaxCumulationBufferComponents(int maxCumulationBufferComponents) {
if (maxCumulationBufferComponents < 2) {
throw new IllegalArgumentException(
"maxCumulationBufferComponents: " + maxCumulationBufferComponents +
" (expected: >= 2)");
}
if (ctx == null) {
this.maxCumulationBufferComponents = maxCumulationBufferComponents;
} else {
throw new IllegalStateException(
"decoder properties cannot be changed once the decoder is added to a pipeline.");
}
}
@Override
public boolean isDecodable(Object msg) throws Exception {
return msg instanceof HttpMessage || msg instanceof HttpChunk;
@ -101,7 +137,7 @@ public class HttpChunkAggregator extends MessageToMessageDecoder<Object, HttpMes
m.removeHeader(HttpHeaders.Names.TRANSFER_ENCODING);
}
m.setChunked(false);
m.setContent(Unpooled.dynamicBuffer());
m.setContent(Unpooled.compositeBuffer(maxCumulationBufferComponents));
this.currentMessage = m;
return null;
} else {
@ -131,7 +167,9 @@ public class HttpChunkAggregator extends MessageToMessageDecoder<Object, HttpMes
" bytes.");
}
content.writeBytes(chunk.getContent());
// Append the content of the chunk
appendToCumulation(chunk.getContent());
if (chunk.isLast()) {
this.currentMessage = null;
@ -159,4 +197,16 @@ public class HttpChunkAggregator extends MessageToMessageDecoder<Object, HttpMes
HttpChunk.class.getSimpleName() + " are accepted: " + msg.getClass().getName());
}
}
private void appendToCumulation(ByteBuf input) {
CompositeByteBuf cumulation = (CompositeByteBuf) currentMessage.getContent();
cumulation.addComponent(input);
cumulation.writerIndex(cumulation.capacity());
}
@Override
public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
this.ctx = ctx;
}
}

View File

@ -17,57 +17,84 @@ package io.netty.handler.codec.http;
import java.util.List;
/**
* A utility class mainly for use with HTTP codec classes
*/
final class HttpCodecUtil {
static void validateHeaderName(String name) {
if (name == null) {
throw new NullPointerException("name");
/**
* Validates the name of a header
*
* @param headerName The header name being validated
*/
static void validateHeaderName(String headerName) {
//Check to see if the name is null
if (headerName == null) {
throw new NullPointerException("Header names cannot be null");
}
for (int i = 0; i < name.length(); i ++) {
char c = name.charAt(i);
if (c > 127) {
//Go through each of the characters in the name
for (int index = 0; index < headerName.length(); index ++) {
//Actually get the character
char character = headerName.charAt(index);
//Check to see if the character is not an ASCII character
if (character > 127) {
throw new IllegalArgumentException(
"name contains non-ascii character: " + name);
"Header name cannot contain non-ASCII characters: " + headerName);
}
// Check prohibited characters.
switch (c) {
//Check for prohibited characters.
switch (character) {
case '\t': case '\n': case 0x0b: case '\f': case '\r':
case ' ': case ',': case ':': case ';': case '=':
throw new IllegalArgumentException(
"name contains one of the following prohibited characters: " +
"=,;: \\t\\r\\n\\v\\f: " + name);
"Header name cannot contain the following prohibited characters: " +
"=,;: \\t\\r\\n\\v\\f: " + headerName);
}
}
}
static void validateHeaderValue(String value) {
if (value == null) {
throw new NullPointerException("value");
/**
* Validates the specified header value
*
* @param value The value being validated
*/
static void validateHeaderValue(String headerValue) {
//Check to see if the value is null
if (headerValue == null) {
throw new NullPointerException("Header values cannot be null");
}
// 0 - the previous character was neither CR nor LF
// 1 - the previous character was CR
// 2 - the previous character was LF
/*
* Set up the state of the validation
*
* States are as follows:
*
* 0: Previous character was neither CR nor LF
* 1: The previous character was CR
* 2: The previous character was LF
*/
int state = 0;
for (int i = 0; i < value.length(); i ++) {
char c = value.charAt(i);
//Start looping through each of the character
// Check the absolutely prohibited characters.
switch (c) {
for (int index = 0; index < headerValue.length(); index ++) {
char character = headerValue.charAt(index);
//Check the absolutely prohibited characters.
switch (character) {
case 0x0b: // Vertical tab
throw new IllegalArgumentException(
"value contains a prohibited character '\\v': " + value);
"Header value contains a prohibited character '\\v': " + headerValue);
case '\f':
throw new IllegalArgumentException(
"value contains a prohibited character '\\f': " + value);
"Header value contains a prohibited character '\\f': " + headerValue);
}
// Check the CRLF (HT | SP) pattern
switch (state) {
case 0:
switch (c) {
switch (character) {
case '\r':
state = 1;
break;
@ -77,47 +104,56 @@ final class HttpCodecUtil {
}
break;
case 1:
switch (c) {
switch (character) {
case '\n':
state = 2;
break;
default:
throw new IllegalArgumentException(
"Only '\\n' is allowed after '\\r': " + value);
"Only '\\n' is allowed after '\\r': " + headerValue);
}
break;
case 2:
switch (c) {
switch (character) {
case '\t': case ' ':
state = 0;
break;
default:
throw new IllegalArgumentException(
"Only ' ' and '\\t' are allowed after '\\n': " + value);
"Only ' ' and '\\t' are allowed after '\\n': " + headerValue);
}
}
}
if (state != 0) {
throw new IllegalArgumentException(
"value must not end with '\\r' or '\\n':" + value);
"Header value must not end with '\\r' or '\\n':" + headerValue);
}
}
static boolean isTransferEncodingChunked(HttpMessage m) {
List<String> chunked = m.getHeaders(HttpHeaders.Names.TRANSFER_ENCODING);
if (chunked.isEmpty()) {
/**
* Checks to see if the transfer encoding in a specified {@link HttpMessage} is chunked
*
* @param message The message to check
* @return True if transfer encoding is chunked, otherwise false
*/
static boolean isTransferEncodingChunked(HttpMessage message) {
List<String> transferEncodingHeaders = message.getHeaders(HttpHeaders.Names.TRANSFER_ENCODING);
if (transferEncodingHeaders.isEmpty()) {
return false;
}
for (String v: chunked) {
if (v.equalsIgnoreCase(HttpHeaders.Values.CHUNKED)) {
for (String value: transferEncodingHeaders) {
if (value.equalsIgnoreCase(HttpHeaders.Values.CHUNKED)) {
return true;
}
}
return false;
}
/**
* A constructor to ensure that instances of this class are never made
*/
private HttpCodecUtil() {
}
}

View File

@ -16,7 +16,7 @@
package io.netty.handler.codec.http;
import io.netty.channel.embedded.EmbeddedByteChannel;
import io.netty.handler.codec.compression.ZlibEncoder;
import io.netty.handler.codec.compression.ZlibCodecFactory;
import io.netty.handler.codec.compression.ZlibWrapper;
/**
@ -118,8 +118,8 @@ public class HttpContentCompressor extends HttpContentEncoder {
return new Result(
targetContentEncoding,
new EmbeddedByteChannel(
new ZlibEncoder(wrapper, compressionLevel, windowBits, memLevel)));
new EmbeddedByteChannel(ZlibCodecFactory.newZlibEncoder(
wrapper, compressionLevel, windowBits, memLevel)));
}
protected ZlibWrapper determineWrapper(String acceptEncoding) {

View File

@ -84,7 +84,7 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<Object,
if (!m.isChunked()) {
ByteBuf content = m.getContent();
// Decode the content
ByteBuf newContent = Unpooled.dynamicBuffer();
ByteBuf newContent = Unpooled.buffer();
decode(content, newContent);
finishDecode(newContent);
@ -104,7 +104,7 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<Object,
// Decode the chunk if necessary.
if (decoder != null) {
if (!c.isLast()) {
ByteBuf newContent = Unpooled.dynamicBuffer();
ByteBuf newContent = Unpooled.buffer();
decode(content, newContent);
if (newContent.readable()) {
c.setContent(newContent);
@ -112,7 +112,7 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<Object,
return null;
}
} else {
ByteBuf lastProduct = Unpooled.dynamicBuffer();
ByteBuf lastProduct = Unpooled.buffer();
finishDecode(lastProduct);
// Generate an additional chunk if the decoder produced

View File

@ -16,7 +16,7 @@
package io.netty.handler.codec.http;
import io.netty.channel.embedded.EmbeddedByteChannel;
import io.netty.handler.codec.compression.ZlibDecoder;
import io.netty.handler.codec.compression.ZlibCodecFactory;
import io.netty.handler.codec.compression.ZlibWrapper;
/**
@ -28,10 +28,10 @@ public class HttpContentDecompressor extends HttpContentDecoder {
@Override
protected EmbeddedByteChannel newContentDecoder(String contentEncoding) throws Exception {
if ("gzip".equalsIgnoreCase(contentEncoding) || "x-gzip".equalsIgnoreCase(contentEncoding)) {
return new EmbeddedByteChannel(new ZlibDecoder(ZlibWrapper.GZIP));
return new EmbeddedByteChannel(ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP));
} else if ("deflate".equalsIgnoreCase(contentEncoding) || "x-deflate".equalsIgnoreCase(contentEncoding)) {
// To be strict, 'deflate' means ZLIB, but some servers were not implemented correctly.
return new EmbeddedByteChannel(new ZlibDecoder(ZlibWrapper.ZLIB_OR_NONE));
return new EmbeddedByteChannel(ZlibCodecFactory.newZlibDecoder(ZlibWrapper.ZLIB_OR_NONE));
}
// 'identity' or unsupported

View File

@ -117,7 +117,7 @@ public abstract class HttpContentEncoder extends MessageToMessageCodec<HttpMessa
if (!m.isChunked()) {
ByteBuf content = m.getContent();
// Encode the content.
ByteBuf newContent = Unpooled.dynamicBuffer();
ByteBuf newContent = Unpooled.buffer();
encode(content, newContent);
finishEncode(newContent);
@ -136,7 +136,7 @@ public abstract class HttpContentEncoder extends MessageToMessageCodec<HttpMessa
// Encode the chunk if necessary.
if (encoder != null) {
if (!c.isLast()) {
ByteBuf newContent = Unpooled.dynamicBuffer();
ByteBuf newContent = Unpooled.buffer();
encode(content, newContent);
if (content.readable()) {
c.setContent(newContent);
@ -144,7 +144,7 @@ public abstract class HttpContentEncoder extends MessageToMessageCodec<HttpMessa
return null;
}
} else {
ByteBuf lastProduct = Unpooled.dynamicBuffer();
ByteBuf lastProduct = Unpooled.buffer();
finishEncode(lastProduct);
// Generate an additional chunk if the decoder produced

View File

@ -15,8 +15,6 @@
*/
package io.netty.handler.codec.http;
import io.netty.util.internal.CaseIgnoringComparator;
import java.text.ParseException;
import java.util.Calendar;
import java.util.Date;
@ -1134,8 +1132,8 @@ public class HttpHeaders {
}
Set<String> getHeaderNames() {
Set<String> names =
new TreeSet<String>(CaseIgnoringComparator.INSTANCE);
Set<String> names = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
Entry e = head.after;
while (e != head) {

View File

@ -25,8 +25,10 @@ import java.util.Map;
import java.util.Set;
/**
* An HTTP message which provides common properties for {@link HttpRequest} and
* {@link HttpResponse}.
* An interface that defines a HTTP message, providing common properties for
* {@link HttpRequest} and {@link HttpResponse}.
* @see HttpResponse
* @see HttpRequest
* @see HttpHeaders
*
* @apiviz.landmark
@ -35,86 +37,111 @@ import java.util.Set;
public interface HttpMessage {
/**
* Returns the header value with the specified header name. If there are
* more than one header value for the specified header name, the first
* value is returned.
* Returns the value of a header with the specified name. If there are
* more than one values for the specified name, the first value is returned.
*
* @return the header value or {@code null} if there is no such header
* @param name The name of the header to search
* @return The first header value or {@code null} if there is no such header
*/
String getHeader(String name);
/**
* Returns the header values with the specified header name.
* Returns the values of headers with the specified name
*
* @return the {@link List} of header values. An empty list if there is no
* such header.
* @param name The name of the headers to search
* @return A {@link List} of header values which will be empty if no values
* are found
*/
List<String> getHeaders(String name);
/**
* Returns the all header names and values that this message contains.
* Returns the all headers that this message contains.
*
* @return the {@link List} of the header name-value pairs. An empty list
* if there is no header in this message.
* @return A {@link List} of the header name-value entries, which will be
* empty if no pairs are found
*/
List<Map.Entry<String, String>> getHeaders();
/**
* Returns {@code true} if and only if there is a header with the specified
* header name.
* Checks to see if there is a header with the specified name
*
* @param name The name of the header to search for
* @return True if at least one header is found
*/
boolean containsHeader(String name);
/**
* Returns the {@link Set} of all header names that this message contains.
* Gets a {@link Set} of all header names that this message contains
*
* @return A {@link Set} of all header names
*/
Set<String> getHeaderNames();
/**
* Returns the protocol version of this message.
* Returns the protocol version of this {@link HttpMessage}
*
     * @return The protocol version
*/
HttpVersion getProtocolVersion();
/**
* Sets the protocol version of this message.
* Sets the protocol version of this {@link HttpMessage}
*
* @param version The version to set
*/
void setProtocolVersion(HttpVersion version);
/**
* Returns the content of this message. If there is no content or
* {@link #isChunked()} returns {@code true}, an
* {@link Unpooled#EMPTY_BUFFER} is returned.
* Returns the content of this {@link HttpMessage}.
*
* If there is no content or {@link #isChunked()} returns {@code true},
* an {@link Unpooled#EMPTY_BUFFER} is returned.
*
* @return A {@link ByteBuf} containing this {@link HttpMessage}'s content
*/
ByteBuf getContent();
/**
* Sets the content of this message. If {@code null} is specified,
* the content of this message will be set to {@link Unpooled#EMPTY_BUFFER}.
* Sets the content of this {@link HttpMessage}.
*
* If {@code null} is specified, the content of this message
* will be set to {@link Unpooled#EMPTY_BUFFER}
*
* @param content The {@link ByteBuf} containing the content to use
*/
void setContent(ByteBuf content);
/**
* Adds a new header with the specified name and value.
* If the specified value is not a {@link String}, it is converted into a
* {@link String} by {@link Object#toString()}, except for {@link Date}
* and {@link Calendar} which are formatted to the date format defined in
* <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1">RFC2616</a>.
*
* If the specified value is not a {@link String}, it is converted
* into a {@link String} by {@link Object#toString()}, except in the cases
* of {@link Date} and {@link Calendar}, which are formatted to the date
* format defined in <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1">RFC2616</a>.
*
* @param name The name of the header being added
* @param value The value of the header being added
*/
void addHeader(String name, Object value);
/**
* Sets a new header with the specified name and value. If there is an
* existing header with the same name, the existing header is removed.
* Sets a header with the specified name and value.
*
* If there is an existing header with the same name, it is removed.
* If the specified value is not a {@link String}, it is converted into a
* {@link String} by {@link Object#toString()}, except for {@link Date}
* and {@link Calendar} which are formatted to the date format defined in
* and {@link Calendar}, which are formatted to the date format defined in
* <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1">RFC2616</a>.
*
* @param name The name of the header being set
* @param value The value of the header being set
*/
void setHeader(String name, Object value);
/**
* Sets a new header with the specified name and values. If there is an
* existing header with the same name, the existing header is removed.
* Sets a header with the specified name and values.
*
* If there is an existing header with the same name, it is removed.
* This method can be represented approximately as the following code:
* <pre>
* m.removeHeader(name);
@ -125,42 +152,62 @@ public interface HttpMessage {
* m.addHeader(name, v);
* }
* </pre>
*
* @param name The name of the headers being set
* @param values The values of the headers being set
*/
void setHeader(String name, Iterable<?> values);
/**
* Removes the header with the specified name.
*
* @param name The name of the header to remove
*/
void removeHeader(String name);
/**
* Removes all headers from this message.
* Removes all headers from this {@link HttpMessage}.
*/
void clearHeaders();
/**
* Returns {@code true} if and only if this message does not have any
* content but the {@link HttpChunk}s, which is generated by
* {@link HttpMessageDecoder} consecutively, contain the actual content.
* Checks to see if this {@link HttpMessage} is broken into multiple "chunks"
*
* If this returns true, it means that this {@link HttpMessage}
* actually has no content - The {@link HttpChunk}s (which are generated
* by the {@link HttpMessageDecoder} consecutively) contain the actual content.
* <p>
* Please note that this method will keep returning {@code true} if the
* {@code "Transfer-Encoding"} of this message is {@code "chunked"}, even if
* you attempt to override this property by calling {@link #setChunked(boolean)}
* with {@code false}.
* </p>
*
* @return True if this message is chunked, otherwise false
*/
boolean isChunked();
/**
* Sets if this message does not have any content but the
* {@link HttpChunk}s, which is generated by {@link HttpMessageDecoder}
* consecutively, contain the actual content.
* Sets the boolean defining if this {@link HttpMessage} is chunked.
*
* <p>
* If this method is called with {@code true}, the content of this message
* becomes {@link Unpooled#EMPTY_BUFFER}.
* If this is set to true, it means that this initial {@link HttpMessage}
* does not contain any content - The content is contained by multiple
* {@link HttpChunk}s, which are generated by the {@link HttpMessageDecoder}
* consecutively.
*
* Because of this, the content of this {@link HttpMessage} becomes
* {@link Unpooled#EMPTY_BUFFER}
* </p>
*
* <p>
* Even if this method is called with {@code false}, {@link #isChunked()}
* will keep returning {@code true} if the {@code "Transfer-Encoding"} of
* this message is {@code "chunked"}.
* </p>
*
* @param chunked True if this message is to be delivered in chunks,
* otherwise false.
*/
void setChunked(boolean chunked);
}

View File

@ -272,6 +272,17 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<Object, HttpMe
assert chunkSize <= Integer.MAX_VALUE;
int chunkSize = (int) this.chunkSize;
int readLimit = actualReadableBytes();
// Check if the buffer is readable first as we use the readable byte count
        // to create the HttpChunk. This is needed as otherwise we may end up
        // creating a HttpChunk instance that contains an empty buffer and so is
// handled like it is the last HttpChunk.
//
// See https://github.com/netty/netty/issues/433
if (readLimit == 0) {
return null;
}
int toRead = chunkSize;
if (toRead > maxChunkSize) {
toRead = maxChunkSize;
@ -325,6 +336,17 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<Object, HttpMe
assert chunkSize <= Integer.MAX_VALUE;
int chunkSize = (int) this.chunkSize;
int readLimit = actualReadableBytes();
// Check if the buffer is readable first as we use the readable byte count
        // to create the HttpChunk. This is needed as otherwise we may end up
        // creating a HttpChunk instance that contains an empty buffer and so is
// handled like it is the last HttpChunk.
//
// See https://github.com/netty/netty/issues/433
if (readLimit == 0) {
return null;
}
int toRead = chunkSize;
if (toRead > maxChunkSize) {
toRead = maxChunkSize;
@ -444,19 +466,40 @@ public abstract class HttpMessageDecoder extends ReplayingDecoder<Object, HttpMe
if (length < contentRead) {
if (!message.isChunked()) {
message.setChunked(true);
return new Object[] {message, new DefaultHttpChunk(buffer.readBytes(toRead))};
return new Object[] {message, new DefaultHttpChunk(read(buffer, toRead))};
} else {
return new DefaultHttpChunk(buffer.readBytes(toRead));
return new DefaultHttpChunk(read(buffer, toRead));
}
}
if (content == null) {
content = buffer.readBytes((int) length);
content = read(buffer, (int) length);
} else {
content.writeBytes(buffer.readBytes((int) length));
}
return reset();
}
/**
* Try to do an optimized "read" of len from the given {@link ByteBuf}.
*
     * This is part of #412 to save byte copies
*
*/
private ByteBuf read(ByteBuf buffer, int len) {
ByteBuf internal = internalBuffer();
if (internal.readableBytes() >= len) {
int index = internal.readerIndex();
ByteBuf buf = internal.slice(index, len);
            // update the reader index so the next read is at the correct position
buffer.readerIndex(index + len);
return buf;
} else {
return buffer.readBytes(len);
}
}
private State readHeaders(ByteBuf buffer) throws TooLongFrameException {
headerSize = 0;
final HttpMessage message = this.message;

View File

@ -23,31 +23,41 @@ package io.netty.handler.codec.http;
* <p>
* Unlike the Servlet API, a query string is constructed and decomposed by
* {@link QueryStringEncoder} and {@link QueryStringDecoder}. {@link Cookie}
* support is also provided separately via {@link CookieEncoder} and
* {@link CookieDecoder}.
* support is also provided separately via {@link CookieDecoder}, {@link ClientCookieEncoder},
 * and {@link ServerCookieEncoder}.
*
* @see HttpResponse
* @see CookieEncoder
* @see ClientCookieEncoder
* @see ServerCookieEncoder
* @see CookieDecoder
*/
public interface HttpRequest extends HttpMessage {
/**
* Returns the method of this request.
* Returns the {@link HttpMethod} of this {@link HttpRequest}.
*
* @return The {@link HttpMethod} of this {@link HttpRequest}
*/
HttpMethod getMethod();
/**
* Sets the method of this request.
* Sets the {@link HttpMethod} of this {@link HttpRequest}.
*
     * @param method The {@link HttpMethod} to set
*/
void setMethod(HttpMethod method);
/**
* Returns the URI (or path) of this request.
* Returns the requested URI (or alternatively, path)
*
* @return The URI being requested
*/
String getUri();
/**
* Sets the URI (or path) of this request.
* Sets the URI (or alternatively, path) being requested.
*
* @param uri The URI being requested
*/
void setUri(String uri);
}

View File

@ -19,23 +19,29 @@ package io.netty.handler.codec.http;
/**
* An HTTP response.
*
* <h3>Accessing Cookie</h3>
* <h3>Accessing Cookies</h3>
* <p>
* Unlike the Servlet API, {@link Cookie} support is provided separately via
* {@link CookieEncoder} and {@link CookieDecoder}.
* Unlike the Servlet API, {@link Cookie} support is provided separately via {@link CookieDecoder},
* {@link ClientCookieEncoder}, and {@link ServerCookieEncoder}.
*
* @see HttpRequest
* @see CookieEncoder
* @see CookieDecoder
* @see ClientCookieEncoder
* @see ServerCookieEncoder
*/
public interface HttpResponse extends HttpMessage {
/**
* Returns the status of this response.
* Returns the status of this {@link HttpResponse}.
*
* @return The {@link HttpResponseStatus} of this {@link HttpResponse}
*/
HttpResponseStatus getStatus();
/**
* Sets the status of this response.
* Sets the status of this {@link HttpResponse}
*
* @param status The {@link HttpResponseStatus} to use
*/
void setStatus(HttpResponseStatus status);
}

View File

@ -0,0 +1,174 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import static io.netty.handler.codec.http.CookieEncoderUtil.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.List;
/**
 * Encodes server-side {@link Cookie}s into HTTP {@code Set-Cookie} header values.
 * This encoder can encode the HTTP cookie version 0, 1, and 2.
 * <pre>
 * // Example
 * {@link HttpResponse} res = ...;
 * res.setHeader("Set-Cookie", {@link ServerCookieEncoder}.encode("JSESSIONID", "1234"));
 * </pre>
 *
 * @see CookieDecoder
 *
 * @apiviz.stereotype utility
 * @apiviz.has io.netty.handler.codec.http.Cookie oneway - - encodes
 */
public final class ServerCookieEncoder {

    /**
     * Encodes the specified cookie name-value pair into a {@code Set-Cookie}
     * header value, using default cookie attributes.
     */
    public static String encode(String name, String value) {
        return encode(new DefaultCookie(name, value));
    }

    /**
     * Encodes the specified cookie into a {@code Set-Cookie} header value.
     *
     * @param cookie the cookie to encode
     * @return the encoded header value
     * @throws NullPointerException if {@code cookie} is {@code null}
     */
    public static String encode(Cookie cookie) {
        if (cookie == null) {
            throw new NullPointerException("cookie");
        }
        StringBuilder buf = new StringBuilder();
        add(buf, cookie.getName(), cookie.getValue());
        // Long.MIN_VALUE is the sentinel meaning "no max age was set".
        if (cookie.getMaxAge() != Long.MIN_VALUE) {
            if (cookie.getVersion() == 0) {
                // Version 0 cookies carry an absolute Expires date instead of Max-Age.
                addUnquoted(buf, CookieHeaderNames.EXPIRES,
                        new HttpHeaderDateFormat().format(
                                new Date(System.currentTimeMillis() +
                                        cookie.getMaxAge() * 1000L)));
            } else {
                add(buf, CookieHeaderNames.MAX_AGE, cookie.getMaxAge());
            }
        }
        if (cookie.getPath() != null) {
            if (cookie.getVersion() > 0) {
                // Version 1+ values may be quoted; version 0 values must not be.
                add(buf, CookieHeaderNames.PATH, cookie.getPath());
            } else {
                addUnquoted(buf, CookieHeaderNames.PATH, cookie.getPath());
            }
        }
        if (cookie.getDomain() != null) {
            if (cookie.getVersion() > 0) {
                add(buf, CookieHeaderNames.DOMAIN, cookie.getDomain());
            } else {
                addUnquoted(buf, CookieHeaderNames.DOMAIN, cookie.getDomain());
            }
        }
        // Secure and HttpOnly are flag attributes: name only, no value.
        if (cookie.isSecure()) {
            buf.append(CookieHeaderNames.SECURE);
            buf.append((char) HttpConstants.SEMICOLON);
            buf.append((char) HttpConstants.SP);
        }
        if (cookie.isHttpOnly()) {
            buf.append(CookieHeaderNames.HTTPONLY);
            buf.append((char) HttpConstants.SEMICOLON);
            buf.append((char) HttpConstants.SP);
        }
        // The attributes below only exist for version 1 (RFC 2109/2965) cookies.
        if (cookie.getVersion() >= 1) {
            if (cookie.getComment() != null) {
                add(buf, CookieHeaderNames.COMMENT, cookie.getComment());
            }
            add(buf, CookieHeaderNames.VERSION, 1);
            if (cookie.getCommentUrl() != null) {
                addQuoted(buf, CookieHeaderNames.COMMENTURL, cookie.getCommentUrl());
            }
            if (!cookie.getPorts().isEmpty()) {
                // Emits Port="p1,p2,..."; the trailing comma written by the loop
                // is overwritten with the closing double quote below.
                buf.append(CookieHeaderNames.PORT);
                buf.append((char) HttpConstants.EQUALS);
                buf.append((char) HttpConstants.DOUBLE_QUOTE);
                for (int port: cookie.getPorts()) {
                    buf.append(port);
                    buf.append((char) HttpConstants.COMMA);
                }
                buf.setCharAt(buf.length() - 1, (char) HttpConstants.DOUBLE_QUOTE);
                buf.append((char) HttpConstants.SEMICOLON);
                buf.append((char) HttpConstants.SP);
            }
            if (cookie.isDiscard()) {
                buf.append(CookieHeaderNames.DISCARD);
                buf.append((char) HttpConstants.SEMICOLON);
                buf.append((char) HttpConstants.SP);
            }
        }
        // Drop the trailing "; " separator left after the last attribute.
        return stripTrailingSeparator(buf);
    }

    /**
     * Encodes the specified cookies into a list of {@code Set-Cookie} header
     * values, one entry per cookie.
     *
     * @throws NullPointerException if {@code cookies} is {@code null}
     */
    public static List<String> encode(Cookie... cookies) {
        if (cookies == null) {
            throw new NullPointerException("cookies");
        }
        List<String> encoded = new ArrayList<String>(cookies.length);
        for (Cookie c: cookies) {
            if (c == null) {
                // NOTE(review): a null element terminates encoding rather than
                // being skipped — confirm this truncation is intentional.
                break;
            }
            encoded.add(encode(c));
        }
        return encoded;
    }

    /**
     * Encodes the specified cookies into a list of {@code Set-Cookie} header
     * values, one entry per cookie.
     *
     * @throws NullPointerException if {@code cookies} is {@code null}
     */
    public static List<String> encode(Collection<Cookie> cookies) {
        if (cookies == null) {
            throw new NullPointerException("cookies");
        }
        List<String> encoded = new ArrayList<String>(cookies.size());
        for (Cookie c: cookies) {
            if (c == null) {
                // NOTE(review): a null element terminates encoding rather than
                // being skipped — confirm this truncation is intentional.
                break;
            }
            encoded.add(encode(c));
        }
        return encoded;
    }

    /**
     * Encodes the specified cookies into a list of {@code Set-Cookie} header
     * values, one entry per cookie.
     *
     * @throws NullPointerException if {@code cookies} is {@code null}
     */
    public static List<String> encode(Iterable<Cookie> cookies) {
        if (cookies == null) {
            throw new NullPointerException("cookies");
        }
        List<String> encoded = new ArrayList<String>();
        for (Cookie c: cookies) {
            if (c == null) {
                // NOTE(review): a null element terminates encoding rather than
                // being skipped — confirm this truncation is intentional.
                break;
            }
            encoded.add(encode(c));
        }
        return encoded;
    }

    private ServerCookieEncoder() {
        // Utility class: not meant to be instantiated.
    }
}

View File

@ -19,7 +19,6 @@ import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.util.VoidEnum;
/**
* Decodes {@link ByteBuf}s into {@link WebSocketFrame}s.
@ -30,7 +29,7 @@ import io.netty.util.VoidEnum;
* @apiviz.landmark
* @apiviz.uses io.netty.handler.codec.http.websocket.WebSocketFrame
*/
public class WebSocket00FrameDecoder extends ReplayingDecoder<WebSocketFrame, VoidEnum> {
public class WebSocket00FrameDecoder extends ReplayingDecoder<WebSocketFrame, Void> {
static final int DEFAULT_MAX_FRAME_SIZE = 16384;

View File

@ -166,7 +166,7 @@ public class WebSocket08FrameEncoder extends MessageToByteEncoder<WebSocketFrame
if (maskPayload) {
int random = (int) (Math.random() * Integer.MAX_VALUE);
mask = ByteBuffer.allocate(4).putInt(random).array();
out.writeInt((int) (Math.random() * Integer.MAX_VALUE));
out.writeBytes(mask);
int counter = 0;
for (int i = data.readerIndex(); i < data.writerIndex(); i ++) {

View File

@ -92,11 +92,9 @@ public abstract class WebSocketServerHandshaker {
}
/**
<<<<<<< HEAD
* Returns the max length for any frame's payload.
=======
* Returns the max length for any frame's payload
>>>>>>> abd10d9... Fixed bug where subprotocol not sent by client
* Gets the maximum length for any frame's payload.
*
* @return The maximum length for a frame's payload
*/
public int getMaxFramePayloadLength() {
return maxFramePayloadLength;

View File

@ -19,90 +19,91 @@ import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.base64.Base64;
import io.netty.util.CharsetUtil;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
* TODO Document me.
* A utility class mainly for use by web sockets
*/
final class WebSocketUtil {
/**
* Performs an MD5 hash
* Performs a MD5 hash on the specified data
*
* @param bytes
* Data to hash
* @return Hashed data
* @param data The data to hash
* @return The hashed data
*/
static byte[] md5(byte[] bytes) {
static byte[] md5(byte[] data) {
try {
//Try to get a MessageDigest that uses MD5
MessageDigest md = MessageDigest.getInstance("MD5");
return md.digest(bytes);
//Hash the data
return md.digest(data);
} catch (NoSuchAlgorithmException e) {
throw new InternalError("MD5 not supported on this platform");
//This shouldn't happen! How old is the computer?
throw new InternalError("MD5 not supported on this platform - Outdated?");
}
}
/**
* Performs an SHA-1 hash
* Performs a SHA-1 hash on the specified data
*
* @param bytes
* Data to hash
* @return Hashed data
* @param data The data to hash
* @return The hashed data
*/
static byte[] sha1(byte[] bytes) {
static byte[] sha1(byte[] data) {
try {
//Attempt to get a MessageDigest that uses SHA1
MessageDigest md = MessageDigest.getInstance("SHA1");
return md.digest(bytes);
//Hash the data
return md.digest(data);
} catch (NoSuchAlgorithmException e) {
throw new InternalError("SHA-1 not supported on this platform");
//Alright, you might have an old system.
throw new InternalError("SHA-1 is not supported on this platform - Outdated?");
}
}
/**
* Base 64 encoding
* Performs base64 encoding on the specified data
*
* @param bytes
* Bytes to encode
* @return encoded string
* @param data The data to encode
* @return An encoded string containing the data
*/
static String base64(byte[] bytes) {
ByteBuf hashed = Unpooled.wrappedBuffer(bytes);
return Base64.encode(hashed).toString(CharsetUtil.UTF_8);
static String base64(byte[] data) {
ByteBuf encodedData = Unpooled.wrappedBuffer(data);
return Base64.encode(encodedData).toString(CharsetUtil.UTF_8);
}
/**
* Creates some random bytes
* Creates an arbitrary number of random bytes
*
* @param size
* Number of random bytes to create
* @return random bytes
* @param size the number of random bytes to create
* @return An array of random bytes
*/
static byte[] randomBytes(int size) {
byte[] bytes = new byte[size];
for (int i = 0; i < size; i++) {
bytes[i] = (byte) randomNumber(0, 255);
for (int index = 0; index < size; index++) {
bytes[index] = (byte) randomNumber(0, 255);
}
return bytes;
}
/**
* Generates a random number
* Generates a pseudo-random number
*
* @param min
* Minimum value
* @param max
* Maximum value
* @return Random number
* @param minimum The minimum allowable value
* @param maximum The maximum allowable value
* @return A pseudo-random number
*/
static int randomNumber(int min, int max) {
return (int) (Math.random() * max + min);
static int randomNumber(int minimum, int maximum) {
return (int) (Math.random() * maximum + minimum);
}
/**
* A private constructor to ensure that instances of this class cannot be made
*/
private WebSocketUtil() {
// Unused
}

View File

@ -554,7 +554,7 @@ public class SpdyFrameDecoder extends ByteToMessageDecoder<Object> {
// Initialize header block decoding fields
headerSize = 0;
numHeaders = -1;
decompressed = Unpooled.dynamicBuffer(8192);
decompressed = Unpooled.buffer(8192);
}
// Accumulate decompressed data

View File

@ -309,7 +309,7 @@ public class SpdyFrameEncoder extends MessageToByteEncoder<Object> {
throw new IllegalArgumentException(
"header block contains too many headers");
}
ByteBuf headerBlock = Unpooled.dynamicBuffer(256);
ByteBuf headerBlock = Unpooled.buffer();
writeLengthField(version, headerBlock, numHeaders);
for (String name: names) {
byte[] nameBytes = name.getBytes("UTF-8");
@ -340,7 +340,7 @@ public class SpdyFrameEncoder extends MessageToByteEncoder<Object> {
if (uncompressed.readableBytes() == 0) {
return Unpooled.EMPTY_BUFFER;
}
ByteBuf compressed = Unpooled.dynamicBuffer();
ByteBuf compressed = Unpooled.buffer();
synchronized (headerBlockCompressor) {
if (!finished) {
headerBlockCompressor.setInput(uncompressed);

View File

@ -206,7 +206,7 @@ public class SpdyHttpDecoder extends MessageToMessageDecoder<Object, HttpMessage
ByteBuf spdyDataFrameData = spdyDataFrame.getData();
int spdyDataFrameDataLen = spdyDataFrameData.readableBytes();
if (content == Unpooled.EMPTY_BUFFER) {
content = Unpooled.dynamicBuffer(spdyDataFrameDataLen);
content = Unpooled.buffer(spdyDataFrameDataLen);
content.writeBytes(spdyDataFrameData, spdyDataFrameData.readerIndex(), spdyDataFrameDataLen);
httpMessage.setContent(content);
} else {

View File

@ -165,6 +165,6 @@ public final class SpdyHttpHeaders {
* Sets the {@code "X-SPDY-Scheme"} header.
*/
public static void setScheme(HttpMessage message, String scheme) {
message.setHeader(Names.URL, scheme);
message.setHeader(Names.SCHEME, scheme);
}
}

View File

@ -15,14 +15,13 @@
*/
package io.netty.handler.codec.spdy;
import io.netty.util.internal.QueueFactory;
import java.util.Comparator;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
final class SpdySession {
@ -177,7 +176,7 @@ final class SpdySession {
private final AtomicInteger sendWindowSize;
private final AtomicInteger receiveWindowSize;
private volatile int receiveWindowSizeLowerBound;
private final BlockingQueue<Object> pendingWriteQueue = QueueFactory.createQueue();
private final Queue<Object> pendingWriteQueue = new ConcurrentLinkedQueue<Object>();
StreamState(
byte priority, boolean remoteSideClosed, boolean localSideClosed,

View File

@ -434,12 +434,6 @@ public class SpdySessionHandler
super.close(ctx, future);
}
@Override
public void disconnect(ChannelHandlerContext ctx, ChannelFuture future) throws Exception {
sendGoAwayFrame(ctx);
super.close(ctx, future);
}
@Override
public void flush(ChannelHandlerContext ctx, ChannelFuture future) throws Exception {
MessageBuf<Object> in = ctx.outboundMessageBuffer();

View File

@ -31,8 +31,7 @@ public class CookieDecoderTest {
String cookieString = "myCookie=myValue;expires=XXX;path=/apathsomewhere;domain=.adomainsomewhere;secure;";
cookieString = cookieString.replace("XXX", new HttpHeaderDateFormat().format(new Date(System.currentTimeMillis() + 50000)));
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
Set<Cookie> cookies = CookieDecoder.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertNotNull(cookie);
@ -62,8 +61,7 @@ public class CookieDecoderTest {
@Test
public void testDecodingSingleCookieV0ExtraParamsIgnored() {
String cookieString = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere;secure;comment=this is a comment;version=0;commentURL=http://aurl.com;port=\"80,8080\";discard;";
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
Set<Cookie> cookies = CookieDecoder.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertNotNull(cookie);
@ -81,8 +79,7 @@ public class CookieDecoderTest {
@Test
public void testDecodingSingleCookieV1() {
String cookieString = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere;secure;comment=this is a comment;version=1;";
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
Set<Cookie> cookies = CookieDecoder.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertEquals("myValue", cookie.getValue());
@ -101,8 +98,7 @@ public class CookieDecoderTest {
@Test
public void testDecodingSingleCookieV1ExtraParamsIgnored() {
String cookieString = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere;secure;comment=this is a comment;version=1;commentURL=http://aurl.com;port='80,8080';discard;";
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
Set<Cookie> cookies = CookieDecoder.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertNotNull(cookie);
@ -120,8 +116,7 @@ public class CookieDecoderTest {
@Test
public void testDecodingSingleCookieV2() {
String cookieString = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere;secure;comment=this is a comment;version=2;commentURL=http://aurl.com;port=\"80,8080\";discard;";
CookieDecoder cookieDecoder = new CookieDecoder();
Set<Cookie> cookies = cookieDecoder.decode(cookieString);
Set<Cookie> cookies = CookieDecoder.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertNotNull(cookie);
@ -145,9 +140,8 @@ public class CookieDecoderTest {
String c1 = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere;secure;comment=this is a comment;version=2;commentURL=\"http://aurl.com\";port='80,8080';discard;";
String c2 = "myCookie2=myValue2;max-age=0;path=/anotherpathsomewhere;domain=.anotherdomainsomewhere;comment=this is another comment;version=2;commentURL=http://anotherurl.com;";
String c3 = "myCookie3=myValue3;max-age=0;version=2;";
CookieDecoder decoder = new CookieDecoder();
Set<Cookie> cookies = decoder.decode(c1 + c2 + c3);
Set<Cookie> cookies = CookieDecoder.decode(c1 + c2 + c3);
assertEquals(3, cookies.size());
Iterator<Cookie> it = cookies.iterator();
Cookie cookie = it.next();
@ -196,7 +190,7 @@ public class CookieDecoderTest {
"Part_Number=\"Riding_Rocket_0023\"; $Path=\"/acme/ammo\"; " +
"Part_Number=\"Rocket_Launcher_0001\"; $Path=\"/acme\"";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Iterator<Cookie> it = cookies.iterator();
Cookie c;
@ -231,7 +225,7 @@ public class CookieDecoderTest {
"$Version=\"1\"; session_id=\"1234\", " +
"$Version=\"1\"; session_id=\"1111\"; $Domain=\".cracker.edu\"";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Iterator<Cookie> it = cookies.iterator();
Cookie c;
@ -271,9 +265,11 @@ public class CookieDecoderTest {
"d=\"1\\\"2\\\"3\"," +
"e=\"\\\"\\\"\"," +
"f=\"1\\\"\\\"2\"," +
"g=\"\\\\\"";
"g=\"\\\\\"," +
"h=\"';,\\x\"";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Iterator<Cookie> it = cookies.iterator();
Cookie c;
@ -305,6 +301,10 @@ public class CookieDecoderTest {
assertEquals("g", c.getName());
assertEquals("\\", c.getValue());
c = it.next();
assertEquals("h", c.getName());
assertEquals("';,\\x", c.getValue());
assertFalse(it.hasNext());
}
@ -316,7 +316,7 @@ public class CookieDecoderTest {
"__utma=48461872.1094088325.1258140131.1258140131.1258140131.1; " +
"__utmb=48461872.13.10.1258140131; __utmc=48461872; " +
"__utmz=48461872.1258140131.1.1.utmcsr=overstock.com|utmccn=(referral)|utmcmd=referral|utmcct=/Home-Garden/Furniture/Clearance,/clearance,/32/dept.html";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Iterator<Cookie> it = cookies.iterator();
Cookie c;
@ -355,7 +355,7 @@ public class CookieDecoderTest {
String source = "Format=EU; expires=Fri, 31-Dec-9999 23:59:59 GMT; path=/";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Cookie c = cookies.iterator().next();
assertTrue(Math.abs(expectedMaxAge - c.getMaxAge()) < 2);
@ -366,9 +366,96 @@ public class CookieDecoderTest {
String source = "UserCookie=timeZoneName=(GMT+04:00) Moscow, St. Petersburg, Volgograd&promocode=&region=BE;" +
" expires=Sat, 01-Dec-2012 10:53:31 GMT; path=/";
Set<Cookie> cookies = new CookieDecoder().decode(source);
Set<Cookie> cookies = CookieDecoder.decode(source);
Cookie c = cookies.iterator().next();
assertEquals("timeZoneName=(GMT+04:00) Moscow, St. Petersburg, Volgograd&promocode=&region=BE", c.getValue());
}
@Test
public void testDecodingWeirdNames1() {
String src = "path=; expires=Mon, 01-Jan-1990 00:00:00 GMT; path=/; domain=.www.google.com";
Set<Cookie> cookies = CookieDecoder.decode(src);
Cookie c = cookies.iterator().next();
assertEquals("path", c.getName());
assertEquals("", c.getValue());
assertEquals("/", c.getPath());
}
@Test
public void testDecodingWeirdNames2() {
String src = "HTTPOnly=";
Set<Cookie> cookies = CookieDecoder.decode(src);
Cookie c = cookies.iterator().next();
assertEquals("HTTPOnly", c.getName());
assertEquals("", c.getValue());
}
@Test
public void testDecodingValuesWithCommasAndEquals() {
String src = "A=v=1&lg=en-US,it-IT,it&intl=it&np=1;T=z=E";
Set<Cookie> cookies = CookieDecoder.decode(src);
Iterator<Cookie> i = cookies.iterator();
Cookie c = i.next();
assertEquals("A", c.getName());
assertEquals("v=1&lg=en-US,it-IT,it&intl=it&np=1", c.getValue());
c = i.next();
assertEquals("T", c.getName());
assertEquals("z=E", c.getValue());
}
@Test
public void testDecodingLongValue() {
String longValue =
"b!!!$Q!!$ha!!<NC=MN(F!!%#4!!<NC=MN(F!!2!d!!!!#=IvZB!!2,F!!!!'=KqtH!!2-9!!!!" +
"'=IvZM!!3f:!!!!$=HbQW!!3g'!!!!%=J^wI!!3g-!!!!%=J^wI!!3g1!!!!$=HbQW!!3g2!!!!" +
"$=HbQW!!3g5!!!!%=J^wI!!3g9!!!!$=HbQW!!3gT!!!!$=HbQW!!3gX!!!!#=J^wI!!3gY!!!!" +
"#=J^wI!!3gh!!!!$=HbQW!!3gj!!!!$=HbQW!!3gr!!!!$=HbQW!!3gx!!!!#=J^wI!!3h!!!!!" +
"$=HbQW!!3h$!!!!#=J^wI!!3h'!!!!$=HbQW!!3h,!!!!$=HbQW!!3h0!!!!%=J^wI!!3h1!!!!" +
"#=J^wI!!3h2!!!!$=HbQW!!3h4!!!!$=HbQW!!3h7!!!!$=HbQW!!3h8!!!!%=J^wI!!3h:!!!!" +
"#=J^wI!!3h@!!!!%=J^wI!!3hB!!!!$=HbQW!!3hC!!!!$=HbQW!!3hL!!!!$=HbQW!!3hQ!!!!" +
"$=HbQW!!3hS!!!!%=J^wI!!3hU!!!!$=HbQW!!3h[!!!!$=HbQW!!3h^!!!!$=HbQW!!3hd!!!!" +
"%=J^wI!!3he!!!!%=J^wI!!3hf!!!!%=J^wI!!3hg!!!!$=HbQW!!3hh!!!!%=J^wI!!3hi!!!!" +
"%=J^wI!!3hv!!!!$=HbQW!!3i/!!!!#=J^wI!!3i2!!!!#=J^wI!!3i3!!!!%=J^wI!!3i4!!!!" +
"$=HbQW!!3i7!!!!$=HbQW!!3i8!!!!$=HbQW!!3i9!!!!%=J^wI!!3i=!!!!#=J^wI!!3i>!!!!" +
"%=J^wI!!3iD!!!!$=HbQW!!3iF!!!!#=J^wI!!3iH!!!!%=J^wI!!3iM!!!!%=J^wI!!3iS!!!!" +
"#=J^wI!!3iU!!!!%=J^wI!!3iZ!!!!#=J^wI!!3i]!!!!%=J^wI!!3ig!!!!%=J^wI!!3ij!!!!" +
"%=J^wI!!3ik!!!!#=J^wI!!3il!!!!$=HbQW!!3in!!!!%=J^wI!!3ip!!!!$=HbQW!!3iq!!!!" +
"$=HbQW!!3it!!!!%=J^wI!!3ix!!!!#=J^wI!!3j!!!!!$=HbQW!!3j%!!!!$=HbQW!!3j'!!!!" +
"%=J^wI!!3j(!!!!%=J^wI!!9mJ!!!!'=KqtH!!=SE!!<NC=MN(F!!?VS!!<NC=MN(F!!Zw`!!!!" +
"%=KqtH!!j+C!!<NC=MN(F!!j+M!!<NC=MN(F!!j+a!!<NC=MN(F!!j,.!!<NC=MN(F!!n>M!!!!" +
"'=KqtH!!s1X!!!!$=MMyc!!s1_!!!!#=MN#O!!ypn!!!!'=KqtH!!ypr!!!!'=KqtH!#%h!!!!!" +
"%=KqtH!#%o!!!!!'=KqtH!#)H6!!<NC=MN(F!#*%'!!!!%=KqtH!#+k(!!!!'=KqtH!#-E!!!!!" +
"'=KqtH!#1)w!!!!'=KqtH!#1)y!!!!'=KqtH!#1*M!!!!#=KqtH!#1*p!!!!'=KqtH!#14Q!!<N" +
"C=MN(F!#14S!!<NC=MN(F!#16I!!<NC=MN(F!#16N!!<NC=MN(F!#16X!!<NC=MN(F!#16k!!<N" +
"C=MN(F!#17@!!<NC=MN(F!#17A!!<NC=MN(F!#1Cq!!!!'=KqtH!#7),!!!!#=KqtH!#7)b!!!!" +
"#=KqtH!#7Ww!!!!'=KqtH!#?cQ!!!!'=KqtH!#His!!!!'=KqtH!#Jrh!!!!'=KqtH!#O@M!!<N" +
"C=MN(F!#O@O!!<NC=MN(F!#OC6!!<NC=MN(F!#Os.!!!!#=KqtH!#YOW!!!!#=H/Li!#Zat!!!!" +
"'=KqtH!#ZbI!!!!%=KqtH!#Zbc!!!!'=KqtH!#Zbs!!!!%=KqtH!#Zby!!!!'=KqtH!#Zce!!!!" +
"'=KqtH!#Zdc!!!!%=KqtH!#Zea!!!!'=KqtH!#ZhI!!!!#=KqtH!#ZiD!!!!'=KqtH!#Zis!!!!" +
"'=KqtH!#Zj0!!!!#=KqtH!#Zj1!!!!'=KqtH!#Zj[!!!!'=KqtH!#Zj]!!!!'=KqtH!#Zj^!!!!" +
"'=KqtH!#Zjb!!!!'=KqtH!#Zk!!!!!'=KqtH!#Zk6!!!!#=KqtH!#Zk9!!!!%=KqtH!#Zk<!!!!" +
"'=KqtH!#Zl>!!!!'=KqtH!#]9R!!!!$=H/Lt!#]I6!!!!#=KqtH!#]Z#!!!!%=KqtH!#^*N!!!!" +
"#=KqtH!#^:m!!!!#=KqtH!#_*_!!!!%=J^wI!#`-7!!!!#=KqtH!#`T>!!!!'=KqtH!#`T?!!!!" +
"'=KqtH!#`TA!!!!'=KqtH!#`TB!!!!'=KqtH!#`TG!!!!'=KqtH!#`TP!!!!#=KqtH!#`U,!!!!" +
"'=KqtH!#`U/!!!!'=KqtH!#`U0!!!!#=KqtH!#`U9!!!!'=KqtH!#aEQ!!!!%=KqtH!#b<)!!!!" +
"'=KqtH!#c9-!!!!%=KqtH!#dxC!!!!%=KqtH!#dxE!!!!%=KqtH!#ev$!!!!'=KqtH!#fBi!!!!" +
"#=KqtH!#fBj!!!!'=KqtH!#fG)!!!!'=KqtH!#fG+!!!!'=KqtH!#g<d!!!!'=KqtH!#g<e!!!!" +
"'=KqtH!#g=J!!!!'=KqtH!#gat!!!!#=KqtH!#s`D!!!!#=J_#p!#sg?!!!!#=J_#p!#t<a!!!!" +
"#=KqtH!#t<c!!!!#=KqtH!#trY!!!!$=JiYj!#vA$!!!!'=KqtH!#xs_!!!!'=KqtH!$$rO!!!!" +
"#=KqtH!$$rP!!!!#=KqtH!$(!%!!!!'=KqtH!$)]o!!!!%=KqtH!$,@)!!!!'=KqtH!$,k]!!!!" +
"'=KqtH!$1]+!!!!%=KqtH!$3IO!!!!%=KqtH!$3J#!!!!'=KqtH!$3J.!!!!'=KqtH!$3J:!!!!" +
"#=KqtH!$3JH!!!!#=KqtH!$3JI!!!!#=KqtH!$3JK!!!!%=KqtH!$3JL!!!!'=KqtH!$3JS!!!!" +
"'=KqtH!$8+M!!!!#=KqtH!$99d!!!!%=KqtH!$:Lw!!!!#=LK+x!$:N@!!!!#=KqtG!$:NC!!!!" +
"#=KqtG!$:hW!!!!'=KqtH!$:i[!!!!'=KqtH!$:ih!!!!'=KqtH!$:it!!!!'=KqtH!$:kO!!!!" +
"'=KqtH!$>*B!!!!'=KqtH!$>hD!!!!+=J^x0!$?lW!!!!'=KqtH!$?ll!!!!'=KqtH!$?lm!!!!" +
"%=KqtH!$?mi!!!!'=KqtH!$?mx!!!!'=KqtH!$D7]!!!!#=J_#p!$D@T!!!!#=J_#p!$V<g!!!!" +
"'=KqtH";
Set<Cookie> cookies = CookieDecoder.decode("bh=\"" + longValue + "\";");
assertEquals(1, cookies.size());
Cookie c = cookies.iterator().next();
assertEquals("bh", c.getName());
assertEquals(longValue, c.getValue());
}
}

View File

@ -19,17 +19,16 @@ import static org.junit.Assert.*;
import java.text.DateFormat;
import java.util.Date;
import java.util.List;
import org.junit.Test;
public class CookieEncoderTest {
@Test
public void testEncodingSingleCookieV0() {
String result = "myCookie=myValue;Expires=XXX;Path=/apathsomewhere;Domain=.adomainsomewhere;Secure";
String result = "myCookie=myValue; Expires=XXX; Path=/apathsomewhere; Domain=.adomainsomewhere; Secure";
DateFormat df = new HttpHeaderDateFormat();
Cookie cookie = new DefaultCookie("myCookie", "myValue");
CookieEncoder encoder = new CookieEncoder(true);
encoder.addCookie(cookie);
cookie.setComment("this is a Comment");
cookie.setCommentUrl("http://aurl.com");
cookie.setDomain(".adomainsomewhere");
@ -39,7 +38,7 @@ public class CookieEncoderTest {
cookie.setPorts(80, 8080);
cookie.setSecure(true);
String encodedCookie = encoder.encode();
String encodedCookie = ServerCookieEncoder.encode(cookie);
long currentTime = System.currentTimeMillis();
boolean fail = true;
@ -59,26 +58,22 @@ public class CookieEncoderTest {
@Test
public void testEncodingSingleCookieV1() {
String result = "myCookie=myValue;Max-Age=50;Path=\"/apathsomewhere\";Domain=.adomainsomewhere;Secure;Comment=\"this is a Comment\";Version=1";
String result = "myCookie=myValue; Max-Age=50; Path=\"/apathsomewhere\"; Domain=.adomainsomewhere; Secure; Comment=\"this is a Comment\"; Version=1";
Cookie cookie = new DefaultCookie("myCookie", "myValue");
CookieEncoder encoder = new CookieEncoder(true);
encoder.addCookie(cookie);
cookie.setVersion(1);
cookie.setComment("this is a Comment");
cookie.setDomain(".adomainsomewhere");
cookie.setMaxAge(50);
cookie.setPath("/apathsomewhere");
cookie.setSecure(true);
String encodedCookie = encoder.encode();
String encodedCookie = ServerCookieEncoder.encode(cookie);
assertEquals(result, encodedCookie);
}
@Test
public void testEncodingSingleCookieV2() {
String result = "myCookie=myValue;Max-Age=50;Path=\"/apathsomewhere\";Domain=.adomainsomewhere;Secure;Comment=\"this is a Comment\";Version=1;CommentURL=\"http://aurl.com\";Port=\"80,8080\";Discard";
String result = "myCookie=myValue; Max-Age=50; Path=\"/apathsomewhere\"; Domain=.adomainsomewhere; Secure; Comment=\"this is a Comment\"; Version=1; CommentURL=\"http://aurl.com\"; Port=\"80,8080\"; Discard";
Cookie cookie = new DefaultCookie("myCookie", "myValue");
CookieEncoder encoder = new CookieEncoder(true);
encoder.addCookie(cookie);
cookie.setVersion(1);
cookie.setComment("this is a Comment");
cookie.setCommentUrl("http://aurl.com");
@ -88,16 +83,15 @@ public class CookieEncoderTest {
cookie.setPath("/apathsomewhere");
cookie.setPorts(80, 8080);
cookie.setSecure(true);
String encodedCookie = encoder.encode();
String encodedCookie = ServerCookieEncoder.encode(cookie);
assertEquals(result, encodedCookie);
}
@Test
public void testEncodingMultipleCookies() {
String c1 = "myCookie=myValue;Max-Age=50;Path=\"/apathsomewhere\";Domain=.adomainsomewhere;Secure;Comment=\"this is a Comment\";Version=1;CommentURL=\"http://aurl.com\";Port=\"80,8080\";Discard;";
String c2 = "myCookie2=myValue2;Path=\"/anotherpathsomewhere\";Domain=.anotherdomainsomewhere;Comment=\"this is another Comment\";Version=1;CommentURL=\"http://anotherurl.com\";";
String c3 = "myCookie3=myValue3;Version=1";
CookieEncoder encoder = new CookieEncoder(true);
public void testEncodingMultipleClientCookies() {
String c1 = "$Version=1; myCookie=myValue; $Path=\"/apathsomewhere\"; $Domain=.adomainsomewhere; $Port=\"80,8080\"; ";
String c2 = "$Version=1; myCookie2=myValue2; $Path=\"/anotherpathsomewhere\"; $Domain=.anotherdomainsomewhere; ";
String c3 = "$Version=1; myCookie3=myValue3";
Cookie cookie = new DefaultCookie("myCookie", "myValue");
cookie.setVersion(1);
cookie.setComment("this is a Comment");
@ -108,7 +102,6 @@ public class CookieEncoderTest {
cookie.setPath("/apathsomewhere");
cookie.setPorts(80, 8080);
cookie.setSecure(true);
encoder.addCookie(cookie);
Cookie cookie2 = new DefaultCookie("myCookie2", "myValue2");
cookie2.setVersion(1);
cookie2.setComment("this is another Comment");
@ -117,23 +110,17 @@ public class CookieEncoderTest {
cookie2.setDiscard(false);
cookie2.setPath("/anotherpathsomewhere");
cookie2.setSecure(false);
encoder.addCookie(cookie2);
Cookie cookie3 = new DefaultCookie("myCookie3", "myValue3");
cookie3.setVersion(1);
encoder.addCookie(cookie3);
String encodedCookie = encoder.encode();
String encodedCookie = ClientCookieEncoder.encode(cookie, cookie2, cookie3);
assertEquals(c1 + c2 + c3, encodedCookie);
}
@Test
public void testEncodingWithNoCookies() {
CookieEncoder encoderForServer = new CookieEncoder(true);
String encodedCookie1 = encoderForServer.encode();
CookieEncoder encoderForClient = new CookieEncoder(false);
String encodedCookie2 = encoderForClient.encode();
String encodedCookie1 = ClientCookieEncoder.encode();
List<String> encodedCookie2 = ServerCookieEncoder.encode();
assertNotNull(encodedCookie1);
assertNotNull(encodedCookie2);
}
}

View File

@ -0,0 +1,139 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import static org.junit.Assert.*;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.embedded.EmbeddedMessageChannel;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.util.CharsetUtil;
import java.util.List;
import org.easymock.EasyMock;
import org.junit.Test;
public class HttpChunkAggregatorTest {
@Test
public void testAggregate() {
    // Aggregator limit of 1 MiB — large enough that it never triggers here.
    HttpChunkAggregator aggr = new HttpChunkAggregator(1024 * 1024);
    EmbeddedMessageChannel embedder = new EmbeddedMessageChannel(aggr);
    HttpMessage message = new DefaultHttpMessage(HttpVersion.HTTP_1_1);
    HttpHeaders.setHeader(message, "X-Test", true);
    message.setChunked(true);
    HttpChunk chunk1 = new DefaultHttpChunk(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
    HttpChunk chunk2 = new DefaultHttpChunk(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
    // An empty chunk marks the end of the chunked message.
    HttpChunk chunk3 = new DefaultHttpChunk(Unpooled.EMPTY_BUFFER);
    // Message header and intermediate chunks are buffered, so no inbound message yet.
    assertFalse(embedder.writeInbound(message));
    assertFalse(embedder.writeInbound(chunk1));
    assertFalse(embedder.writeInbound(chunk2));
    // this should trigger a messageReceived event so return true
    assertTrue(embedder.writeInbound(chunk3));
    assertTrue(embedder.finish());
    // The aggregated message carries the combined content and the original header.
    HttpMessage aggratedMessage = (HttpMessage) embedder.readInbound();
    assertNotNull(aggratedMessage);
    assertEquals(chunk1.getContent().readableBytes() + chunk2.getContent().readableBytes(), HttpHeaders.getContentLength(aggratedMessage));
    assertEquals(aggratedMessage.getHeader("X-Test"), Boolean.TRUE.toString());
    checkContentBuffer(aggratedMessage);
    assertNull(embedder.readInbound());
}
/**
 * Verifies that the aggregated content is a composite buffer with exactly two
 * components, and that decomposing it yields only plain (non-composite) buffers.
 */
private void checkContentBuffer(HttpMessage aggregatedMessage) {
    CompositeByteBuf buffer = (CompositeByteBuf) aggregatedMessage.getContent();
    assertEquals(2, buffer.numComponents());
    List<ByteBuf> buffers = buffer.decompose(0, buffer.capacity());
    assertEquals(2, buffers.size());
    for (ByteBuf buf: buffers) {
        // This should be false as we decompose the buffer before to not have deep hierarchy
        assertFalse(buf instanceof CompositeByteBuf);
    }
}
@Test
public void testAggregateWithTrailer() {
    // Aggregator limit of 1 MiB — large enough that it never triggers here.
    HttpChunkAggregator aggr = new HttpChunkAggregator(1024 * 1024);
    EmbeddedMessageChannel embedder = new EmbeddedMessageChannel(aggr);
    HttpMessage message = new DefaultHttpMessage(HttpVersion.HTTP_1_1);
    HttpHeaders.setHeader(message, "X-Test", true);
    message.setChunked(true);
    HttpChunk chunk1 = new DefaultHttpChunk(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
    HttpChunk chunk2 = new DefaultHttpChunk(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
    // A trailer (instead of an empty chunk) also terminates the chunked message,
    // and its headers must be merged into the aggregated message.
    HttpChunkTrailer trailer = new DefaultHttpChunkTrailer();
    trailer.setHeader("X-Trailer", true);
    assertFalse(embedder.writeInbound(message));
    assertFalse(embedder.writeInbound(chunk1));
    assertFalse(embedder.writeInbound(chunk2));
    // this should trigger a messageReceived event so return true
    assertTrue(embedder.writeInbound(trailer));
    assertTrue(embedder.finish());
    HttpMessage aggratedMessage = (HttpMessage) embedder.readInbound();
    assertNotNull(aggratedMessage);
    assertEquals(chunk1.getContent().readableBytes() + chunk2.getContent().readableBytes(), HttpHeaders.getContentLength(aggratedMessage));
    assertEquals(aggratedMessage.getHeader("X-Test"), Boolean.TRUE.toString());
    // Trailer header must survive aggregation.
    assertEquals(aggratedMessage.getHeader("X-Trailer"), Boolean.TRUE.toString());
    checkContentBuffer(aggratedMessage);
    assertNull(embedder.readInbound());
}
@Test(expected = TooLongFrameException.class)
public void testTooLongFrameException() {
HttpChunkAggregator aggr = new HttpChunkAggregator(4);
EmbeddedMessageChannel embedder = new EmbeddedMessageChannel(aggr);
HttpMessage message = new DefaultHttpMessage(HttpVersion.HTTP_1_1);
message.setChunked(true);
HttpChunk chunk1 = new DefaultHttpChunk(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII));
HttpChunk chunk2 = new DefaultHttpChunk(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII));
assertFalse(embedder.writeInbound(message));
assertFalse(embedder.writeInbound(chunk1));
embedder.writeInbound(chunk2);
fail();
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidConstructorUsage() {
new HttpChunkAggregator(0);
}
@Test(expected = IllegalArgumentException.class)
public void testInvalidMaxCumulationBufferComponents() {
HttpChunkAggregator aggr= new HttpChunkAggregator(Integer.MAX_VALUE);
aggr.setMaxCumulationBufferComponents(1);
}
@Test(expected = IllegalStateException.class)
public void testSetMaxCumulationBufferComponentsAfterInit() throws Exception {
HttpChunkAggregator aggr = new HttpChunkAggregator(Integer.MAX_VALUE);
ChannelHandlerContext ctx = EasyMock.createMock(ChannelHandlerContext.class);
EasyMock.replay(ctx);
aggr.beforeAdd(ctx);
aggr.setMaxCumulationBufferComponents(10);
}
}

View File

@ -35,7 +35,7 @@ public class HttpContentCompressorTest {
"gzip; q=0.5, identity", "gzip",
"gzip ; q=0.1", "gzip",
"gzip; q=0, deflate", "deflate",
" defalte ; q=0 , *;q=0.5", "gzip",
" deflate ; q=0 , *;q=0.5", "gzip",
};
for (int i = 0; i < tests.length; i += 2) {
String acceptEncoding = tests[i];

View File

@ -0,0 +1,73 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedByteChannel;
import io.netty.util.CharsetUtil;
import org.junit.Assert;
import org.junit.Test;
/**
 * Regression test for https://github.com/netty/netty/issues/433: while the
 * body of a chunked request is still incomplete, no emitted chunk may carry
 * the last-chunk flag.
 */
public class HttpServerCodecTest {

    @Test
    public void testUnfinishedChunkedHttpRequestIsLastFlag() throws Exception {
        int maxChunkSize = 2000;
        HttpServerCodec codec = new HttpServerCodec(1000, 1000, maxChunkSize);
        EmbeddedByteChannel channel = new EmbeddedByteChannel(codec);

        int totalContentLength = maxChunkSize * 5;
        channel.writeInbound(Unpooled.copiedBuffer("PUT /test HTTP/1.1\r\n" +
                "Content-Length: " + totalContentLength + "\r\n" +
                "\r\n", CharsetUtil.UTF_8));

        // Offer only half of the announced body, then close the channel.
        int offeredContentLength = (int) (maxChunkSize * 2.5);
        channel.writeInbound(prepareDataChunk(offeredContentLength));
        channel.finish();

        HttpMessage httpMessage = (HttpMessage) channel.readInbound();
        Assert.assertTrue(httpMessage.isChunked());

        boolean sawChunk = false;
        int totalBytesPolled = 0;
        HttpChunk chunk;
        while ((chunk = (HttpChunk) channel.readInbound()) != null) {
            sawChunk = true;
            totalBytesPolled += chunk.getContent().readableBytes();
            // The body is unfinished, so no chunk may be marked as last.
            Assert.assertFalse(chunk.isLast());
        }
        Assert.assertFalse(!sawChunk);
        Assert.assertEquals(offeredContentLength, totalBytesPolled);
    }

    /** Builds a body buffer consisting of {@code size} 'a' characters. */
    private ByteBuf prepareDataChunk(int size) {
        StringBuilder content = new StringBuilder(size);
        for (int i = 0; i < size; i++) {
            content.append('a');
        }
        return Unpooled.copiedBuffer(content.toString(), CharsetUtil.UTF_8);
    }
}

View File

@ -20,7 +20,7 @@
<parent>
<groupId>io.netty</groupId>
<artifactId>netty-parent</artifactId>
<version>4.0.0.Alpha1-SNAPSHOT</version>
<version>4.0.0.Alpha2-SNAPSHOT</version>
</parent>
<artifactId>netty-codec</artifactId>

View File

@ -23,7 +23,7 @@ public abstract class ByteToByteDecoder extends ChannelInboundByteHandlerAdapter
@Override
public void inboundBufferUpdated(ChannelHandlerContext ctx, ByteBuf in) throws Exception {
callDecode(ctx, in, ctx.nextOutboundByteBuffer());
callDecode(ctx, in, ctx.nextInboundByteBuffer());
}
@Override
@ -46,7 +46,6 @@ public abstract class ByteToByteDecoder extends ChannelInboundByteHandlerAdapter
}
if (out.readableBytes() > oldOutSize) {
in.discardReadBytes();
ctx.fireInboundBufferUpdated();
}
@ -71,8 +70,8 @@ public abstract class ByteToByteDecoder extends ChannelInboundByteHandlerAdapter
}
}
in.unsafe().discardSomeReadBytes();
if (out.readableBytes() > oldOutSize) {
in.discardReadBytes();
ctx.fireInboundBufferUpdated();
}
}

View File

@ -44,10 +44,7 @@ public abstract class ByteToByteEncoder extends ChannelOutboundByteHandlerAdapte
}
}
if (out.readableBytes() > oldOutSize) {
in.discardReadBytes();
}
in.unsafe().discardSomeReadBytes();
ctx.flush(future);
}

View File

@ -35,7 +35,7 @@ public abstract class ByteToMessageDecoder<O>
@Override
public ByteBuf newInboundBuffer(ChannelHandlerContext ctx) throws Exception {
return Unpooled.dynamicBuffer();
return Unpooled.buffer();
}
@Override
@ -52,7 +52,6 @@ public abstract class ByteToMessageDecoder<O>
try {
if (CodecUtil.unfoldAndAdd(ctx, decodeLast(ctx, in), true)) {
in.discardReadBytes();
ctx.fireInboundBufferUpdated();
}
} catch (Throwable t) {
@ -93,9 +92,10 @@ public abstract class ByteToMessageDecoder<O>
break;
}
} catch (Throwable t) {
in.unsafe().discardSomeReadBytes();
if (decoded) {
decoded = false;
in.discardReadBytes();
ctx.fireInboundBufferUpdated();
}
@ -107,8 +107,9 @@ public abstract class ByteToMessageDecoder<O>
}
}
in.unsafe().discardSomeReadBytes();
if (decoded) {
in.discardReadBytes();
ctx.fireInboundBufferUpdated();
}
}

View File

@ -68,7 +68,7 @@ public class FixedLengthFrameDecoder extends ByteToMessageDecoder<Object> {
@Override
public ByteBuf newInboundBuffer(ChannelHandlerContext ctx) throws Exception {
if (allocateFullBuffer) {
return Unpooled.dynamicBuffer(frameLength);
return Unpooled.buffer(frameLength);
} else {
return super.newInboundBuffer(ctx);
}

View File

@ -16,7 +16,6 @@
package io.netty.handler.codec;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufFactory;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.serialization.ObjectDecoder;
@ -427,7 +426,7 @@ public class LengthFieldBasedFrameDecoder extends ByteToMessageDecoder<Object> {
* is overridden to avoid memory copy.
*/
protected ByteBuf extractFrame(ByteBuf buffer, int index, int length) {
ByteBuf frame = buffer.factory().getBuffer(length);
ByteBuf frame = buffer.unsafe().newBuffer(length);
frame.writeBytes(buffer, index, length);
return frame;
}

View File

@ -16,7 +16,6 @@
package io.netty.handler.codec;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufFactory;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;

View File

@ -22,7 +22,6 @@ import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.util.Signal;
import io.netty.util.VoidEnum;
/**
* A specialized variation of {@link ByteToMessageDecoder} which enables implementation
@ -105,7 +104,7 @@ import io.netty.util.VoidEnum;
* <li>You must keep in mind that {@code decode(..)} method can be called many
* times to decode a single message. For example, the following code will
* not work:
* <pre> public class MyDecoder extends {@link ReplayingDecoder}&lt;{@link VoidEnum}&gt; {
* <pre> public class MyDecoder extends {@link ReplayingDecoder}&lt;{@link Void}&gt; {
*
* private final Queue&lt;Integer&gt; values = new LinkedList&lt;Integer&gt;();
*
@ -125,7 +124,7 @@ import io.netty.util.VoidEnum;
* The correct implementation looks like the following, and you can also
* utilize the 'checkpoint' feature which is explained in detail in the
* next section.
* <pre> public class MyDecoder extends {@link ReplayingDecoder}&lt;{@link VoidEnum}&gt; {
* <pre> public class MyDecoder extends {@link ReplayingDecoder}&lt;{@link Void}&gt; {
*
* private final Queue&lt;Integer&gt; values = new LinkedList&lt;Integer&gt;();
*
@ -206,7 +205,7 @@ import io.netty.util.VoidEnum;
* An alternative way to manage the decoder state is to manage it by yourself.
* <pre>
* public class IntegerHeaderFrameDecoder
* extends {@link ReplayingDecoder}&lt;<strong>{@link VoidEnum}</strong>&gt; {
* extends {@link ReplayingDecoder}&lt;<strong>{@link Void}</strong>&gt; {
*
* <strong>private boolean readLength;</strong>
* private int length;
@ -215,7 +214,7 @@ import io.netty.util.VoidEnum;
* protected Object decode({@link ChannelHandlerContext} ctx,
* {@link Channel} channel,
* {@link ByteBuf} buf,
* {@link VoidEnum} state) throws Exception {
* {@link Void} state) throws Exception {
* if (!readLength) {
* length = buf.readInt();
* <strong>readLength = true;</strong>
@ -241,7 +240,7 @@ import io.netty.util.VoidEnum;
* {@link ChannelPipeline#replace(ChannelHandler, String, ChannelHandler)}, but
* some additional steps are required:
* <pre>
* public class FirstDecoder extends {@link ReplayingDecoder}&lt;{@link VoidEnum}&gt; {
* public class FirstDecoder extends {@link ReplayingDecoder}&lt;{@link Void}&gt; {
*
* public FirstDecoder() {
* super(true); // Enable unfold
@ -251,7 +250,7 @@ import io.netty.util.VoidEnum;
* protected Object decode({@link ChannelHandlerContext} ctx,
* {@link Channel} ch,
* {@link ByteBuf} buf,
* {@link VoidEnum} state) {
* {@link Void} state) {
* ...
* // Decode the first message
* Object firstMessage = ...;
@ -272,16 +271,17 @@ import io.netty.util.VoidEnum;
* }
* </pre>
* @param <S>
* the state type; use {@link VoidEnum} if state management is unused
* the state type which is usually an {@link Enum}; use {@link Void} if state management is
* unused
*
* @apiviz.landmark
* @apiviz.has io.netty.handler.codec.UnreplayableOperationException oneway - - throws
*/
public abstract class ReplayingDecoder<O, S extends Enum<S>> extends ByteToMessageDecoder<O> {
public abstract class ReplayingDecoder<O, S> extends ByteToMessageDecoder<O> {
static final Signal REPLAY = new Signal(ReplayingDecoder.class.getName() + ".REPLAY");
private final ByteBuf cumulation = Unpooled.dynamicBuffer();
private final ByteBuf cumulation = Unpooled.buffer();
private final ReplayingDecoderBuffer replayable = new ReplayingDecoderBuffer(cumulation);
private S state;
private int checkpoint = -1;
@ -455,8 +455,10 @@ public abstract class ReplayingDecoder<O, S extends Enum<S>> extends ByteToMessa
}
private void fireInboundBufferUpdated(ChannelHandlerContext ctx, ByteBuf in) {
checkpoint -= in.readerIndex();
in.discardReadBytes();
final int oldReaderIndex = in.readerIndex();
in.unsafe().discardSomeReadBytes();
final int newReaderIndex = in.readerIndex();
checkpoint -= oldReaderIndex - newReaderIndex;
ctx.fireInboundBufferUpdated();
}
}

View File

@ -16,7 +16,6 @@
package io.netty.handler.codec;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufFactory;
import io.netty.buffer.ByteBufIndexFinder;
import io.netty.buffer.ChannelBufType;
import io.netty.buffer.SwappedByteBuf;
@ -64,6 +63,16 @@ class ReplayingDecoderBuffer implements ByteBuf {
}
}
@Override
public void capacity(int newCapacity) {
throw new UnreplayableOperationException();
}
@Override
public int maxCapacity() {
return capacity();
}
@Override
public ChannelBufType type() {
return ChannelBufType.BYTE;
@ -137,19 +146,19 @@ class ReplayingDecoderBuffer implements ByteBuf {
@Override
public boolean getBoolean(int index) {
checkIndex(index);
checkIndex(index, 1);
return buffer.getBoolean(index);
}
@Override
public byte getByte(int index) {
checkIndex(index);
checkIndex(index, 1);
return buffer.getByte(index);
}
@Override
public short getUnsignedByte(int index) {
checkIndex(index);
checkIndex(index, 1);
return buffer.getUnsignedByte(index);
}
@ -349,11 +358,6 @@ class ReplayingDecoderBuffer implements ByteBuf {
throw new UnreplayableOperationException();
}
@Override
public ByteBufFactory factory() {
return buffer.factory();
}
@Override
public ByteOrder order() {
return buffer.order();
@ -801,12 +805,6 @@ class ReplayingDecoderBuffer implements ByteBuf {
throw new UnreplayableOperationException();
}
private void checkIndex(int index) {
if (index > buffer.writerIndex()) {
throw REPLAY;
}
}
private void checkIndex(int index, int length) {
if (index + length > buffer.writerIndex()) {
throw REPLAY;
@ -818,4 +816,9 @@ class ReplayingDecoderBuffer implements ByteBuf {
throw REPLAY;
}
}
@Override
public Unsafe unsafe() {
throw new UnreplayableOperationException();
}
}

View File

@ -20,8 +20,6 @@
package io.netty.handler.codec.base64;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufFactory;
import io.netty.buffer.HeapByteBufFactory;
/**
* Utility class for {@link ByteBuf} that encodes and decodes to and from
@ -78,39 +76,17 @@ public final class Base64 {
return encode(src, breakLines(dialect), dialect);
}
public static ByteBuf encode(
ByteBuf src, ByteBufFactory bufferFactory) {
return encode(src, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf encode(
ByteBuf src, Base64Dialect dialect, ByteBufFactory bufferFactory) {
return encode(src, breakLines(dialect), dialect, bufferFactory);
}
public static ByteBuf encode(ByteBuf src, boolean breakLines) {
return encode(src, breakLines, Base64Dialect.STANDARD);
}
public static ByteBuf encode(
ByteBuf src, boolean breakLines, Base64Dialect dialect) {
return encode(src, breakLines, dialect, HeapByteBufFactory.getInstance());
}
public static ByteBuf encode(
ByteBuf src, boolean breakLines, ByteBufFactory bufferFactory) {
return encode(src, breakLines, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf encode(
ByteBuf src, boolean breakLines, Base64Dialect dialect, ByteBufFactory bufferFactory) {
public static ByteBuf encode(ByteBuf src, boolean breakLines, Base64Dialect dialect) {
if (src == null) {
throw new NullPointerException("src");
}
ByteBuf dest = encode(
src, src.readerIndex(), src.readableBytes(), breakLines, dialect, bufferFactory);
ByteBuf dest = encode(src, src.readerIndex(), src.readableBytes(), breakLines, dialect);
src.readerIndex(src.writerIndex());
return dest;
}
@ -123,35 +99,13 @@ public final class Base64 {
return encode(src, off, len, breakLines(dialect), dialect);
}
public static ByteBuf encode(ByteBuf src, int off, int len, ByteBufFactory bufferFactory) {
return encode(src, off, len, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, Base64Dialect dialect, ByteBufFactory bufferFactory) {
return encode(src, off, len, breakLines(dialect), dialect, bufferFactory);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines) {
return encode(src, off, len, breakLines, Base64Dialect.STANDARD);
}
public static ByteBuf encode(
ByteBuf src, int off, int len,
boolean breakLines, Base64Dialect dialect) {
return encode(src, off, len, breakLines, dialect, HeapByteBufFactory.getInstance());
}
public static ByteBuf encode(
ByteBuf src, int off, int len,
boolean breakLines, ByteBufFactory bufferFactory) {
return encode(src, off, len, breakLines, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf encode(
ByteBuf src, int off, int len,
boolean breakLines, Base64Dialect dialect, ByteBufFactory bufferFactory) {
ByteBuf src, int off, int len, boolean breakLines, Base64Dialect dialect) {
if (src == null) {
throw new NullPointerException("src");
@ -159,16 +113,12 @@ public final class Base64 {
if (dialect == null) {
throw new NullPointerException("dialect");
}
if (bufferFactory == null) {
throw new NullPointerException("bufferFactory");
}
int len43 = len * 4 / 3;
ByteBuf dest = bufferFactory.getBuffer(
src.order(),
ByteBuf dest = src.unsafe().newBuffer(
len43 +
(len % 3 > 0? 4 : 0) + // Account for padding
(breakLines? len43 / MAX_LINE_LENGTH : 0)); // New lines
(breakLines? len43 / MAX_LINE_LENGTH : 0)).order(src.order()); // New lines
int d = 0;
int e = 0;
int len2 = len - 2;
@ -241,20 +191,12 @@ public final class Base64 {
}
public static ByteBuf decode(ByteBuf src, Base64Dialect dialect) {
return decode(src, dialect, HeapByteBufFactory.getInstance());
}
public static ByteBuf decode(ByteBuf src, ByteBufFactory bufferFactory) {
return decode(src, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf decode(ByteBuf src, Base64Dialect dialect, ByteBufFactory bufferFactory) {
if (src == null) {
throw new NullPointerException("src");
}
ByteBuf dest = decode(src, src.readerIndex(), src.readableBytes(), dialect, bufferFactory);
ByteBuf dest = decode(src, src.readerIndex(), src.readableBytes(), dialect);
src.readerIndex(src.writerIndex());
return dest;
}
@ -266,17 +208,6 @@ public final class Base64 {
public static ByteBuf decode(
ByteBuf src, int off, int len, Base64Dialect dialect) {
return decode(src, off, len, dialect, HeapByteBufFactory.getInstance());
}
public static ByteBuf decode(
ByteBuf src, int off, int len, ByteBufFactory bufferFactory) {
return decode(src, off, len, Base64Dialect.STANDARD, bufferFactory);
}
public static ByteBuf decode(
ByteBuf src, int off, int len, Base64Dialect dialect,
ByteBufFactory bufferFactory) {
if (src == null) {
throw new NullPointerException("src");
@ -284,14 +215,11 @@ public final class Base64 {
if (dialect == null) {
throw new NullPointerException("dialect");
}
if (bufferFactory == null) {
throw new NullPointerException("bufferFactory");
}
byte[] DECODABET = decodabet(dialect);
int len34 = len * 3 / 4;
ByteBuf dest = bufferFactory.getBuffer(src.order(), len34); // Upper limit on size of output
ByteBuf dest = src.unsafe().newBuffer(len34).order(src.order()); // Upper limit on size of output
int outBuffPosn = 0;
byte[] b4 = new byte[4];

View File

@ -0,0 +1,178 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.internal.jzlib.JZlib;
import io.netty.util.internal.jzlib.ZStream;
/**
 * Decompresses a {@link ByteBuf} using the deflate algorithm via the bundled
 * JZlib port. Once the end of the compressed stream has been reached,
 * {@link #isClosed()} returns {@code true} and no further decoding happens.
 */
public class JZlibDecoder extends ZlibDecoder {

    // Inflater state shared across decode(..) calls of this handler instance.
    private final ZStream z = new ZStream();

    // Preset dictionary installed when inflate() reports Z_NEED_DICT;
    // only set by the byte[] constructor.
    private byte[] dictionary;

    // Set once Z_STREAM_END is seen; volatile so isClosed() may be called
    // from a different thread than the one decoding.
    private volatile boolean finished;

    /**
     * Creates a new instance with the default wrapper ({@link ZlibWrapper#ZLIB}).
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JZlibDecoder() {
        this(ZlibWrapper.ZLIB);
    }

    /**
     * Creates a new instance with the specified wrapper.
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JZlibDecoder(ZlibWrapper wrapper) {
        if (wrapper == null) {
            throw new NullPointerException("wrapper");
        }
        int resultCode = z.inflateInit(ZlibUtil.convertWrapperType(wrapper));
        if (resultCode != JZlib.Z_OK) {
            ZlibUtil.fail(z, "initialization failure", resultCode);
        }
    }

    /**
     * Creates a new instance with the specified preset dictionary. The wrapper
     * is always {@link ZlibWrapper#ZLIB} because it is the only format that
     * supports the preset dictionary.
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JZlibDecoder(byte[] dictionary) {
        if (dictionary == null) {
            throw new NullPointerException("dictionary");
        }
        this.dictionary = dictionary;
        int resultCode;
        resultCode = z.inflateInit(JZlib.W_ZLIB);
        if (resultCode != JZlib.Z_OK) {
            ZlibUtil.fail(z, "initialization failure", resultCode);
        }
    }

    /**
     * Returns {@code true} if and only if the end of the compressed stream
     * has been reached.
     */
    @Override
    public boolean isClosed() {
        return finished;
    }

    @Override
    public void decode(
            ChannelHandlerContext ctx,
            ByteBuf in, ByteBuf out) throws Exception {
        if (!in.readable()) {
            return;
        }
        try {
            // Configure input: feed the inflater straight from the buffer's
            // backing array when one exists, otherwise copy into a fresh array.
            int inputLength = in.readableBytes();
            boolean inHasArray = in.hasArray();
            z.avail_in = inputLength;
            if (inHasArray) {
                z.next_in = in.array();
                z.next_in_index = in.arrayOffset() + in.readerIndex();
            } else {
                byte[] array = new byte[inputLength];
                in.readBytes(array);
                z.next_in = array;
                z.next_in_index = 0;
            }
            int oldNextInIndex = z.next_in_index;

            // Configure output. A non-array output buffer gets a temporary
            // array whose contents are copied out after each inflate() call.
            int maxOutputLength = inputLength << 1;
            boolean outHasArray = out.hasArray();
            if (!outHasArray) {
                z.next_out = new byte[maxOutputLength];
            }

            try {
                // Keep inflating until the stream ends or no further progress
                // can be made on the currently available input.
                loop: for (;;) {
                    z.avail_out = maxOutputLength;
                    if (outHasArray) {
                        out.ensureWritableBytes(maxOutputLength);
                        z.next_out = out.array();
                        z.next_out_index = out.arrayOffset() + out.writerIndex();
                    } else {
                        z.next_out_index = 0;
                    }
                    int oldNextOutIndex = z.next_out_index;

                    // Decompress 'in' into 'out'
                    int resultCode = z.inflate(JZlib.Z_SYNC_FLUSH);
                    int outputLength = z.next_out_index - oldNextOutIndex;
                    if (outputLength > 0) {
                        if (outHasArray) {
                            out.writerIndex(out.writerIndex() + outputLength);
                        } else {
                            out.writeBytes(z.next_out, 0, outputLength);
                        }
                    }

                    switch (resultCode) {
                    case JZlib.Z_NEED_DICT:
                        // The stream requires a preset dictionary: install the
                        // configured one, or fail if none was provided.
                        if (dictionary == null) {
                            ZlibUtil.fail(z, "decompression failure", resultCode);
                        } else {
                            resultCode = z.inflateSetDictionary(dictionary, dictionary.length);
                            if (resultCode != JZlib.Z_OK) {
                                ZlibUtil.fail(z, "failed to set the dictionary", resultCode);
                            }
                        }
                        break;
                    case JZlib.Z_STREAM_END:
                        finished = true; // Do not decode anymore.
                        z.inflateEnd();
                        break loop;
                    case JZlib.Z_OK:
                        break;
                    case JZlib.Z_BUF_ERROR:
                        // No progress possible without more input; stop here.
                        if (z.avail_in <= 0) {
                            break loop;
                        }
                        break;
                    default:
                        ZlibUtil.fail(z, "decompression failure", resultCode);
                    }
                }
            } finally {
                // Reflect the bytes actually consumed by the inflater in the
                // reader index (the non-array path already read them above).
                if (inHasArray) {
                    in.skipBytes(z.next_in_index - oldNextInIndex);
                }
            }
        } finally {
            // Deference the external references explicitly to tell the VM that
            // the allocated byte arrays are temporary so that the call stack
            // can be utilized.
            // I'm not sure if the modern VMs do this optimization though.
            z.next_in = null;
            z.next_out = null;
        }
    }
}

View File

@ -0,0 +1,411 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.internal.jzlib.JZlib;
import io.netty.util.internal.jzlib.ZStream;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Compresses a {@link ByteBuf} using the deflate algorithm.
* @apiviz.landmark
* @apiviz.has io.netty.handler.codec.compression.ZlibWrapper
*/
public class JZlibEncoder extends ZlibEncoder {
private static final byte[] EMPTY_ARRAY = new byte[0];
private final ZStream z = new ZStream();
private final AtomicBoolean finished = new AtomicBoolean();
private volatile ChannelHandlerContext ctx;
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the default wrapper ({@link ZlibWrapper#ZLIB}).
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder() {
    // Delegate to the level-only constructor with the default level of 6.
    this(6);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the default wrapper ({@link ZlibWrapper#ZLIB}).
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(int compressionLevel) {
    // Delegate with the default wrapper (ZLIB).
    this(ZlibWrapper.ZLIB, compressionLevel);
}
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified wrapper.
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(ZlibWrapper wrapper) {
    // Delegate with the default compression level of 6.
    this(wrapper, 6);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified wrapper.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(ZlibWrapper wrapper, int compressionLevel) {
    // Delegate with the default windowBits (15) and memLevel (8).
    this(wrapper, compressionLevel, 15, 8);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* the specified {@code windowBits}, the specified {@code memLevel}, and
* the specified wrapper.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param windowBits
* The base two logarithm of the size of the history buffer. The
* value should be in the range {@code 9} to {@code 15} inclusive.
* Larger values result in better compression at the expense of
* memory usage. The default value is {@code 15}.
* @param memLevel
* How much memory should be allocated for the internal compression
* state. {@code 1} uses minimum memory and {@code 9} uses maximum
* memory. Larger values result in better and faster compression
* at the expense of memory usage. The default value is {@code 8}
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(ZlibWrapper wrapper, int compressionLevel, int windowBits, int memLevel) {
    // Validate the numeric parameters before initializing the deflater.
    if (compressionLevel < 0 || compressionLevel > 9) {
        throw new IllegalArgumentException(
                "compressionLevel: " + compressionLevel +
                " (expected: 0-9)");
    }
    if (windowBits < 9 || windowBits > 15) {
        throw new IllegalArgumentException(
                "windowBits: " + windowBits + " (expected: 9-15)");
    }
    if (memLevel < 1 || memLevel > 9) {
        throw new IllegalArgumentException(
                "memLevel: " + memLevel + " (expected: 1-9)");
    }
    if (wrapper == null) {
        throw new NullPointerException("wrapper");
    }
    // ZLIB_OR_NONE is explicitly rejected for compression (per the message,
    // it is presumably an auto-detecting decoder-only wrapper).
    if (wrapper == ZlibWrapper.ZLIB_OR_NONE) {
        throw new IllegalArgumentException(
                "wrapper '" + ZlibWrapper.ZLIB_OR_NONE + "' is not " +
                "allowed for compression.");
    }
    // The deflater state is shared with encode(..); guard initialization
    // with the same lock.
    synchronized (z) {
        int resultCode = z.deflateInit(
                compressionLevel, windowBits, memLevel,
                ZlibUtil.convertWrapperType(wrapper));
        if (resultCode != JZlib.Z_OK) {
            ZlibUtil.fail(z, "initialization failure", resultCode);
        }
    }
}
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(byte[] dictionary) {
    // Delegate with the default compression level of 6.
    this(6, dictionary);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(int compressionLevel, byte[] dictionary) {
    // Delegate with the default windowBits (15) and memLevel (8).
    this(compressionLevel, 15, 8, dictionary);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* the specified {@code windowBits}, the specified {@code memLevel},
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param windowBits
* The base two logarithm of the size of the history buffer. The
* value should be in the range {@code 9} to {@code 15} inclusive.
* Larger values result in better compression at the expense of
* memory usage. The default value is {@code 15}.
* @param memLevel
* How much memory should be allocated for the internal compression
* state. {@code 1} uses minimum memory and {@code 9} uses maximum
* memory. Larger values result in better and faster compression
* at the expense of memory usage. The default value is {@code 8}
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public JZlibEncoder(int compressionLevel, int windowBits, int memLevel, byte[] dictionary) {
    // Validate all parameters before initializing the deflater.
    if (compressionLevel < 0 || compressionLevel > 9) {
        throw new IllegalArgumentException("compressionLevel: " + compressionLevel + " (expected: 0-9)");
    }
    if (windowBits < 9 || windowBits > 15) {
        throw new IllegalArgumentException(
                "windowBits: " + windowBits + " (expected: 9-15)");
    }
    if (memLevel < 1 || memLevel > 9) {
        throw new IllegalArgumentException(
                "memLevel: " + memLevel + " (expected: 1-9)");
    }
    if (dictionary == null) {
        throw new NullPointerException("dictionary");
    }
    // The deflater state is shared with encode(..); guard initialization
    // with the same lock.
    synchronized (z) {
        int resultCode;
        resultCode = z.deflateInit(
                compressionLevel, windowBits, memLevel,
                JZlib.W_ZLIB); // Default: ZLIB format
        if (resultCode != JZlib.Z_OK) {
            ZlibUtil.fail(z, "initialization failure", resultCode);
        } else {
            // Install the preset dictionary right after a successful init.
            resultCode = z.deflateSetDictionary(dictionary, dictionary.length);
            if (resultCode != JZlib.Z_OK) {
                ZlibUtil.fail(z, "failed to set the dictionary", resultCode);
            }
        }
    }
}
    /**
     * Finishes the compressed stream (writing the stream footer), using a new
     * future obtained from the channel this handler is attached to.
     */
    @Override
    public ChannelFuture close() {
        return close(ctx().channel().newFuture());
    }
    /**
     * Finishes the compressed stream and notifies the given {@code future}
     * once the stream footer has been written.
     */
    @Override
    public ChannelFuture close(ChannelFuture future) {
        return finishEncode(ctx(), future);
    }
private ChannelHandlerContext ctx() {
ChannelHandlerContext ctx = this.ctx;
if (ctx == null) {
throw new IllegalStateException("not added to a pipeline");
}
return ctx;
}
    /**
     * Returns {@code true} if and only if the compressed stream has been
     * finished, i.e. the footer has been (or is being) written.
     */
    @Override
    public boolean isClosed() {
        return finished.get();
    }
    /**
     * Deflates the readable bytes of {@code in} into {@code out} with
     * {@code Z_SYNC_FLUSH}, so every call produces output that the peer can
     * decompress immediately. Does nothing once the stream has been finished
     * via {@link #close()}.
     */
    @Override
    public void encode(ChannelHandlerContext ctx,
            ByteBuf in, ByteBuf out) throws Exception {
        if (finished.get()) {
            return;
        }
        // All access to the shared ZStream state is serialized on 'z'.
        synchronized (z) {
            try {
                // Configure input: use the backing array directly when there is
                // one; otherwise copy the readable bytes into a temporary array.
                int inputLength = in.readableBytes();
                boolean inHasArray = in.hasArray();
                z.avail_in = inputLength;
                if (inHasArray) {
                    z.next_in = in.array();
                    z.next_in_index = in.arrayOffset() + in.readerIndex();
                } else {
                    byte[] array = new byte[inputLength];
                    in.readBytes(array);
                    z.next_in = array;
                    z.next_in_index = 0;
                }
                int oldNextInIndex = z.next_in_index;
                // Configure output. Worst-case deflate expansion is roughly
                // input + 0.1% + 12 bytes.
                int maxOutputLength = (int) Math.ceil(inputLength * 1.001) + 12;
                boolean outHasArray = out.hasArray();
                z.avail_out = maxOutputLength;
                if (outHasArray) {
                    out.ensureWritableBytes(maxOutputLength);
                    z.next_out = out.array();
                    z.next_out_index = out.arrayOffset() + out.writerIndex();
                } else {
                    z.next_out = new byte[maxOutputLength];
                    z.next_out_index = 0;
                }
                int oldNextOutIndex = z.next_out_index;
                // Note that Z_PARTIAL_FLUSH has been deprecated.
                int resultCode;
                try {
                    resultCode = z.deflate(JZlib.Z_SYNC_FLUSH);
                } finally {
                    // Advance the reader index by however much deflate consumed,
                    // even when it failed (only needed in the zero-copy path).
                    if (inHasArray) {
                        in.skipBytes(z.next_in_index - oldNextInIndex);
                    }
                }
                if (resultCode != JZlib.Z_OK) {
                    ZlibUtil.fail(z, "compression failure", resultCode);
                }
                // Publish the produced bytes to 'out'.
                int outputLength = z.next_out_index - oldNextOutIndex;
                if (outputLength > 0) {
                    if (outHasArray) {
                        out.writerIndex(out.writerIndex() + outputLength);
                    } else {
                        out.writeBytes(z.next_out, 0, outputLength);
                    }
                }
            } finally {
                // Dereference the external references explicitly to tell the VM that
                // the allocated byte arrays are temporary so that the call stack
                // can be utilized.
                // I'm not sure if the modern VMs do this optimization though.
                z.next_in = null;
                z.next_out = null;
            }
        }
    }
    /**
     * Intercepts the outbound close: finishes the zlib stream and writes the
     * footer first, then closes the channel once that write completes (or
     * after a 10-second timeout as a safety net).
     */
    @Override
    public void close(
            final ChannelHandlerContext ctx,
            final ChannelFuture future) throws Exception {
        ChannelFuture f = finishEncode(ctx, ctx.newFuture());
        f.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f) throws Exception {
                ctx.close(future);
            }
        });
        if (!f.isDone()) {
            // Ensure the channel is closed even if the write operation does not
            // complete in time.
            ctx.executor().schedule(new Runnable() {
                @Override
                public void run() {
                    ctx.close(future);
                }
            }, 10, TimeUnit.SECONDS); // FIXME: Magic number
        }
    }
    /**
     * Finishes the zlib stream: deflates with {@code Z_FINISH}, writes the
     * resulting stream footer to the channel and releases the deflater state.
     * Only the first caller performs the work; subsequent calls succeed
     * immediately without writing anything.
     */
    private ChannelFuture finishEncode(ChannelHandlerContext ctx, ChannelFuture future) {
        if (!finished.compareAndSet(false, true)) {
            // Already finished by an earlier call - nothing left to write.
            future.setSuccess();
            return future;
        }
        ByteBuf footer;
        synchronized (z) {
            try {
                // Configure input: no more application data, only the trailer remains.
                z.next_in = EMPTY_ARRAY;
                z.next_in_index = 0;
                z.avail_in = 0;
                // Configure output.
                byte[] out = new byte[32]; // room for ADLER32 + ZLIB / CRC32 + GZIP header
                z.next_out = out;
                z.next_out_index = 0;
                z.avail_out = out.length;
                // Write the ADLER32 checksum (stream footer).
                int resultCode = z.deflate(JZlib.Z_FINISH);
                if (resultCode != JZlib.Z_OK && resultCode != JZlib.Z_STREAM_END) {
                    future.setFailure(ZlibUtil.exception(z, "compression failure", resultCode));
                    return future;
                } else if (z.next_out_index != 0) {
                    footer = Unpooled.wrappedBuffer(out, 0, z.next_out_index);
                } else {
                    footer = Unpooled.EMPTY_BUFFER;
                }
            } finally {
                // Always release the deflater state, even on failure
                // (runs before the early return above takes effect).
                z.deflateEnd();
                // Dereference the external references explicitly to tell the VM that
                // the allocated byte arrays are temporary so that the call stack
                // can be utilized.
                // I'm not sure if the modern VMs do this optimization though.
                z.next_in = null;
                z.next_out = null;
            }
        }
        ctx.write(footer, future);
        return future;
    }
    /**
     * Remembers the handler context so that {@link #close()} can later be
     * invoked without an explicit context.
     */
    @Override
    public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
        this.ctx = ctx;
    }
}

View File

@ -0,0 +1,270 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
/**
 * Compresses a {@link ByteBuf} using the deflate algorithm, backed by the
 * JDK's {@link Deflater}. Requires Java 7 or later because it relies on
 * {@link Deflater#SYNC_FLUSH}.
 *
 * @apiviz.landmark
 * @apiviz.has io.netty.handler.codec.compression.ZlibWrapper
 */
public class JdkZlibEncoder extends ZlibEncoder {
    // Scratch buffer reused by every deflate() call; guarded by 'deflater'.
    private final byte[] encodeBuf = new byte[8192];
    private final Deflater deflater;
    // Set once the stream footer has been written; later writes pass through.
    private final AtomicBoolean finished = new AtomicBoolean();
    private volatile ChannelHandlerContext ctx;
    /*
     * GZIP support
     */
    private final boolean gzip;
    // Running CRC32 of all uncompressed input, written in the GZIP trailer.
    private final CRC32 crc = new CRC32();
    // Fixed 10-byte GZIP header: magic (0x1f 0x8b), CM = 8 (deflate),
    // no flags, zero MTIME, XFL = 0, OS = 0.
    private static final byte[] gzipHeader = {0x1f, (byte) 0x8b, Deflater.DEFLATED, 0, 0, 0, 0, 0, 0, 0};
    // True until the GZIP header has been emitted on the first encode() call.
    private boolean writeHeader = true;
    /**
     * Creates a new zlib encoder with the default compression level ({@code 6})
     * and the default wrapper ({@link ZlibWrapper#ZLIB}).
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder() {
        this(6);
    }
    /**
     * Creates a new zlib encoder with the specified {@code compressionLevel}
     * and the default wrapper ({@link ZlibWrapper#ZLIB}).
     *
     * @param compressionLevel
     *        {@code 1} yields the fastest compression and {@code 9} yields the
     *        best compression. {@code 0} means no compression. The default
     *        compression level is {@code 6}.
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder(int compressionLevel) {
        this(ZlibWrapper.ZLIB, compressionLevel);
    }
    /**
     * Creates a new zlib encoder with the default compression level ({@code 6})
     * and the specified wrapper.
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder(ZlibWrapper wrapper) {
        this(wrapper, 6);
    }
    /**
     * Creates a new zlib encoder with the specified {@code compressionLevel}
     * and the specified wrapper.
     *
     * @param compressionLevel
     *        {@code 1} yields the fastest compression and {@code 9} yields the
     *        best compression. {@code 0} means no compression. The default
     *        compression level is {@code 6}.
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder(ZlibWrapper wrapper, int compressionLevel) {
        if (compressionLevel < 0 || compressionLevel > 9) {
            throw new IllegalArgumentException(
                    "compressionLevel: " + compressionLevel + " (expected: 0-9)");
        }
        if (wrapper == null) {
            throw new NullPointerException("wrapper");
        }
        if (wrapper == ZlibWrapper.ZLIB_OR_NONE) {
            // ZLIB_OR_NONE only makes sense for decompression (auto-detection).
            throw new IllegalArgumentException(
                    "wrapper '" + ZlibWrapper.ZLIB_OR_NONE + "' is not " +
                    "allowed for compression.");
        }
        gzip = wrapper == ZlibWrapper.GZIP;
        // nowrap (raw deflate) for GZIP and NONE; the GZIP header and trailer
        // are written manually by encode() / finishEncode().
        deflater = new Deflater(compressionLevel, wrapper != ZlibWrapper.ZLIB);
    }
    /**
     * Creates a new zlib encoder with the default compression level ({@code 6})
     * and the specified preset dictionary. The wrapper is always
     * {@link ZlibWrapper#ZLIB} because it is the only format that supports
     * the preset dictionary.
     *
     * @param dictionary the preset dictionary
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder(byte[] dictionary) {
        this(6, dictionary);
    }
    /**
     * Creates a new zlib encoder with the specified {@code compressionLevel}
     * and the specified preset dictionary. The wrapper is always
     * {@link ZlibWrapper#ZLIB} because it is the only format that supports
     * the preset dictionary.
     *
     * @param compressionLevel
     *        {@code 1} yields the fastest compression and {@code 9} yields the
     *        best compression. {@code 0} means no compression. The default
     *        compression level is {@code 6}.
     * @param dictionary the preset dictionary
     *
     * @throws CompressionException if failed to initialize zlib
     */
    public JdkZlibEncoder(int compressionLevel, byte[] dictionary) {
        if (compressionLevel < 0 || compressionLevel > 9) {
            throw new IllegalArgumentException(
                    "compressionLevel: " + compressionLevel + " (expected: 0-9)");
        }
        if (dictionary == null) {
            throw new NullPointerException("dictionary");
        }
        gzip = false;
        deflater = new Deflater(compressionLevel);
        deflater.setDictionary(dictionary);
    }
    /**
     * Finishes the compressed stream using a new future from the handler's
     * context.
     */
    @Override
    public ChannelFuture close() {
        return close(ctx().newFuture());
    }
    /**
     * Finishes the compressed stream and notifies {@code future} once the
     * stream footer has been written.
     */
    @Override
    public ChannelFuture close(ChannelFuture future) {
        return finishEncode(ctx(), future);
    }
    /**
     * Returns the context this handler was added with.
     *
     * @throws IllegalStateException if the handler has not been added to a pipeline yet
     */
    private ChannelHandlerContext ctx() {
        ChannelHandlerContext ctx = this.ctx;
        if (ctx == null) {
            throw new IllegalStateException("not added to a pipeline");
        }
        return ctx;
    }
    /**
     * Returns {@code true} if and only if the compressed stream has been
     * finished.
     */
    @Override
    public boolean isClosed() {
        return finished.get();
    }
    /**
     * Deflates the readable bytes of {@code in} into {@code out} using
     * {@code SYNC_FLUSH} so the peer can decompress each chunk immediately.
     * Once the stream is finished, data is passed through uncompressed.
     */
    @Override
    public void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) throws Exception {
        if (finished.get()) {
            // Stream already finished: pass the data through unmodified.
            out.writeBytes(in);
            in.discardReadBytes();
            return;
        }
        ByteBuf uncompressed = in;
        byte[] inAry = new byte[uncompressed.readableBytes()];
        uncompressed.readBytes(inAry);
        // Worst-case deflate expansion is roughly input + 0.1% + 12 bytes.
        int sizeEstimate = (int) Math.ceil(inAry.length * 1.001) + 12;
        out.ensureWritableBytes(sizeEstimate);
        synchronized (deflater) {
            if (gzip) {
                crc.update(inAry);
                if (writeHeader) {
                    // First chunk: emit the fixed GZIP header.
                    out.writeBytes(gzipHeader);
                    writeHeader = false;
                }
            }
            deflater.setInput(inAry);
            while (!deflater.needsInput()) {
                int numBytes = deflater.deflate(encodeBuf, 0, encodeBuf.length, Deflater.SYNC_FLUSH);
                out.writeBytes(encodeBuf, 0, numBytes);
            }
        }
    }
    /**
     * Intercepts the outbound close: finishes the stream and writes the
     * footer first, then closes the channel once that write completes (or
     * after a 10-second timeout as a safety net).
     */
    @Override
    public void close(final ChannelHandlerContext ctx, final ChannelFuture future) throws Exception {
        ChannelFuture f = finishEncode(ctx, ctx.newFuture());
        f.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture f) throws Exception {
                ctx.close(future);
            }
        });
        if (!f.isDone()) {
            // Ensure the channel is closed even if the write operation does not
            // complete in time.
            ctx.executor().schedule(new Runnable() {
                @Override
                public void run() {
                    ctx.close(future);
                }
            }, 10, TimeUnit.SECONDS); // FIXME: Magic number
        }
    }
    /**
     * Drains any remaining deflater output, appends the GZIP trailer when
     * applicable, releases the deflater and flushes the footer downstream.
     * Only the first caller performs the work.
     */
    private ChannelFuture finishEncode(final ChannelHandlerContext ctx, ChannelFuture future) {
        if (!finished.compareAndSet(false, true)) {
            future.setSuccess();
            return future;
        }
        ByteBuf footer = Unpooled.buffer();
        synchronized (deflater) {
            deflater.finish();
            while (!deflater.finished()) {
                int numBytes = deflater.deflate(encodeBuf, 0, encodeBuf.length);
                footer.writeBytes(encodeBuf, 0, numBytes);
            }
            if (gzip) {
                // GZIP trailer: CRC32 of the uncompressed data followed by
                // ISIZE (total uncompressed byte count), both little-endian.
                int crcValue = (int) crc.getValue();
                int uncBytes = deflater.getTotalIn();
                footer.writeByte(crcValue);
                footer.writeByte(crcValue >>> 8);
                footer.writeByte(crcValue >>> 16);
                footer.writeByte(crcValue >>> 24);
                footer.writeByte(uncBytes);
                footer.writeByte(uncBytes >>> 8);
                footer.writeByte(uncBytes >>> 16);
                footer.writeByte(uncBytes >>> 24);
            }
            deflater.end();
        }
        ctx.nextOutboundByteBuffer().writeBytes(footer);
        ctx.flush(future);
        return future;
    }
    /**
     * Remembers the handler context so that {@link #close()} can later be
     * invoked without an explicit context.
     */
    @Override
    public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
        this.ctx = ctx;
    }
}

View File

@ -0,0 +1,96 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.compression;
import io.netty.util.internal.DetectionUtil;
/**
 * Creates a new {@link ZlibEncoder} and a new {@link ZlibDecoder}.
 *
 * <p>On Java 7 or later the JDK-backed {@link JdkZlibEncoder} is used;
 * on older runtimes the bundled JZlib-based {@link JZlibEncoder} is used
 * instead. Note that {@code java.util.zip.Deflater} exposes no
 * {@code windowBits} / {@code memLevel} knobs, so on Java 7+ the overloads
 * taking those parameters silently fall back to the JDK defaults.
 * Decoders are always JZlib-based.
 */
public final class ZlibCodecFactory {

    // True when the JDK's java.util.zip implementation (Java 7+) is usable.
    private static boolean useJdkZlib() {
        return DetectionUtil.javaVersion() >= 7;
    }

    public static ZlibEncoder newZlibEncoder(int compressionLevel) {
        return useJdkZlib()
                ? new JdkZlibEncoder(compressionLevel)
                : new JZlibEncoder(compressionLevel);
    }

    public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper) {
        return useJdkZlib()
                ? new JdkZlibEncoder(wrapper)
                : new JZlibEncoder(wrapper);
    }

    public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel) {
        return useJdkZlib()
                ? new JdkZlibEncoder(wrapper, compressionLevel)
                : new JZlibEncoder(wrapper, compressionLevel);
    }

    public static ZlibEncoder newZlibEncoder(ZlibWrapper wrapper, int compressionLevel, int windowBits, int memLevel) {
        // The JDK deflater has no windowBits/memLevel settings; they are
        // honoured only by the JZlib implementation.
        return useJdkZlib()
                ? new JdkZlibEncoder(wrapper, compressionLevel)
                : new JZlibEncoder(wrapper, compressionLevel, windowBits, memLevel);
    }

    public static ZlibEncoder newZlibEncoder(byte[] dictionary) {
        return useJdkZlib()
                ? new JdkZlibEncoder(dictionary)
                : new JZlibEncoder(dictionary);
    }

    public static ZlibEncoder newZlibEncoder(int compressionLevel, byte[] dictionary) {
        return useJdkZlib()
                ? new JdkZlibEncoder(compressionLevel, dictionary)
                : new JZlibEncoder(compressionLevel, dictionary);
    }

    public static ZlibEncoder newZlibEncoder(int compressionLevel, int windowBits, int memLevel, byte[] dictionary) {
        // The JDK deflater has no windowBits/memLevel settings; they are
        // honoured only by the JZlib implementation.
        return useJdkZlib()
                ? new JdkZlibEncoder(compressionLevel, dictionary)
                : new JZlibEncoder(compressionLevel, windowBits, memLevel, dictionary);
    }

    public static ZlibDecoder newZlibDecoder() {
        return new JZlibDecoder();
    }

    public static ZlibDecoder newZlibDecoder(ZlibWrapper wrapper) {
        return new JZlibDecoder(wrapper);
    }

    public static ZlibDecoder newZlibDecoder(byte[] dictionary) {
        return new JZlibDecoder(dictionary);
    }

    private ZlibCodecFactory() {
        // Utility class - not instantiable.
    }
}

View File

@ -16,169 +16,18 @@
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToByteDecoder;
import io.netty.util.internal.jzlib.JZlib;
import io.netty.util.internal.jzlib.ZStream;
/**
* Decompresses a {@link ByteBuf} using the deflate algorithm.
*
* @apiviz.landmark
* @apiviz.has io.netty.handler.codec.compression.ZlibWrapper
*/
public class ZlibDecoder extends ByteToByteDecoder {
private final ZStream z = new ZStream();
private byte[] dictionary;
private volatile boolean finished;
/**
* Creates a new instance with the default wrapper ({@link ZlibWrapper#ZLIB}).
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibDecoder() {
this(ZlibWrapper.ZLIB);
}
/**
* Creates a new instance with the specified wrapper.
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibDecoder(ZlibWrapper wrapper) {
if (wrapper == null) {
throw new NullPointerException("wrapper");
}
int resultCode = z.inflateInit(ZlibUtil.convertWrapperType(wrapper));
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "initialization failure", resultCode);
}
}
/**
* Creates a new instance with the specified preset dictionary. The wrapper
* is always {@link ZlibWrapper#ZLIB} because it is the only format that
* supports the preset dictionary.
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibDecoder(byte[] dictionary) {
if (dictionary == null) {
throw new NullPointerException("dictionary");
}
this.dictionary = dictionary;
int resultCode;
resultCode = z.inflateInit(JZlib.W_ZLIB);
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "initialization failure", resultCode);
}
}
public abstract class ZlibDecoder extends ByteToByteDecoder {
/**
* Returns {@code true} if and only if the end of the compressed stream
* has been reached.
*/
public boolean isClosed() {
return finished;
}
@Override
public void decode(
ChannelHandlerContext ctx,
ByteBuf in, ByteBuf out) throws Exception {
if (!in.readable()) {
return;
}
try {
// Configure input.
int inputLength = in.readableBytes();
boolean inHasArray = in.hasArray();
z.avail_in = inputLength;
if (inHasArray) {
z.next_in = in.array();
z.next_in_index = in.arrayOffset() + in.readerIndex();
} else {
byte[] array = new byte[inputLength];
in.readBytes(array);
z.next_in = array;
z.next_in_index = 0;
}
int oldNextInIndex = z.next_in_index;
// Configure output.
int maxOutputLength = inputLength << 1;
boolean outHasArray = out.hasArray();
if (!outHasArray) {
z.next_out = new byte[maxOutputLength];
}
try {
loop: for (;;) {
z.avail_out = maxOutputLength;
if (outHasArray) {
out.ensureWritableBytes(maxOutputLength);
z.next_out = out.array();
z.next_out_index = out.arrayOffset() + out.writerIndex();
} else {
z.next_out_index = 0;
}
int oldNextOutIndex = z.next_out_index;
// Decompress 'in' into 'out'
int resultCode = z.inflate(JZlib.Z_SYNC_FLUSH);
int outputLength = z.next_out_index - oldNextOutIndex;
if (outputLength > 0) {
if (outHasArray) {
out.writerIndex(out.writerIndex() + outputLength);
} else {
out.writeBytes(z.next_out, 0, outputLength);
}
}
switch (resultCode) {
case JZlib.Z_NEED_DICT:
if (dictionary == null) {
ZlibUtil.fail(z, "decompression failure", resultCode);
} else {
resultCode = z.inflateSetDictionary(dictionary, dictionary.length);
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "failed to set the dictionary", resultCode);
}
}
break;
case JZlib.Z_STREAM_END:
finished = true; // Do not decode anymore.
z.inflateEnd();
break loop;
case JZlib.Z_OK:
break;
case JZlib.Z_BUF_ERROR:
if (z.avail_in <= 0) {
break loop;
}
break;
default:
ZlibUtil.fail(z, "decompression failure", resultCode);
}
}
} finally {
if (inHasArray) {
in.skipBytes(z.next_in_index - oldNextInIndex);
}
}
} finally {
// Deference the external references explicitly to tell the VM that
// the allocated byte arrays are temporary so that the call stack
// can be utilized.
// I'm not sure if the modern VMs do this optimization though.
z.next_in = null;
z.next_out = null;
}
}
public abstract boolean isClosed();
}

View File

@ -16,394 +16,24 @@
package io.netty.handler.codec.compression;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToByteEncoder;
import io.netty.util.internal.jzlib.JZlib;
import io.netty.util.internal.jzlib.ZStream;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Compresses a {@link ByteBuf} using the deflate algorithm.
*
* @apiviz.landmark
* @apiviz.has io.netty.handler.codec.compression.ZlibWrapper
*/
public class ZlibEncoder extends ByteToByteEncoder {
private static final byte[] EMPTY_ARRAY = new byte[0];
private final ZStream z = new ZStream();
private final AtomicBoolean finished = new AtomicBoolean();
private volatile ChannelHandlerContext ctx;
public abstract class ZlibEncoder extends ByteToByteEncoder {
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the default wrapper ({@link ZlibWrapper#ZLIB}).
*
* @throws CompressionException if failed to initialize zlib
* Returns {@code true} if and only if the end of the compressed stream
* has been reached.
*/
public ZlibEncoder() {
this(6);
}
public abstract boolean isClosed();
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the default wrapper ({@link ZlibWrapper#ZLIB}).
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(int compressionLevel) {
this(ZlibWrapper.ZLIB, compressionLevel);
}
public abstract ChannelFuture close();
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified wrapper.
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(ZlibWrapper wrapper) {
this(wrapper, 6);
}
public abstract ChannelFuture close(ChannelFuture future);
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified wrapper.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(ZlibWrapper wrapper, int compressionLevel) {
this(wrapper, compressionLevel, 15, 8);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* the specified {@code windowBits}, the specified {@code memLevel}, and
* the specified wrapper.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param windowBits
* The base two logarithm of the size of the history buffer. The
* value should be in the range {@code 9} to {@code 15} inclusive.
* Larger values result in better compression at the expense of
* memory usage. The default value is {@code 15}.
* @param memLevel
* How much memory should be allocated for the internal compression
* state. {@code 1} uses minimum memory and {@code 9} uses maximum
* memory. Larger values result in better and faster compression
* at the expense of memory usage. The default value is {@code 8}
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(ZlibWrapper wrapper, int compressionLevel, int windowBits, int memLevel) {
if (compressionLevel < 0 || compressionLevel > 9) {
throw new IllegalArgumentException(
"compressionLevel: " + compressionLevel +
" (expected: 0-9)");
}
if (windowBits < 9 || windowBits > 15) {
throw new IllegalArgumentException(
"windowBits: " + windowBits + " (expected: 9-15)");
}
if (memLevel < 1 || memLevel > 9) {
throw new IllegalArgumentException(
"memLevel: " + memLevel + " (expected: 1-9)");
}
if (wrapper == null) {
throw new NullPointerException("wrapper");
}
if (wrapper == ZlibWrapper.ZLIB_OR_NONE) {
throw new IllegalArgumentException(
"wrapper '" + ZlibWrapper.ZLIB_OR_NONE + "' is not " +
"allowed for compression.");
}
synchronized (z) {
int resultCode = z.deflateInit(
compressionLevel, windowBits, memLevel,
ZlibUtil.convertWrapperType(wrapper));
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "initialization failure", resultCode);
}
}
}
/**
* Creates a new zlib encoder with the default compression level ({@code 6}),
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(byte[] dictionary) {
this(6, dictionary);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* default window bits ({@code 15}), default memory level ({@code 8}),
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(int compressionLevel, byte[] dictionary) {
this(compressionLevel, 15, 8, dictionary);
}
/**
* Creates a new zlib encoder with the specified {@code compressionLevel},
* the specified {@code windowBits}, the specified {@code memLevel},
* and the specified preset dictionary. The wrapper is always
* {@link ZlibWrapper#ZLIB} because it is the only format that supports
* the preset dictionary.
*
* @param compressionLevel
* {@code 1} yields the fastest compression and {@code 9} yields the
* best compression. {@code 0} means no compression. The default
* compression level is {@code 6}.
* @param windowBits
* The base two logarithm of the size of the history buffer. The
* value should be in the range {@code 9} to {@code 15} inclusive.
* Larger values result in better compression at the expense of
* memory usage. The default value is {@code 15}.
* @param memLevel
* How much memory should be allocated for the internal compression
* state. {@code 1} uses minimum memory and {@code 9} uses maximum
* memory. Larger values result in better and faster compression
* at the expense of memory usage. The default value is {@code 8}
* @param dictionary the preset dictionary
*
* @throws CompressionException if failed to initialize zlib
*/
public ZlibEncoder(int compressionLevel, int windowBits, int memLevel, byte[] dictionary) {
if (compressionLevel < 0 || compressionLevel > 9) {
throw new IllegalArgumentException("compressionLevel: " + compressionLevel + " (expected: 0-9)");
}
if (windowBits < 9 || windowBits > 15) {
throw new IllegalArgumentException(
"windowBits: " + windowBits + " (expected: 9-15)");
}
if (memLevel < 1 || memLevel > 9) {
throw new IllegalArgumentException(
"memLevel: " + memLevel + " (expected: 1-9)");
}
if (dictionary == null) {
throw new NullPointerException("dictionary");
}
synchronized (z) {
int resultCode;
resultCode = z.deflateInit(
compressionLevel, windowBits, memLevel,
JZlib.W_ZLIB); // Default: ZLIB format
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "initialization failure", resultCode);
} else {
resultCode = z.deflateSetDictionary(dictionary, dictionary.length);
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "failed to set the dictionary", resultCode);
}
}
}
}
public ChannelFuture close() {
return close(ctx().channel().newFuture());
}
public ChannelFuture close(ChannelFuture future) {
return finishEncode(ctx(), future);
}
private ChannelHandlerContext ctx() {
ChannelHandlerContext ctx = this.ctx;
if (ctx == null) {
throw new IllegalStateException("not added to a pipeline");
}
return ctx;
}
public boolean isClosed() {
return finished.get();
}
@Override
public void encode(ChannelHandlerContext ctx,
ByteBuf in, ByteBuf out) throws Exception {
if (finished.get()) {
return;
}
synchronized (z) {
try {
// Configure input.
int inputLength = in.readableBytes();
boolean inHasArray = in.hasArray();
z.avail_in = inputLength;
if (inHasArray) {
z.next_in = in.array();
z.next_in_index = in.arrayOffset() + in.readerIndex();
} else {
byte[] array = new byte[inputLength];
in.readBytes(array);
z.next_in = array;
z.next_in_index = 0;
}
int oldNextInIndex = z.next_in_index;
// Configure output.
int maxOutputLength = (int) Math.ceil(inputLength * 1.001) + 12;
boolean outHasArray = out.hasArray();
z.avail_out = maxOutputLength;
if (outHasArray) {
out.ensureWritableBytes(maxOutputLength);
z.next_out = out.array();
z.next_out_index = out.arrayOffset() + out.writerIndex();
} else {
z.next_out = new byte[maxOutputLength];
z.next_out_index = 0;
}
int oldNextOutIndex = z.next_out_index;
// Note that Z_PARTIAL_FLUSH has been deprecated.
int resultCode;
try {
resultCode = z.deflate(JZlib.Z_SYNC_FLUSH);
} finally {
if (inHasArray) {
in.skipBytes(z.next_in_index - oldNextInIndex);
}
}
if (resultCode != JZlib.Z_OK) {
ZlibUtil.fail(z, "compression failure", resultCode);
}
int outputLength = z.next_out_index - oldNextOutIndex;
if (outputLength > 0) {
if (outHasArray) {
out.writerIndex(out.writerIndex() + outputLength);
} else {
out.writeBytes(z.next_out, 0, outputLength);
}
}
} finally {
// Deference the external references explicitly to tell the VM that
// the allocated byte arrays are temporary so that the call stack
// can be utilized.
// I'm not sure if the modern VMs do this optimization though.
z.next_in = null;
z.next_out = null;
}
}
}
@Override
public void disconnect(
final ChannelHandlerContext ctx,
final ChannelFuture future) throws Exception {
finishEncode(ctx, ctx.newFuture()).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f) throws Exception {
ctx.disconnect(future);
}
});
}
@Override
public void close(
final ChannelHandlerContext ctx,
final ChannelFuture future) throws Exception {
finishEncode(ctx, ctx.newFuture()).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture f) throws Exception {
ctx.close(future);
}
});
}
private ChannelFuture finishEncode(ChannelHandlerContext ctx, ChannelFuture future) {
if (!finished.compareAndSet(false, true)) {
future.setSuccess();
return future;
}
ByteBuf footer;
synchronized (z) {
try {
// Configure input.
z.next_in = EMPTY_ARRAY;
z.next_in_index = 0;
z.avail_in = 0;
// Configure output.
byte[] out = new byte[32]; // room for ADLER32 + ZLIB / CRC32 + GZIP header
z.next_out = out;
z.next_out_index = 0;
z.avail_out = out.length;
// Write the ADLER32 checksum (stream footer).
int resultCode = z.deflate(JZlib.Z_FINISH);
if (resultCode != JZlib.Z_OK && resultCode != JZlib.Z_STREAM_END) {
future.setFailure(ZlibUtil.exception(z, "compression failure", resultCode));
return future;
} else if (z.next_out_index != 0) {
footer = Unpooled.wrappedBuffer(out, 0, z.next_out_index);
} else {
footer = Unpooled.EMPTY_BUFFER;
}
} finally {
z.deflateEnd();
// Deference the external references explicitly to tell the VM that
// the allocated byte arrays are temporary so that the call stack
// can be utilized.
// I'm not sure if the modern VMs do this optimization though.
z.next_in = null;
z.next_out = null;
}
}
ctx.write(footer, future);
return future;
}
@Override
public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
this.ctx = ctx;
}
}

View File

@ -19,7 +19,7 @@ import io.netty.util.internal.jzlib.JZlib;
import io.netty.util.internal.jzlib.ZStream;
/**
* Utility methods used by {@link ZlibEncoder} and {@link ZlibDecoder}.
* Utility methods used by {@link JZlibEncoder} and {@link JZlibDecoder}.
*/
final class ZlibUtil {

View File

@ -16,8 +16,6 @@
package io.netty.handler.codec.marshalling;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufFactory;
import io.netty.buffer.Unpooled;
import java.io.IOException;
@ -40,13 +38,6 @@ class ChannelBufferByteOutput implements ByteOutput {
this.buffer = buffer;
}
/**
* Calls {@link #ChannelBufferByteOutput(ByteBuf)} with a dynamic {@link ByteBuf}
*/
public ChannelBufferByteOutput(ByteBufFactory factory, int estimatedLength) {
this(Unpooled.dynamicBuffer(estimatedLength, factory));
}
@Override
public void close() throws IOException {
// Nothing todo

View File

@ -20,7 +20,6 @@ import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ReplayingDecoder;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.util.VoidEnum;
import java.io.ObjectStreamConstants;
@ -32,7 +31,7 @@ import org.jboss.marshalling.Unmarshaller;
*
* If you can you should use {@link MarshallingDecoder}.
*/
public class CompatibleMarshallingDecoder extends ReplayingDecoder<Object, VoidEnum> {
public class CompatibleMarshallingDecoder extends ReplayingDecoder<Object, Void> {
protected final UnmarshallerProvider provider;
protected final int maxObjectSize;

View File

@ -53,7 +53,7 @@ class CompactObjectInputStream extends ObjectInputStream {
case CompactObjectOutputStream.TYPE_THIN_DESCRIPTOR:
String className = readUTF();
Class<?> clazz = classResolver.resolve(className);
return ObjectStreamClass.lookup(clazz);
return ObjectStreamClass.lookupAny(clazz);
default:
throw new StreamCorruptedException(
"Unexpected class descriptor type: " + type);

View File

@ -37,7 +37,8 @@ class CompactObjectOutputStream extends ObjectOutputStream {
@Override
protected void writeClassDescriptor(ObjectStreamClass desc) throws IOException {
Class<?> clazz = desc.forClass();
if (clazz.isPrimitive() || clazz.isArray() || desc.getSerialVersionUID() == 0) {
if (clazz.isPrimitive() || clazz.isArray() || clazz.isInterface() ||
desc.getSerialVersionUID() == 0) {
write(TYPE_FAT_DESCRIPTOR);
super.writeClassDescriptor(desc);
} else {

View File

@ -100,7 +100,7 @@ public class CompatibleObjectEncoder extends MessageToByteEncoder<Object> {
oos.reset();
// Also discard the byproduct to avoid OOM on the sending side.
out.discardReadBytes();
out.unsafe().discardSomeReadBytes();
}
}

View File

@ -80,8 +80,7 @@ public class ObjectEncoderOutputStream extends OutputStream implements
@Override
public void writeObject(Object obj) throws IOException {
ByteBufOutputStream bout = new ByteBufOutputStream(
Unpooled.dynamicBuffer(estimatedLength));
ByteBufOutputStream bout = new ByteBufOutputStream(Unpooled.buffer(estimatedLength));
ObjectOutputStream oout = new CompactObjectOutputStream(bout);
oout.writeObject(obj);
oout.flush();

View File

@ -0,0 +1,99 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;
import io.netty.util.Signal;
import org.junit.Test;
public class ReplayingDecoderBufferTest {
/**
* See https://github.com/netty/netty/issues/445
*/
@Test
public void testGetUnsignedByte() {
ReplayingDecoderBuffer buffer = new ReplayingDecoderBuffer(Unpooled.copiedBuffer("TestBuffer", CharsetUtil.ISO_8859_1));
boolean error;
int i = 0;
try {
for (;;) {
buffer.getUnsignedByte(i);
i++;
}
} catch (Signal e) {
error = true;
}
assertTrue(error);
assertEquals(10, i);
}
/**
* See https://github.com/netty/netty/issues/445
*/
@Test
public void testGetByte() {
ReplayingDecoderBuffer buffer = new ReplayingDecoderBuffer(Unpooled.copiedBuffer("TestBuffer", CharsetUtil.ISO_8859_1));
boolean error;
int i = 0;
try {
for (;;) {
buffer.getByte(i);
i++;
}
} catch (Signal e) {
error = true;
}
assertTrue(error);
assertEquals(10, i);
}
/**
* See https://github.com/netty/netty/issues/445
*/
@Test
public void testGetBoolean() {
ByteBuf buf = Unpooled.buffer(10);
while(buf.writable()) {
buf.writeBoolean(true);
}
ReplayingDecoderBuffer buffer = new ReplayingDecoderBuffer(buf);
boolean error;
int i = 0;
try {
for (;;) {
buffer.getBoolean(i);
i++;
}
} catch (Signal e) {
error = true;
}
assertTrue(error);
assertEquals(10, i);
}
}

View File

@ -21,7 +21,6 @@ import io.netty.buffer.ByteBufIndexFinder;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.embedded.EmbeddedByteChannel;
import io.netty.util.VoidEnum;
import org.junit.Test;
@ -48,7 +47,7 @@ public class ReplayingDecoderTest {
assertNull(ch.readInbound());
}
private static final class LineDecoder extends ReplayingDecoder<ByteBuf, VoidEnum> {
private static final class LineDecoder extends ReplayingDecoder<ByteBuf, Void> {
LineDecoder() {
}

View File

@ -20,7 +20,7 @@
<parent>
<groupId>io.netty</groupId>
<artifactId>netty-parent</artifactId>
<version>4.0.0.Alpha1-SNAPSHOT</version>
<version>4.0.0.Alpha2-SNAPSHOT</version>
</parent>
<artifactId>netty-common</artifactId>

View File

@ -31,6 +31,8 @@ public abstract class AbstractInternalLogger implements InternalLogger {
@Override
public boolean isEnabled(InternalLogLevel level) {
switch (level) {
case TRACE:
return isTraceEnabled();
case DEBUG:
return isDebugEnabled();
case INFO:
@ -47,6 +49,9 @@ public abstract class AbstractInternalLogger implements InternalLogger {
@Override
public void log(InternalLogLevel level, String msg, Throwable cause) {
switch (level) {
case TRACE:
trace(msg, cause);
break;
case DEBUG:
debug(msg, cause);
break;
@ -67,6 +72,9 @@ public abstract class AbstractInternalLogger implements InternalLogger {
@Override
public void log(InternalLogLevel level, String msg) {
switch (level) {
case TRACE:
trace(msg);
break;
case DEBUG:
debug(msg);
break;

View File

@ -31,6 +31,16 @@ class CommonsLogger extends AbstractInternalLogger {
this.loggerName = loggerName;
}
@Override
public void trace(String msg) {
logger.trace(msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.trace(msg, cause);
}
@Override
public void debug(String msg) {
logger.debug(msg);
@ -61,6 +71,11 @@ class CommonsLogger extends AbstractInternalLogger {
logger.info(msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();

View File

@ -19,6 +19,10 @@ package io.netty.logging;
* The log level that {@link InternalLogger} can log at.
*/
public enum InternalLogLevel {
/**
* 'TRACE' log level.
*/
TRACE,
/**
* 'DEBUG' log level.
*/

View File

@ -20,6 +20,11 @@ package io.netty.logging;
* access this class outside of Netty.
*/
public interface InternalLogger {
/**
* Returns {@code true} if a TRACE level message is logged.
*/
boolean isTraceEnabled();
/**
* Returns {@code true} if a DEBUG level message is logged.
*/
@ -45,6 +50,16 @@ public interface InternalLogger {
*/
boolean isEnabled(InternalLogLevel level);
/**
* Logs a TRACE level message.
*/
void trace(String msg);
/**
* Logs a TRACE level message.
*/
void trace(String msg, Throwable cause);
/**
* Logs a DEBUG level message.
*/

View File

@ -66,6 +66,16 @@ public abstract class InternalLoggerFactory {
final InternalLogger logger = getDefaultFactory().newInstance(name);
return new InternalLogger() {
@Override
public void trace(String msg) {
logger.trace(msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.trace(msg, cause);
}
@Override
public void debug(String msg) {
logger.debug(msg);
@ -96,6 +106,11 @@ public abstract class InternalLoggerFactory {
logger.info(msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();

View File

@ -29,6 +29,16 @@ class JBossLogger extends AbstractInternalLogger {
this.logger = logger;
}
@Override
public void trace(String msg) {
logger.trace(msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.trace(msg, cause);
}
@Override
public void debug(String msg) {
logger.debug(msg);
@ -59,6 +69,11 @@ class JBossLogger extends AbstractInternalLogger {
logger.info(msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
@SuppressWarnings("deprecation")
public boolean isDebugEnabled() {

View File

@ -32,6 +32,16 @@ class JdkLogger extends AbstractInternalLogger {
this.loggerName = loggerName;
}
@Override
public void trace(String msg) {
logger.logp(Level.FINEST, loggerName, null, msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.logp(Level.FINEST, loggerName, null, msg, cause);
}
@Override
public void debug(String msg) {
logger.logp(Level.FINE, loggerName, null, msg);
@ -62,6 +72,11 @@ class JdkLogger extends AbstractInternalLogger {
logger.logp(Level.INFO, loggerName, null, msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isLoggable(Level.FINEST);
}
@Override
public boolean isDebugEnabled() {
return logger.isLoggable(Level.FINE);

View File

@ -29,6 +29,16 @@ class Log4JLogger extends AbstractInternalLogger {
this.logger = logger;
}
@Override
public void trace(String msg) {
logger.trace(msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.trace(msg, cause);
}
@Override
public void debug(String msg) {
logger.debug(msg);
@ -59,6 +69,11 @@ class Log4JLogger extends AbstractInternalLogger {
logger.info(msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();

View File

@ -34,6 +34,16 @@ class OsgiLogger extends AbstractInternalLogger {
prefix = "[" + name + "] ";
}
@Override
public void trace(String msg) {
// This logger doesn't have TRACE level
}
@Override
public void trace(String msg, Throwable cause) {
// This logger doesn't have TRACE level
}
@Override
public void debug(String msg) {
LogService logService = parent.getLogService();
@ -94,6 +104,11 @@ class OsgiLogger extends AbstractInternalLogger {
}
}
@Override
public boolean isTraceEnabled() {
return false;
}
@Override
public boolean isDebugEnabled() {
return true;

View File

@ -28,6 +28,16 @@ class Slf4JLogger extends AbstractInternalLogger {
this.logger = logger;
}
@Override
public void trace(String msg) {
logger.trace(msg);
}
@Override
public void trace(String msg, Throwable cause) {
logger.trace(msg, cause);
}
@Override
public void debug(String msg) {
logger.debug(msg);
@ -58,6 +68,11 @@ class Slf4JLogger extends AbstractInternalLogger {
logger.info(msg, cause);
}
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
@Override
public boolean isDebugEnabled() {
return logger.isDebugEnabled();

View File

@ -1,30 +0,0 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.util;
/**
* Represents an object which contains another object that needs to be taken
* into account by {@link ObjectSizeEstimator} for more accurate object size
* estimation.
*/
public interface EstimatableObjectWrapper {
/**
* Returns the underlying object that needs to be taken into account
* by {@link ObjectSizeEstimator} for more accurate object size estimation.
*/
Object unwrap();
}

Some files were not shown because too many files have changed in this diff Show More