Update with recent Netty 5 changes

Chris Vest 2021-09-01 11:16:31 +02:00
parent c6e4fd98c8
commit 6413143811
135 changed files with 690 additions and 16497 deletions

View File

@@ -22,7 +22,7 @@ RUN curl https://downloads.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-
ENV PATH=/home/build/apache-maven-3.6.3/bin:$PATH
# Prepare a snapshot of Netty 5
-RUN git clone --depth 1 -b master https://github.com/netty/netty.git netty \
+RUN git clone --depth 1 -b main https://github.com/netty/netty.git netty \
 && cd netty \
 && mvn install -DskipTests -T1C -B -am \
 && cd .. \
@@ -31,7 +31,6 @@ RUN git clone --depth 1 -b master https://github.com/netty/netty.git netty \
# Prepare our own build
RUN mkdir buffer-api && mkdir buffer-memseg && mkdir buffer-tests
COPY pom.xml pom.xml
-COPY buffer-api/pom.xml buffer-api/pom.xml
COPY buffer-memseg/pom.xml buffer-memseg/pom.xml
COPY buffer-tests/pom.xml buffer-tests/pom.xml
RUN mvn install dependency:go-offline surefire:test checkstyle:check -ntp RUN mvn install dependency:go-offline surefire:test checkstyle:check -ntp

View File

@@ -1,43 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 2021 The Netty Project
~
~ The Netty Project licenses this file to you under the Apache License,
~ version 2.0 (the "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at:
~
~ https://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
~ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
~ License for the specific language governing permissions and limitations
~ under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-parent</artifactId>
<version>0.0.1.Final-SNAPSHOT</version>
</parent>
<artifactId>netty-incubator-buffer-api</artifactId>
<version>0.0.1.Final-SNAPSHOT</version>
<name>Netty/Incubator/Buffer</name>
<packaging>jar</packaging>
<dependencies>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-common</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-buffer</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -1,62 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* Methods for accessing and controlling the internals of an allocator.
* This interface is intended to be used by implementors of the {@link BufferAllocator}, {@link Buffer} and
* {@link MemoryManager} interfaces.
*/
public interface AllocatorControl {
/**
* Allocate a buffer that is not tethered to any particular {@link Buffer} object,
* and return the recoverable memory object from it.
* <p>
* This allows a buffer to implement {@link Buffer#ensureWritable(int)} by having new memory allocated to it,
* without that memory being attached to some other lifetime.
*
* @param originator The buffer that originated the request for an untethered memory allocation.
* @param size The size of the requested memory allocation, in bytes.
* @return A {@link UntetheredMemory} object that is the requested allocation.
*/
UntetheredMemory allocateUntethered(Buffer originator, int size);
/**
* Return memory to the allocator, after it has been untethered from its lifetime.
* This either happens if the memory has leaked and been re-captured, or if it is no longer in use by a buffer
* through {@link Buffer#ensureWritable(int)}.
*
* @param memory The untethered memory to return to the allocator.
*/
void recoverMemory(Object memory);
/**
* Memory that isn't attached to any particular buffer.
*/
interface UntetheredMemory {
/**
* Produce the recoverable memory object associated with this piece of untethered memory.
* @implNote This method should only be called once, since it might be expensive.
*/
<Memory> Memory memory();
/**
* Produce the drop instance associated with this piece of untethered memory.
* @implNote This method should only be called once, since it might be expensive, or interact with Cleaners.
*/
<BufferType extends Buffer> Drop<BufferType> drop();
}
}
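
For illustration, a minimal sketch of how a buffer implementation might use AllocatorControl when growing its backing memory. The class and field names below are hypothetical, the byte-copying step is elided, and only the allocateUntethered/recoverMemory handshake described above is shown.

import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;

// Hypothetical example; not part of the removed file.
abstract class ExampleBuffer implements Buffer {
    private final AllocatorControl control; // the allocator that created this buffer
    private Object memory;                  // the recoverable memory object backing this buffer
    private Drop<ExampleBuffer> drop;       // releases the memory when the buffer is closed

    ExampleBuffer(AllocatorControl control, Object memory, Drop<ExampleBuffer> drop) {
        this.control = control;
        this.memory = memory;
        this.drop = drop;
    }

    @Override
    public void ensureWritable(int size, int minimumGrowth, boolean allowCompaction) {
        if (writableBytes() >= size) {
            return; // enough writable space already
        }
        // Ask the allocator for memory that is not yet tied to any buffer lifetime.
        int growth = Math.max(size - writableBytes(), minimumGrowth);
        UntetheredMemory untethered = control.allocateUntethered(this, capacity() + growth);
        Object replaced = memory;
        memory = untethered.memory(); // adopt the new memory object (called at most once)
        drop = untethered.drop();     // adopt the Drop that will eventually release it
        // ... copy the readable bytes from `replaced` into the new memory (implementation specific) ...
        control.recoverMemory(replaced); // hand the replaced memory back to the allocator
    }
}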

View File

@@ -1,682 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/**
* A reference counted buffer of memory, with separate reader and writer offsets.
* <p>
* A buffer is a sequential stretch of memory with a certain capacity, an offset for writing, and an offset for reading.
*
* <h3>Creating a buffer</h3>
*
* Buffers are created by {@linkplain BufferAllocator allocators}, via their {@code allocate} family of methods.
* A number of standard allocators exist, and are available through static methods on the {@code BufferAllocator}
* interface.
*
* <h3>Life cycle and reference counting</h3>
*
* The buffer has a life cycle, where it is allocated, used, and deallocated.
* When the buffer is initially allocated, a pairing {@link #close()} call will deallocate it.
* If a buffer is {@linkplain #send() sent} elsewhere, the {@linkplain #close() close} method on the given instance
* will become a no-op.
* The buffer can be thought of as a view onto memory, and calling {@link #send()} on the buffer will effectively close
* that view, and recreate it upon reception at its destination.
*
* <h3>Thread-safety</h3>
*
* Buffers are not thread-safe.
* The {@linkplain #isAccessible() accessibility state} implied by the {@link Resource} interface is itself not
* thread-safe, and buffers additionally contain other mutable data that is not thread-safe.
*
* <h3>Accessing data</h3>
*
* Data access methods fall into two classes:
* <ol>
* <li>Accesses that are based on, and update, the read or write offset positions.</li>
* <ul><li>These accessor methods are typically called {@code readX} or {@code writeX}.</li></ul>
* <li>Accesses that take offsets as arguments, and do not update the read or write offset positions.</li>
* <ul><li>These accessor methods are typically called {@code getX} or {@code setX}.</li></ul>
* </ol>
*
* A buffer contains two mutable offset positions: one for reading and one for writing.
* These positions use <a href="https://en.wikipedia.org/wiki/Zero-based_numbering">zero-based indexing</a>,
* such that the first byte of data in the buffer is placed at offset {@code 0},
* and the last byte in the buffer is at offset {@link #capacity() capacity - 1}.
* The {@link #readerOffset()} is the offset into the buffer from which the next read will take place,
* and is initially zero.
* The reader offset must always be less than or equal to the {@link #writerOffset()}.
* The {@link #writerOffset()} is likewise the offset into the buffer where the next write will take place.
* The writer offset is also initially zero, and must be less than or equal to the {@linkplain #capacity() capacity}.
* <p>
* This carves the buffer into three regions, as demonstrated by this diagram:
* <pre>
* +-------------------+------------------+------------------+
* | discardable bytes | readable bytes | writable bytes |
* | | (CONTENT) | |
* +-------------------+------------------+------------------+
* | | | |
* 0 <= readerOffset <= writerOffset <= capacity
* </pre>
*
* <h3 name="split">Splitting buffers</h3>
*
* The {@link #split()} method breaks a buffer into two.
* The two buffers will share the underlying memory, but their regions will not overlap, ensuring that the memory is
* safely shared between the two.
* <p>
* Splitting a buffer is useful when you want to hand over a region of a buffer to some other,
* perhaps unknown, piece of code, and relinquish your ownership of that buffer region in the process.
* Examples include aggregating messages into an accumulator buffer, and sending messages down the pipeline for
* further processing, as split buffer regions, once their data has been received in its entirety.
*
* If you instead wish to temporarily share a region of a buffer, you will have to pass offset and length along with the
* buffer, or you will have to make a copy of the region.
*
* <h3>Buffers as constants</h3>
*
* Sometimes, the same bit of data will be processed or transmitted over and over again. In such cases, it can be
* tempting to allocate and fill a buffer once, and then reuse it.
* Such reuse must be done carefully, however, to avoid a number of bugs.
* The {@link BufferAllocator} has a {@link BufferAllocator#constBufferSupplier(byte[])} method that solves this, and
* prevents these bugs from occurring.
*/
public interface Buffer extends Resource<Buffer>, BufferAccessors {
/**
* Change the default byte order of this buffer, and return this buffer.
*
* @param order The new default byte order, used by accessor methods that don't use an explicit byte order.
* @return This buffer instance.
*/
Buffer order(ByteOrder order);
/**
* The default byte order of this buffer.
* @return The default byte order of this buffer.
*/
ByteOrder order();
/**
* The capacity of this buffer, that is, the maximum number of bytes it can contain.
*
* @return The capacity in bytes.
*/
int capacity();
/**
* Get the current reader offset. The next read will happen from this byte offset into the buffer.
*
* @return The current reader offset.
*/
int readerOffset();
/**
* Set the reader offset. Make the next read happen from the given offset into the buffer.
*
* @param offset The reader offset to set.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the specified {@code offset} is less than zero or greater than the current
* {@link #writerOffset()}.
*/
Buffer readerOffset(int offset);
/**
* Get the current writer offset. The next write will happen at this byte offset into the buffer.
*
* @return The current writer offset.
*/
int writerOffset();
/**
* Set the writer offset. Make the next write happen at the given offset.
*
* @param offset The writer offset to set.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the specified {@code offset} is less than the current
* {@link #readerOffset()} or greater than {@link #capacity()}.
* @throws BufferClosedException if this buffer is closed.
* @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}.
*/
Buffer writerOffset(int offset);
/**
* Returns the number of readable bytes which is equal to {@code (writerOffset() - readerOffset())}.
*/
default int readableBytes() {
return writerOffset() - readerOffset();
}
/**
* Returns the number of writable bytes which is equal to {@code (capacity() - writerOffset())}.
*/
default int writableBytes() {
return capacity() - writerOffset();
}
/**
* Fill the buffer with the given byte value. This method does not respect the {@link #readerOffset()} or {@link
* #writerOffset()}, but copies the full capacity of the buffer. The {@link #readerOffset()} and {@link
* #writerOffset()} are not modified.
*
* @param value The byte value to write at every position in the buffer.
* @return This Buffer.
* @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}.
*/
Buffer fill(byte value);
/**
* Give the native memory address backing this buffer, or return 0 if this buffer has no native memory address.
* @return The native memory address, if any, otherwise 0.
*/
long nativeAddress();
/**
* Make this buffer read-only.
* This is irreversible.
*
* @return this buffer.
*/
Buffer makeReadOnly();
/**
* Query if this buffer is read-only or not.
*
* @return {@code true} if this buffer is read-only, {@code false} otherwise.
*/
boolean readOnly();
/**
* Copies the given length of data from this buffer into the given destination array, beginning at the given source
* position in this buffer, and the given destination position in the destination array.
* <p>
* This method does not read or modify the {@linkplain #writerOffset() write offset} or the
* {@linkplain #readerOffset() read offset}.
*
* @param srcPos The byte offset into this buffer wherefrom the copying should start; the byte at this offset in
* this buffer will be copied to the {@code destPos} index in the {@code dest} array.
* @param dest The destination byte array.
* @param destPos The index into the {@code dest} array wherefrom the copying should start.
* @param length The number of bytes to copy.
* @throws NullPointerException if the destination array is null.
* @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative,
* or if the resulting end positions reach beyond the end of either this buffer or the destination array.
*/
void copyInto(int srcPos, byte[] dest, int destPos, int length);
/**
* Copies the given length of data from this buffer into the given destination byte buffer, beginning at the given
* source position in this buffer, and the given destination position in the destination byte buffer.
* <p>
* This method does not read or modify the {@linkplain #writerOffset() write offset} or the
* {@linkplain #readerOffset() read offset}, nor is the position of the destination buffer changed.
* <p>
* The position and limit of the destination byte buffer are also ignored, and do not influence {@code destPos}
* or {@code length}.
*
* @param srcPos The byte offset into this buffer wherefrom the copying should start; the byte at this offset in
* this buffer will be copied to the {@code destPos} index in the {@code dest} byte buffer.
* @param dest The destination byte buffer.
* @param destPos The index into the {@code dest} byte buffer wherefrom the copying should start.
* @param length The number of bytes to copy.
* @throws NullPointerException if the destination byte buffer is null.
* @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative,
* or if the resulting end positions reach beyond the end of either this buffer or the destination byte buffer.
*/
void copyInto(int srcPos, ByteBuffer dest, int destPos, int length);
/**
* Copies the given length of data from this buffer into the given destination buffer, beginning at the given
* source position in this buffer, and the given destination position in the destination buffer.
* <p>
* This method does not read or modify the {@linkplain #writerOffset() write offset} or the
* {@linkplain #readerOffset() read offset} on this buffer, nor on the destination buffer.
* <p>
* The read and write offsets of the destination buffer are also ignored, and do not influence {@code destPos}
* or {@code length}.
*
* @param srcPos The byte offset into this buffer wherefrom the copying should start; the byte at this offset in
* this buffer will be copied to the {@code destPos} index in the {@code dest} buffer.
* @param dest The destination buffer.
* @param destPos The index into the {@code dest} buffer wherefrom the copying should start.
* @param length The number of bytes to copy.
* @throws NullPointerException if the destination buffer is null.
* @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative,
* or if the resulting end positions reach beyond the end of either this buffer or the destination buffer.
*/
void copyInto(int srcPos, Buffer dest, int destPos, int length);
/**
* Write into this buffer all the readable bytes from the given buffer.
* This updates the {@linkplain #writerOffset() write offset} of this buffer, and the
* {@linkplain #readerOffset() reader offset} of the given buffer.
*
* @param source The buffer to read from.
* @return This buffer.
*/
default Buffer writeBytes(Buffer source) {
int size = source.readableBytes();
int woff = writerOffset();
writerOffset(woff + size);
source.copyInto(source.readerOffset(), this, woff, size);
source.readerOffset(source.readerOffset() + size);
return this;
}
/**
* Write into this buffer all the bytes from the given byte array.
* This updates the {@linkplain #writerOffset() write offset} of this buffer by the length of the array.
*
* @param source The byte array to read from.
* @return This buffer.
*/
default Buffer writeBytes(byte[] source) {
int size = source.length;
int woff = writerOffset();
writerOffset(woff + size);
for (int i = 0; i < size; i++) {
setByte(woff + i, source[i]);
}
return this;
}
/**
* Resets the {@linkplain #readerOffset() read offset} and the {@linkplain #writerOffset() write offset} on this
* buffer to their initial values.
*/
default Buffer reset() {
readerOffset(0);
writerOffset(0);
return this;
}
/**
* Open a cursor to iterate the readable bytes of this buffer. The {@linkplain #readerOffset() reader offset} and
* {@linkplain #writerOffset() writer offset} are not modified by the cursor.
* <p>
* Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that
* the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified
* while the iteration takes place. Otherwise, unpredictable behaviour might result.
*
* @return A {@link ByteCursor} for iterating the readable bytes of this buffer.
*/
ByteCursor openCursor();
/**
* Open a cursor to iterate the given number of bytes of this buffer, starting at the given offset.
* The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by
* the cursor.
* <p>
* Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that
* the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified
* while the iteration takes place. Otherwise, unpredictable behaviour might result.
*
* @param fromOffset The offset into the buffer where iteration should start.
* The first byte read from the iterator will be the byte at this offset.
* @param length The number of bytes to iterate.
* @return A {@link ByteCursor} for the given stretch of bytes of this buffer.
* @throws IllegalArgumentException if the length is negative, or if the region given by the {@code fromOffset} and
* the {@code length} reaches outside the bounds of this buffer.
*/
ByteCursor openCursor(int fromOffset, int length);
/**
* Open a cursor to iterate the readable bytes of this buffer, in reverse.
* The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by
* the cursor.
* <p>
* Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that
* the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified
* while the iteration takes place. Otherwise, unpredictable behaviour might result.
*
* @return A {@link ByteCursor} for the readable bytes of this buffer.
*/
default ByteCursor openReverseCursor() {
int woff = writerOffset();
return openReverseCursor(woff == 0 ? 0 : woff - 1, readableBytes());
}
/**
* Open a cursor to iterate the given number of bytes of this buffer, in reverse, starting at the given offset.
* The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by
* the cursor.
* <p>
* Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that
* the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified
* while the iteration takes place. Otherwise, unpredictable behaviour might result.
*
* @param fromOffset The offset into the buffer where iteration should start.
* The first byte read from the iterator will be the byte at this offset.
* @param length The number of bytes to iterate.
* @return A {@link ByteCursor} for the given stretch of bytes of this buffer.
* @throws IllegalArgumentException if the length is negative, or if the region given by the {@code fromOffset} and
* the {@code length} reaches outside the bounds of this buffer.
*/
ByteCursor openReverseCursor(int fromOffset, int length);
/**
* Ensure that this buffer has {@linkplain #writableBytes() available space for writing} the given number of
* bytes.
* If this buffer already has the necessary space, then this method returns immediately.
* If this buffer does not already have the necessary space, then it will be expanded using the
* {@link BufferAllocator} the buffer was created with.
* This method is the same as calling {@link #ensureWritable(int, int, boolean)} where {@code allowCompaction} is
* {@code true}.
*
* @param size The requested number of bytes of space that should be available for writing.
* @throws IllegalStateException if this buffer is in a bad state.
* @throws BufferClosedException if this buffer is closed.
* @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}.
*/
default void ensureWritable(int size) {
ensureWritable(size, 1, true);
}
/**
* Ensure that this buffer has {@linkplain #writableBytes() available space for writing} the given number of
* bytes.
* If this buffer already has the necessary space, then this method returns immediately.
* If this buffer does not already have the necessary space, then space will be made available in one or more of
* the following ways:
*
* <ul>
* <li>
* If {@code allowCompaction} is {@code true}, and the sum of the already-read bytes and the writable bytes
* would be enough to satisfy the request, and it (depending on the buffer implementation) seems faster and
* easier to compact the existing buffer rather than allocating a new buffer, then the requested bytes will be made available
* that way. The compaction will not necessarily work the same way as the {@link #compact()} method, as the
* implementation may be able to make the requested bytes available with less effort than is strictly
* mandated by the {@link #compact()} method.
* </li>
* <li>
* Regardless of the value of {@code allowCompaction}, the implementation may make more space available
* by just allocating more or larger buffers. This allocation would use the same {@link BufferAllocator}
* that this buffer was created with.
* </li>
* <li>
* If {@code allowCompaction} is {@code true}, then the implementation may choose to do a combination of
* compaction and allocation.
* </li>
* </ul>
*
* @param size The requested number of bytes of space that should be available for writing.
* @param minimumGrowth The minimum number of bytes to grow by. If it is determined that memory should be allocated
* and copied, make sure that the new memory allocation is bigger than the old one by at least
* this many bytes. This way, the buffer can grow by more than what is immediately necessary,
* thus amortising the costs of allocating and copying.
* @param allowCompaction {@code true} if the method is allowed to modify the
* {@linkplain #readerOffset() reader offset} and
* {@linkplain #writerOffset() writer offset}, otherwise {@code false}.
* @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}.
* @throws IllegalArgumentException if {@code size} or {@code minimumGrowth} are negative.
* @throws IllegalStateException if this buffer is in a bad state.
*/
void ensureWritable(int size, int minimumGrowth, boolean allowCompaction);
/**
* Returns a copy of this buffer's readable bytes.
* Modifying the content of the returned buffer will not affect this buffer's contents.
* The two buffers will maintain separate offsets. This method is identical to
* {@code buf.copy(buf.readerOffset(), buf.readableBytes())}.
* This method does not modify {@link #readerOffset()} or {@link #writerOffset()} of this buffer.
* <p>
* The copy is created with a {@linkplain #writerOffset() write offset} equal to the length of the copied data,
* so that the entire contents of the copy are ready to be read.
*
* @return A new buffer instance, with independent {@link #readerOffset()} and {@link #writerOffset()},
* that contains a copy of the readable region of this buffer.
*/
default Buffer copy() {
int offset = readerOffset();
int length = readableBytes();
return copy(offset, length);
}
/**
* Returns a copy of the given region of this buffer.
* Modifying the content of the returned buffer will not affect this buffer's contents.
* The two buffers will maintain separate offsets.
* This method does not modify {@link #readerOffset()} or {@link #writerOffset()} of this buffer.
* <p>
* The copy is created with a {@linkplain #writerOffset() write offset} equal to the length of the copy,
* so that the entire contents of the copy are ready to be read.
*
* @return A new buffer instance, with independent {@link #readerOffset()} and {@link #writerOffset()},
* that contains a copy of the given region of this buffer.
*/
Buffer copy(int offset, int length);
/**
* Split the buffer into two, at the {@linkplain #writerOffset() write offset} position.
* <p>
* The region of this buffer that contains the read and readable bytes will be captured and returned in a new
* buffer that will hold its own ownership of that region. This allows the returned buffer to be independently
* {@linkplain #send() sent} to other threads.
* <p>
* The returned buffer will adopt the {@link #readerOffset()} of this buffer, and have its {@link #writerOffset()}
* and {@link #capacity()} both set equal to the write offset of this buffer.
* <p>
* The memory region in the returned buffer will become inaccessible through this buffer. This buffer will have its
* capacity reduced by the capacity of the returned buffer, and the read and write offsets of this buffer will both
* become zero, even though their positions in memory remain unchanged.
* <p>
* Effectively, the following transformation takes place:
* <pre>{@code
* This buffer:
* +------------------------------------------+
* 0| |r/o |w/o |cap
* +---+---------------------+----------------+
* / / / \ \
* / / / \ \
* / / / \ \
* / / / \ \
* / / / \ \
* +---+---------------------+ +---------------+
* | |r/o |w/o & cap |r/o & w/o |cap
* +---+---------------------+ +---------------+
* Returned buffer. This buffer.
* }</pre>
* When the buffers are in this state, both of the split parts retain an atomic reference count on the
* underlying memory. This means that shared underlying memory will not be deallocated or returned to a pool, until
* all the split parts have been closed.
* <p>
* Composite buffers have it a little easier, in that at most only one of the constituent buffers will actually be
* split. If the split point lands perfectly between two constituent buffers, then a composite buffer can
* simply split its internal array in two.
* <p>
* Split buffers support all operations that normal buffers do, including {@link #ensureWritable(int)}.
* <p>
* See the <a href="#split">Splitting buffers</a> section for details.
*
* @return A new buffer with independent and exclusive ownership over the read and readable bytes from this buffer.
*/
default Buffer split() {
return split(writerOffset());
}
/**
* Split the buffer into two, at the given {@code splitOffset}.
* <p>
* The region of this buffer that precedes the {@code splitOffset} will be captured and returned in a new
* buffer that will hold its own ownership of that region. This allows the returned buffer to be independently
* {@linkplain #send() sent} to other threads.
* <p>
* The returned buffer will adopt the {@link #readerOffset()} and {@link #writerOffset()} of this buffer,
* but truncated to fit within the capacity dictated by the {@code splitOffset}.
* <p>
* The memory region in the returned buffer will become inaccessible through this buffer. If the
* {@link #readerOffset()} or {@link #writerOffset()} of this buffer lie prior to the {@code splitOffset},
* then those offsets will be moved forward, so they land on offset 0 after the split.
* <p>
* Effectively, the following transformation takes place:
* <pre>{@code
* This buffer:
* +--------------------------------+
* 0| |splitOffset |cap
* +---------------+----------------+
* / / \ \
* / / \ \
* / / \ \
* / / \ \
* / / \ \
* +---------------+ +---------------+
* | |cap | |cap
* +---------------+ +---------------+
* Returned buffer. This buffer.
* }</pre>
* When the buffers are in this state, both of the split parts retain an atomic reference count on the
* underlying memory. This means that shared underlying memory will not be deallocated or returned to a pool, until
* all the split parts have been closed.
* <p>
* Composite buffers have it a little easier, in that at most only one of the constituent buffers will actually be
* split. If the split point lands perfectly between two constituent buffers, then a composite buffer can
* simply split its internal array in two.
* <p>
* Split buffers support all operations that normal buffers do, including {@link #ensureWritable(int)}.
* <p>
* See the <a href="#split">Splitting buffers</a> section for details.
*
* @return A new buffer with independent and exclusive ownership over the bytes from the beginning to the given
* offset of this buffer.
*/
Buffer split(int splitOffset);
/**
* Discards the read bytes, and moves the buffer contents to the beginning of the buffer.
*
* @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}.
* @throws IllegalStateException if this buffer is in a bad state.
*/
void compact();
/**
* Get the number of "components" in this buffer. For composite buffers, this is the number of transitive
* constituent buffers, while non-composite buffers only have one component.
*
* @return The number of components in this buffer.
*/
int countComponents();
/**
* Get the number of "components" in this buffer, that are readable. These are the components that would be
* processed by {@link #forEachReadable(int, ReadableComponentProcessor)}. For composite buffers, this is the
* number of transitive constituent buffers that are readable, while non-composite buffers only have at most one
* readable component.
* <p>
* The number of readable components may be less than the {@link #countComponents() component count}, if not all of
* them have readable data.
*
* @return The number of readable components in this buffer.
*/
int countReadableComponents();
/**
* Get the number of "components" in this buffer, that are writable. These are the components that would be
* processed by {@link #forEachWritable(int, WritableComponentProcessor)}. For composite buffers, this is the
* number of transitive constituent buffers that are writable, while non-composite buffers only have at most one
* writable component.
* <p>
* The number of writable components may be less than the {@link #countComponents() component count}, if not all of
* them have space for writing.
*
* @return The number of writable components in this buffer.
*/
int countWritableComponents();
/**
* Process all readable components of this buffer, and return the number of components processed.
* <p>
* The given {@linkplain ReadableComponentProcessor processor} is called for each readable component in this buffer,
* and passed a component index for the given component in the iteration, and a {@link ReadableComponent} object
* for accessing the data within the given component.
* <p>
* The component index is specific to the particular invocation of this method. The first call to the consumer will
* be passed the given initial index, and the next call will be passed the initial index plus one, and so on.
* <p>
* The {@linkplain ReadableComponentProcessor component processor} may stop the iteration at any time by returning
* {@code false}.
* This will cause the number of components processed to be returned as a negative number (to signal early return),
* and the number of components processed may then be less than the
* {@linkplain #countReadableComponents() readable component count}.
* <p>
* <strong>Note</strong> that the {@link ReadableComponent} instance passed to the consumer could be reused for
* multiple calls, so the data must be extracted from the component in the context of the iteration.
* <p>
* The {@link ByteBuffer} instances obtained from the component share their lifetime with that internal component.
* This means they can be accessed as long as the internal memory store remains unchanged. Methods that may cause
* such changes are {@link #split(int)}, {@link #split()}, {@link #compact()}, {@link #ensureWritable(int)},
* {@link #ensureWritable(int, int, boolean)}, and {@link #send()}.
* <p>
* The best way to ensure this doesn't cause any trouble is to use the buffers directly as part of the iteration,
* or immediately after the iteration, while still in the scope of the method that triggered it.
* <p>
* <strong>Note</strong> that the arrays, memory addresses, and byte buffers exposed as components by this method,
* should not be used for changing the buffer contents. Doing so may cause undefined behaviour.
* <p>
* Changes to the position and limit of the byte buffers exposed via the processed components are not reflected back to
* this buffer instance.
*
* @param initialIndex The initial index of the iteration, and the index that will be passed to the first call to
* the {@linkplain ReadableComponentProcessor#process(int, ReadableComponent) processor}.
* @param processor The processor that will be used to process the buffer components.
* @return The number of readable components processed, as a positive number if all readable components were
* processed, or as a negative number if the iteration was stopped because
* {@link ReadableComponentProcessor#process(int, ReadableComponent)} returned {@code false}.
* In any case, the number of components processed may be less than {@link #countComponents()}.
*/
<E extends Exception> int forEachReadable(int initialIndex, ReadableComponentProcessor<E> processor) throws E;
/**
* Process all writable components of this buffer, and return the number of components processed.
* <p>
* The given {@linkplain WritableComponentProcessor processor} is called for each writable component in this buffer,
* and passed a component index for the given component in the iteration, and a {@link WritableComponent} object
* for accessing the data within the given component.
* <p>
* The component index is specific to the particular invocation of this method. The first call to the consumer will
* be passed the given initial index, and the next call will be passed the initial index plus one, and so on.
* <p>
* The {@link WritableComponentProcessor component processor} may stop the iteration at any time by returning
* {@code false}.
* This will cause the number of components processed to be returned as a negative number (to signal early return),
* and the number of components processed may then be less than the
* {@linkplain #countWritableComponents() writable component count}.
* <p>
* <strong>Note</strong> that the {@link WritableComponent} instance passed to the consumer could be reused for
* multiple calls, so the data must be extracted from the component in the context of the iteration.
* <p>
* The {@link ByteBuffer} instances obtained from the component share their lifetime with that internal component.
* This means they can be accessed as long as the internal memory store remains unchanged. Methods that may cause
* such changes are {@link #split(int)}, {@link #split()}, {@link #compact()}, {@link #ensureWritable(int)},
* {@link #ensureWritable(int, int, boolean)}, and {@link #send()}.
* <p>
* The best way to ensure this doesn't cause any trouble is to use the buffers directly as part of the iteration,
* or immediately after the iteration, while still in the scope of the method that triggered it.
* <p>
* Changes to the position and limit of the byte buffers exposed via the processed components are not reflected back to
* this buffer instance.
*
* @param initialIndex The initial index of the iteration, and the index that will be passed to the first call to
* the {@linkplain WritableComponentProcessor#process(int, WritableComponent) processor}.
* @param processor The processor that will be used to process the buffer components.
* @return The number of writable components processed, as a positive number if all writable components were
* processed, or as a negative number if the iteration was stopped because
* {@link WritableComponentProcessor#process(int, WritableComponent)} returned {@code false}.
* In any case, the number of components processed may be less than {@link #countComponents()}.
*/
<E extends Exception> int forEachWritable(int initialIndex, WritableComponentProcessor<E> processor) throws E;
}
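
For illustration, a minimal usage sketch of the life cycle described in the javadoc above. The allocator is assumed to come from one of the static factory methods on BufferAllocator, and the allocate(int) signature is likewise an assumption based on the "allocate family of methods" mentioned there; neither is shown in this diff.

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

final class BufferUsageSketch {
    // The allocator parameter is assumed to be obtained from a BufferAllocator factory method.
    static void example(BufferAllocator allocator) {
        Buffer buf = allocator.allocate(64); // assumed signature: capacity in bytes
        try {
            buf.writeInt(42);                     // advances writerOffset by Integer.BYTES
            buf.writeBytes(new byte[] {1, 2, 3}); // advances writerOffset by 3
            int first = buf.readInt();            // reads 42 and advances readerOffset
            Buffer readPart = buf.split();        // readPart owns [0, writerOffset); buf keeps the rest
            // readPart could be handed to another thread via send(); the shared memory is
            // only released once both split parts have been closed.
            readPart.close();
        } finally {
            buf.close(); // pairs with the allocation and releases buf's part of the memory
        }
    }
}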

View File

@@ -1,614 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* This interface is just the primitive data accessor methods that {@link Buffer} exposes.
* It can be useful if you only need the data access methods, and perhaps wish to decorate or modify their behaviour.
* Usually, you'd use the {@link Buffer} interface directly, since this lets you properly control the buffer reference
* count.
*/
public interface BufferAccessors {
// <editor-fold defaultstate="collapsed" desc="Primitive accessors interface.">
/**
* Get the byte value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Byte#BYTES}.
* The value is read using a two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The byte value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Byte#BYTES}.
*/
byte readByte();
/**
* Get the byte value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The byte value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}.
*/
byte getByte(int roff);
/**
* Get the unsigned byte value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Byte#BYTES}.
* The value is read using an unsigned two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The unsigned byte value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Byte#BYTES}.
*/
int readUnsignedByte();
/**
* Get the unsigned byte value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using an unsigned two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The unsigned byte value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}.
*/
int getUnsignedByte(int roff);
/**
* Set the given byte value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Byte#BYTES}.
* The value is written using a two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The byte value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Byte#BYTES}.
*/
Buffer writeByte(byte value);
/**
* Set the given byte value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The byte value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}.
*/
Buffer setByte(int woff, byte value);
/**
* Set the given unsigned byte value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Byte#BYTES}.
* The value is written using an unsigned two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Byte#BYTES}.
*/
Buffer writeUnsignedByte(int value);
/**
* Set the given unsigned byte value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using an unsigned two's complement 8-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}.
*/
Buffer setUnsignedByte(int woff, int value);
/**
* Get the char value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by 2.
* The value is read using a 2-byte UTF-16 encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The char value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 2.
*/
char readChar();
/**
* Get the char value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a 2-byte UTF-16 encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The char value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 2.
*/
char getChar(int roff);
/**
* Set the given char value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by 2.
* The value is written using a 2-byte UTF-16 encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The char value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 2.
*/
Buffer writeChar(char value);
/**
* Set the given char value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a 2-byte UTF-16 encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The char value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 2.
*/
Buffer setChar(int woff, char value);
/**
* Get the short value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Short#BYTES}.
* The value is read using a two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The short value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Short#BYTES}.
*/
short readShort();
/**
* Get the short value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The short value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Short#BYTES}.
*/
short getShort(int roff);
/**
* Get the unsigned short value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Short#BYTES}.
* The value is read using an unsigned two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The unsigned short value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Short#BYTES}.
*/
int readUnsignedShort();
/**
* Get the unsigned short value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using an unsigned two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The unsigned short value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Short#BYTES}.
*/
int getUnsignedShort(int roff);
/**
* Set the given short value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Short#BYTES}.
* The value is written using a two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The short value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Short#BYTES}.
*/
Buffer writeShort(short value);
/**
* Set the given short value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The short value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Short#BYTES}.
*/
Buffer setShort(int woff, short value);
/**
* Set the given unsigned short value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Short#BYTES}.
* The value is written using an unsigned two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Short#BYTES}.
*/
Buffer writeUnsignedShort(int value);
/**
* Set the given unsigned short value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using an unsigned two's complement 16-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Short#BYTES}.
*/
Buffer setUnsignedShort(int woff, int value);
/**
* Get the int value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by 3.
* The value is read using a two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The int value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 3.
*/
int readMedium();
/**
* Get the int value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The int value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 3.
*/
int getMedium(int roff);
/**
* Get the unsigned int value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by 3.
* The value is read using an unsigned two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The unsigned int value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 3.
*/
int readUnsignedMedium();
/**
* Get the unsigned int value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using an unsigned two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The unsigned int value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 3.
*/
int getUnsignedMedium(int roff);
/**
* Set the given int value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by 3.
* The value is written using a two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 3.
*/
Buffer writeMedium(int value);
/**
* Set the given int value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 3.
*/
Buffer setMedium(int woff, int value);
/**
* Set the given unsigned int value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by 3.
* The value is written using an unsigned two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 3.
*/
Buffer writeUnsignedMedium(int value);
/**
* Set the given unsigned int value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using an unsigned two's complement 24-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus 3.
*/
Buffer setUnsignedMedium(int woff, int value);
/**
* Get the int value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Integer#BYTES}.
* The value is read using a two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The int value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Integer#BYTES}.
*/
int readInt();
/**
* Get the int value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The int value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}.
*/
int getInt(int roff);
/**
* Get the unsigned int value at the current {@link Buffer#readerOffset()},
* and increase the reader offset by {@link Integer#BYTES}.
* The value is read using an unsigned two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The unsigned int value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Integer#BYTES}.
*/
long readUnsignedInt();
/**
* Get the unsigned int value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using an unsigned two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The unsigned int value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}.
*/
long getUnsignedInt(int roff);
/**
* Set the given int value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Integer#BYTES}.
* The value is written using a two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Integer#BYTES}.
*/
Buffer writeInt(int value);
/**
* Set the given int value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The int value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}.
*/
Buffer setInt(int woff, int value);
/**
* Set the given unsigned int value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Integer#BYTES}.
* The value is written using an unsigned two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The long value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Integer#BYTES}.
*/
Buffer writeUnsignedInt(long value);
/**
* Set the given unsigned int value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using an unsigned two's complement 32-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The long value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}.
*/
Buffer setUnsignedInt(int woff, long value);
/**
* Get the float value at the current {@link Buffer#readerOffset()},
* and increases the reader offset by {@link Float#BYTES}.
* The value is read using a 32-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The float value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Float#BYTES}.
*/
float readFloat();
/**
* Get the float value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a 32-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The float value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Float#BYTES}.
*/
float getFloat(int roff);
/**
* Set the given float value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Float#BYTES}.
* The value is written using a 32-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The float value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Float#BYTES}.
*/
Buffer writeFloat(float value);
/**
* Set the given float value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a 32-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The float value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Float#BYTES}.
*/
Buffer setFloat(int woff, float value);
/**
* Get the long value at the current {@link Buffer#readerOffset()},
* and increases the reader offset by {@link Long#BYTES}.
* The value is read using a two's complement 64-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The long value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Long#BYTES}.
*/
long readLong();
/**
* Get the long value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a two's complement 64-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The long value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Long#BYTES}.
*/
long getLong(int roff);
/**
* Set the given long value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Long#BYTES}.
* The value is written using a two's complement 64-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The long value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Long#BYTES}.
*/
Buffer writeLong(long value);
/**
* Set the given long value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a two's complement 64-bit encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The long value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Long#BYTES}.
*/
Buffer setLong(int woff, long value);
/**
* Get the double value at the current {@link Buffer#readerOffset()},
* and increases the reader offset by {@link Double#BYTES}.
* The value is read using a 64-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @return The double value at the current reader offset.
* @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Double#BYTES}.
*/
double readDouble();
/**
* Get the double value at the given reader offset.
* The {@link Buffer#readerOffset()} is not modified.
* The value is read using a 64-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param roff The read offset, an absolute offset into this buffer, to read from.
* @return The double value at the given offset.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Double#BYTES}.
*/
double getDouble(int roff);
/**
* Set the given double value at the current {@link Buffer#writerOffset()},
* and increase the writer offset by {@link Double#BYTES}.
* The value is written using a 64-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param value The double value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Double#BYTES}.
*/
Buffer writeDouble(double value);
/**
* Set the given double value at the given write offset. The {@link Buffer#writerOffset()} is not modified.
* The value is written using a 64-bit IEEE floating point encoding,
* with the {@link Buffer#order() configured} default byte order.
*
* @param woff The write offset, an absolute offset into this buffer to write to.
* @param value The double value to write.
* @return This Buffer.
* @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or
* greater than {@link Buffer#capacity()} minus {@link Double#BYTES}.
*/
Buffer setDouble(int woff, double value);
// </editor-fold>
}
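Illustrative usage of the relative (read/write) and absolute (get/set) accessors documented above. This is a minimal sketch, assuming the BufferAllocator.heap() factory and try-with-resources closing of both allocator and buffer as described elsewhere in this commit:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

class AccessorExample {
    static void demo() {
        try (BufferAllocator allocator = BufferAllocator.heap();
             Buffer buf = allocator.allocate(16)) {
            buf.writeInt(42);                        // relative write; writerOffset += Integer.BYTES
            buf.writeDouble(3.5);                    // relative write; writerOffset += Double.BYTES
            int i = buf.readInt();                   // relative read; readerOffset += Integer.BYTES
            double d = buf.getDouble(Integer.BYTES); // absolute read; readerOffset is unchanged
            assert i == 42 && d == 3.5;
        }
    }
}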

View File

@ -1,134 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import io.netty.buffer.api.pool.PooledBufferAllocator;
import java.nio.ByteOrder;
import java.util.function.Supplier;
/**
* Interface for {@link Buffer} allocators.
*/
public interface BufferAllocator extends AutoCloseable {
/**
* Check that the given {@code size} argument is a valid buffer size, or throw an {@link IllegalArgumentException}.
*
* @param size The size to check.
* @throws IllegalArgumentException if the size is not positive, or if the size is too big (over ~2 GB) for a
* buffer to accommodate.
*/
static void checkSize(long size) {
if (size < 1) {
throw new IllegalArgumentException("Buffer size must be positive, but was " + size + '.');
}
// We use max array size because on-heap buffers will be backed by byte-arrays.
int maxArraySize = Integer.MAX_VALUE - 8;
if (size > maxArraySize) {
throw new IllegalArgumentException(
"Buffer size cannot be greater than " + maxArraySize + ", but was " + size + '.');
}
}
/**
* Allocate a {@link Buffer} of the given size in bytes. This method may throw an {@link OutOfMemoryError} if there
* is not enough free memory available to allocate a {@link Buffer} of the requested size.
* <p>
* The buffer will use the current platform native byte order by default, for accessor methods that don't have an
* explicit byte order.
*
* @param size The size of {@link Buffer} to allocate.
* @return The newly allocated {@link Buffer}.
*/
Buffer allocate(int size);
/**
* Allocate a {@link Buffer} of the given size in bytes. This method may throw an {@link OutOfMemoryError} if there
* is not enough free memory available to allocate a {@link Buffer} of the requested size.
* <p>
* The buffer will use the given byte order by default.
*
* @param size The size of {@link Buffer} to allocate.
* @param order The default byte order used by the accessor methods that don't have an explicit byte order.
* @return The newly allocated {@link Buffer}.
*/
default Buffer allocate(int size, ByteOrder order) {
return allocate(size).order(order);
}
/**
* Create a supplier of "constant" {@linkplain Buffer Buffers} from this allocator, that all have the given
* byte contents. Each such buffer has the same capacity as the byte array length, its write offset is placed at the
* end, and its read offset is at the beginning, such that the entire buffer contents are readable.
* <p>
* The buffers produced by the supplier will each have their own independent life-cycle, and closing them will
* make them {@linkplain Buffer#isAccessible() inaccessible}, just like normally allocated buffers.
* <p>
* The buffers produced are "constants", in the sense that they are {@linkplain Buffer#readOnly() read-only}.
* <p>
* It can generally be expected, but is not guaranteed, that the returned supplier is more resource efficient than
* allocating and copying memory with other available APIs. In such optimised implementations, the underlying memory
* backing the buffers will be shared among all the buffers produced by the supplier.
* <p>
* The primary use case for this API, is when you need to repeatedly produce buffers with the same contents, and
* you perhaps wish to keep a {@code static final} field with these contents. This use case has previously been
* solved by allocating a read-only buffer with the given contents, and then slicing or duplicating it on every use.
* This approach had several problems. For instance, if you forget to slice, the offsets of the buffer can change
* in unexpected ways, since the same buffer instance is shared and accessed from many places. The buffer could also
* be deallocated, making the data inaccessible. The supplier-based API solves all of these problems, by enforcing
* that each usage gets its own distinct buffer instance.
*
* @param bytes The byte contents of the buffers produced by the returned supplier.
* @return A supplier of read-only buffers with the given contents.
*/
default Supplier<Buffer> constBufferSupplier(byte[] bytes) {
byte[] safeCopy = bytes.clone(); // Prevent modifying the bytes after creating the supplier.
return () -> allocate(bytes.length).writeBytes(safeCopy).makeReadOnly();
}
/**
* Close this allocator, freeing all of its internal resources.
* <p>
* Existing (currently in-use) allocated buffers will not be impacted by calling this method.
* If this is a pooling or caching allocator, then existing buffers will be immediately freed when they are closed,
* instead of being pooled or cached.
* <p>
* The allocator can still be used to allocate more buffers after calling this method.
* However, if this is a pooling or caching allocator, then the pooling and caching functionality will be
* effectively disabled after calling this method.
* <p>
* If this allocator does not perform any pooling or caching, then calling this method likely has no effect.
*/
@Override
default void close() {
}
static BufferAllocator heap() {
return new ManagedBufferAllocator(MemoryManagers.getManagers().getHeapMemoryManager());
}
static BufferAllocator direct() {
return new ManagedBufferAllocator(MemoryManagers.getManagers().getNativeMemoryManager());
}
static BufferAllocator pooledHeap() {
return new PooledBufferAllocator(MemoryManagers.getManagers().getHeapMemoryManager());
}
static BufferAllocator pooledDirect() {
return new PooledBufferAllocator(MemoryManagers.getManagers().getNativeMemoryManager());
}
}
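A hedged sketch of the constBufferSupplier use case described above; the field name, the charset, and the choice of a heap allocator are this example's own assumptions:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import java.nio.charset.StandardCharsets;
import java.util.function.Supplier;

class ConstBufferExample {
    // Every get() returns a fresh, independent, read-only Buffer with the same contents.
    static final Supplier<Buffer> GREETING = BufferAllocator.heap()
            .constBufferSupplier("hello".getBytes(StandardCharsets.US_ASCII));

    static Buffer greeting() {
        return GREETING.get(); // the caller owns this instance and must close it
    }
}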

View File

@ -1,27 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* An exception thrown when an operation is attempted on a {@link Buffer} that has been closed.
*/
public final class BufferClosedException extends UnsupportedOperationException {
private static final long serialVersionUID = 85913332711192868L;
public BufferClosedException(final String message) {
super(message);
}
}

View File

@ -1,149 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import io.netty.buffer.api.internal.ResourceSupport;
import io.netty.buffer.api.internal.Statics;
import java.lang.invoke.VarHandle;
import java.util.Objects;
import static java.lang.invoke.MethodHandles.lookup;
/**
* The {@link BufferHolder} is an abstract class that simplifies the implementation of objects that themselves contain
* a {@link Buffer} instance.
* <p>
* The {@link BufferHolder} can only hold on to a single buffer, so objects and classes that need to hold on to multiple
* buffers will have to implement that themselves, though they can use the code of the {@link BufferHolder}
* as inspiration.
* <p>
* If you just want an object that is a reference to a buffer, then the {@link BufferRef} can be used for that purpose.
* If you have an advanced use case where you wish to implement {@link Resource}, and tightly control lifetimes, then
* {@link ResourceSupport} can be of help.
*
* @param <T> The concrete {@link BufferHolder} type.
*/
public abstract class BufferHolder<T extends BufferHolder<T>> implements Resource<T> {
private static final VarHandle BUF = Statics.findVarHandle(lookup(), BufferHolder.class, "buf", Buffer.class);
private Buffer buf;
/**
* Create a new {@link BufferHolder} to hold the given {@linkplain Buffer buffer}.
* <p>
* <strong>Note:</strong> this increases the reference count of the given buffer.
*
* @param buf The {@linkplain Buffer buffer} to be held by this holder.
*/
protected BufferHolder(Buffer buf) {
this.buf = Objects.requireNonNull(buf, "The buffer cannot be null.");
}
/**
* Create a new {@link BufferHolder} to hold the {@linkplain Buffer buffer} received from the given {@link Send}.
* <p>
* The {@link BufferHolder} will then be holding exclusive ownership of the buffer.
*
* @param send The {@link Send} of the {@linkplain Buffer buffer} to be held by this holder.
*/
protected BufferHolder(Send<Buffer> send) {
buf = Objects.requireNonNull(send, "The send cannot be null.").receive();
}
@Override
public void close() {
buf.close();
}
@SuppressWarnings("unchecked")
@Override
public Send<T> send() {
return buf.send().map((Class<T>) getClass(), this::receive);
}
/**
* Called when a {@linkplain #send() sent} {@link BufferHolder} is received by the recipient.
* The {@link BufferHolder} should return a new concrete instance, that wraps the given {@link Buffer} object.
*
* @param buf The {@link Buffer} that is {@linkplain Send#receive() received} by the recipient,
* and needs to be wrapped in a new {@link BufferHolder} instance.
* @return A new {@linkplain T buffer holder} instance, containing the given {@linkplain Buffer buffer}.
*/
protected abstract T receive(Buffer buf);
/**
* Replace the underlying referenced buffer with the given buffer.
* <p>
* This method is protected to permit advanced use cases of {@link BufferHolder} sub-class implementations.
* <p>
* <strong>Note:</strong> this method decreases the reference count of the current buffer,
* and takes exclusive ownership of the sent buffer.
* <p>
* The buffer assignment is performed using a plain store.
*
* @param send The new {@link Buffer} instance that is replacing the currently held buffer.
*/
protected final void replaceBuffer(Send<Buffer> send) {
Buffer received = send.receive();
buf.close();
buf = received;
}
/**
* Replace the underlying referenced buffer with the given buffer.
* <p>
* This method is protected to permit advanced use cases of {@link BufferHolder} sub-class implementations.
* <p>
* <strong>Note:</strong> this method decreases the reference count of the current buffer,
* and takes exclusive ownership of the sent buffer.
* <p>
* The buffer assignment is performed using a volatile store.
*
* @param send The {@link Send} with the new {@link Buffer} instance that is replacing the currently held buffer.
*/
protected final void replaceBufferVolatile(Send<Buffer> send) {
Buffer received = send.receive();
var prev = (Buffer) BUF.getAndSet(this, received);
prev.close();
}
/**
* Access the held {@link Buffer} instance.
* <p>
* The access is performed using a plain load.
*
* @return The {@link Buffer} instance being held by this {@linkplain T buffer holder}.
*/
protected final Buffer getBuffer() {
return buf;
}
/**
* Access the held {@link Buffer} instance.
* <p>
* The access is performed using a volatile load.
*
* @return The {@link Buffer} instance being held by this {@linkplain T buffer holder}.
*/
protected final Buffer getBufferVolatile() {
return (Buffer) BUF.getVolatile(this);
}
@Override
public boolean isAccessible() {
return buf.isAccessible();
}
}
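A hypothetical sub-class, sketched only from the contract above, that wraps a single buffer as a sendable message envelope; the class name and the peekInt helper are illustrative, not part of the API:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferHolder;
import io.netty.buffer.api.Send;

public final class Envelope extends BufferHolder<Envelope> {
    public Envelope(Send<Buffer> send) {
        super(send); // take exclusive ownership of the sent buffer
    }

    private Envelope(Buffer buf) {
        super(buf); // increments the reference count of the given buffer
    }

    @Override
    protected Envelope receive(Buffer buf) {
        return new Envelope(buf); // wrap the received buffer in a new holder instance
    }

    public int peekInt() {
        return getBuffer().getInt(0); // absolute read; buffer offsets are unchanged
    }
}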

View File

@ -1,27 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* An exception thrown when an operation is attempted on a {@linkplain Buffer#readOnly() read-only} {@link Buffer}.
*/
public final class BufferReadOnlyException extends UnsupportedOperationException {
private static final long serialVersionUID = 4855825594125231593L;
public BufferReadOnlyException(final String message) {
super(message);
}
}

View File

@ -1,74 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.lang.invoke.VarHandle;
/**
* A mutable reference to a buffer.
*/
public final class BufferRef extends BufferHolder<BufferRef> {
/**
* Create a reference to the given {@linkplain Buffer buffer}.
* This increments the reference count of the buffer.
*
* @param buf The buffer to reference.
*/
private BufferRef(Buffer buf) {
super(buf);
// BufferRef is meant to be atomic, so we need to add a fence to get the semantics of a volatile store.
VarHandle.fullFence();
}
/**
* Create a reference that holds the exclusive ownership of the sent buffer.
*
* @param send The {@linkplain Send sent} buffer to take ownership of.
*/
public BufferRef(Send<Buffer> send) {
super(send);
// BufferRef is meant to be atomic, so we need to add a fence to get the semantics of a volatile store.
VarHandle.fullFence();
}
@Override
protected BufferRef receive(Buffer buf) {
return new BufferRef(buf);
}
/**
* Replace the underlying referenced buffer with the given buffer.
* <p>
* <strong>Note:</strong> this method decreases the reference count of the current buffer,
* and takes exclusive ownership of the sent buffer.
* <p>
* The buffer assignment is performed using a volatile store.
*
* @param send The {@link Send} with the new {@link Buffer} instance that is replacing the currently held buffer.
*/
public void replace(Send<Buffer> send) {
replaceBufferVolatile(send);
}
/**
* Access the buffer in this reference.
*
* @return The buffer held by the reference.
*/
public Buffer contents() {
return getBufferVolatile();
}
}
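A brief usage sketch; the allocator variable is assumed to be a BufferAllocator obtained elsewhere:

BufferRef ref = new BufferRef(allocator.allocate(64).send()); // take ownership of the sent buffer
ref.replace(allocator.allocate(128).send());                  // the previously held buffer is closed
Buffer current = ref.contents(); // still owned by the ref; do not close it separately
current.writeInt(1);
ref.close();                     // closes the currently held buffer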

View File

@ -1,108 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.netty.buffer.api;
import io.netty.util.ByteProcessor;
/**
* The ByteCursor scans through a sequence of bytes.
* This is similar to {@link ByteProcessor}, but for external iteration rather than internal iteration.
* The external iteration allows the callers to control the pace of the iteration.
* The API includes methods for reading {@code long}s as a batch of 8 bytes.
* The long values are always in big-endian format, so that the highest-order byte in the long value contains the byte
* that would otherwise have been returned by the next call to {@link #getByte()}.
*/
public interface ByteCursor {
/**
* Check if the iterator has at least 8 bytes left, and if so, read those 8 bytes and move the cursor forward.
* The bytes are packed as a {@code long} value in big-endian format, such that the highest-order byte
* in the long, is the byte that would otherwise have been returned by the next call to {@link #getByte()},
* after a call to {@link #readByte()}.
* The bytes (as a {@code long}) will then be available through the {@link #getLong()} method.
* <p>
* Note that when this method returns {@code false}, {@link #readByte()} can still return {@code true}.
* It is recommended to follow any long-processing loop with a byte-processing loop for the 7 or fewer
* bytes that might form a tail in the cursor.
* <p>
* Also note that this method will not influence what is returned by the {@link #getByte()} method.
*
* @return {@code true} if the cursor read 8 bytes and moved forward, otherwise {@code false}.
*/
boolean readLong();
/**
* Return the last 8 bytes read by {@link #readLong()}.
* If {@link #readLong()} has not been called on this cursor before, then {@code -1} is returned.
* <p>
* The long value is in big-endian format, such that the highest-order byte of the long value is the byte that
* would otherwise have been produced by a {@link #readByte()} / {@link #getByte()} pair.
* This means that cursors that iterate in reverse, e.g. from {@link Buffer#openReverseCursor()}, return longs in a
* "mirrored" or "reversed" big-endian format.
*
* @return The 8 bytes, in big-endian format, that were read by the most recent successful call to
* {@link #readLong()}.
*/
long getLong();
/**
* Check if the iterator has at least one byte left, and if so, read that byte and move the cursor forward.
* The byte will then be available through the {@link #getByte()} method.
* <p>
* Note that this method will not influence what is returned from the {@link #getLong()} method.
*
* @return {@code true} if the cursor read a byte and moved forward, otherwise {@code false}.
*/
boolean readByte();
/**
* Return the last byte that was read by {@link #readByte()}.
* If {@link #readByte()} has not been called on this cursor before, then {@code -1} is returned.
*
* @return The next byte that was read by the most recent successful call to {@link #readByte()}.
*/
byte getByte();
/**
* The current position of this iterator into the underlying sequence of bytes.
* For instance, if we are iterating a buffer, this would be the iterator's current offset into the buffer.
*
* @return The current iterator offset into the underlying sequence of bytes.
*/
int currentOffset();
/**
* Get the current number of bytes left in the iterator.
*
* @return The number of bytes left in the iterator.
*/
int bytesLeft();
/**
* Process the remaining bytes in this iterator with the given {@link ByteProcessor}.
* This method consumes the iterator.
*
* @param processor The processor to use for processing the bytes in the iterator.
* @return The number of bytes processed, if the {@link ByteProcessor#process(byte) process} method returned
* {@code false}, or {@code -1} if the whole iterator was processed.
*/
default int process(ByteProcessor processor) {
boolean requestMore = true;
int count = 0;
while (readByte() && (requestMore = processor.process(getByte()))) {
count++;
}
return requestMore ? -1 : count;
}
}
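A sketch of the recommended long-batch-then-byte-tail iteration pattern, assuming a cursor obtained from Buffer#openCursor() as referenced later in this commit:

static long sum(ByteCursor cursor) {
    long sum = 0;
    while (cursor.readLong()) { // consume 8 bytes at a time, in big-endian format
        sum += cursor.getLong();
    }
    while (cursor.readByte()) { // consume the 0 to 7 remaining tail bytes
        sum += cursor.getByte() & 0xFF;
    }
    return sum;
}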

View File

@ -1,40 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* The Drop interface is used by {@link Resource} instances to implement their resource disposal mechanics.
* The {@link #drop(Object)} method will be called by the resource when they are closed.
*
* @param <T> The type of {@link Resource} instance that can be dropped.
*/
@FunctionalInterface
public interface Drop<T> {
/**
* Dispose of the resources in the given {@link Resource} instance.
*
* @param obj The {@link Resource} instance being dropped.
*/
void drop(T obj);
/**
* Called when the resource changes owner.
*
* @param obj The new {@link Resource} instance with the new owner.
*/
default void attach(T obj) {
}
}
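A hypothetical Drop implementation, for illustration only; the idea of a native address handle and the freeNative helper are assumptions of this example, not part of the API:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;

final class NativeMemoryDrop implements Drop<Buffer> {
    private final long address; // assumed handle to a native allocation

    NativeMemoryDrop(long address) {
        this.address = address;
    }

    @Override
    public void drop(Buffer buf) {
        freeNative(address); // release the native allocation when the buffer is dropped
    }

    private static void freeNative(long address) {
        // placeholder; a real implementation would free the memory behind 'address' here
    }
}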

View File

@ -1,67 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import io.netty.buffer.api.internal.Statics;
import java.util.function.Supplier;
import static io.netty.buffer.api.internal.Statics.NO_OP_DROP;
class ManagedBufferAllocator implements BufferAllocator, AllocatorControl {
private final MemoryManager manager;
ManagedBufferAllocator(MemoryManager manager) {
this.manager = manager;
}
@Override
public Buffer allocate(int size) {
BufferAllocator.checkSize(size);
return manager.allocateShared(this, size, manager.drop(), Statics.CLEANER);
}
@Override
public Supplier<Buffer> constBufferSupplier(byte[] bytes) {
Buffer constantBuffer = manager.allocateShared(this, bytes.length, manager.drop(), Statics.CLEANER);
constantBuffer.writeBytes(bytes).makeReadOnly();
return () -> manager.allocateConstChild(constantBuffer);
}
@SuppressWarnings("unchecked")
@Override
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
BufferAllocator.checkSize(size);
var buf = manager.allocateShared(this, size, NO_OP_DROP, Statics.CLEANER);
return new UntetheredMemory() {
@Override
public <Memory> Memory memory() {
return (Memory) manager.unwrapRecoverableMemory(buf);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
return (Drop<BufferType>) manager.drop();
}
};
}
@Override
public void recoverMemory(Object memory) {
// Free the recovered memory.
manager.recoverMemory(this, memory, manager.drop()).close();
}
}

View File

@ -1,31 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.lang.ref.Cleaner;
public interface MemoryManager {
boolean isNative();
Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner);
Buffer allocateConstChild(Buffer readOnlyConstParent);
Drop<Buffer> drop();
Object unwrapRecoverableMemory(Buffer buf);
int capacityOfRecoverableMemory(Object memory);
void discardRecoverableMemory(Object recoverableMemory);
// todo should recoverMemory re-attach a cleaner?
Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop);
Object sliceMemory(Object memory, int offset, int length);
}

View File

@ -1,110 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import io.netty.buffer.api.internal.MemoryManagersOverride;
import java.util.Optional;
import java.util.ServiceLoader;
import java.util.function.Supplier;
import java.util.stream.Stream;
/**
* The MemoryManagers interface is the handle through which {@link BufferAllocator buffer allocators} access the low
* level memory management APIs.
* <p>
* These APIs are hidden behind this interface in order to make the allocation and pooling logic agnostic of, and
* reusable across, the concrete buffer and memory implementations.
*/
public interface MemoryManagers {
/**
* Get the default, or currently configured, memory managers instance.
* @return A MemoryManagers instance.
*/
static MemoryManagers getManagers() {
return MemoryManagersOverride.getManagers();
}
/**
* Temporarily override the default configured memory managers instance.
* <p>
* Calls to {@link #getManagers()} from within the given supplier will get the given managers instance.
*
* @param managers Override the default configured managers instance with this instance.
* @param supplier The supplier function to be called while the override is in place.
* @param <T> The result type from the supplier.
* @return The result from the supplier.
*/
static <T> T using(MemoryManagers managers, Supplier<T> supplier) {
return MemoryManagersOverride.using(managers, supplier);
}
/**
* Get a lazy-loading stream of all available memory managers.
* <p>
* Note: All available {@link MemoryManagers} instances are service loaded and instantiated on every call.
*
* @return A stream of providers of memory managers instances.
*/
static Stream<ServiceLoader.Provider<MemoryManagers>> getAllManagers() {
var loader = ServiceLoader.load(MemoryManagers.class);
return loader.stream();
}
/**
* Find a {@link MemoryManagers} implementation by its {@linkplain #getImplementationName() implementation name}.
* <p>
* Note: All available {@link MemoryManagers} instances are service loaded and instantiated every time this
* method is called.
*
* @param implementationName The named implementation to look for.
* @return A {@link MemoryManagers} implementation, if any was found.
*/
static Optional<MemoryManagers> lookupImplementation(String implementationName) {
return getAllManagers()
.flatMap(provider -> {
try {
return Stream.ofNullable(provider.get());
} catch (Exception e) {
return Stream.empty();
}
})
.filter(impl -> implementationName.equals(impl.getImplementationName()))
.findFirst();
}
/**
* Get a {@link MemoryManager} instance that is suitable for allocating on-heap {@link Buffer} instances.
*
* @return An on-heap {@link MemoryManager}.
*/
MemoryManager getHeapMemoryManager();
/**
* Get a {@link MemoryManager} instance that is suitable for allocating off-heap {@link Buffer} instances.
*
* @return An off-heap {@link MemoryManager}.
*/
MemoryManager getNativeMemoryManager();
/**
* Get the name for this implementation, which can be used for finding this particular implementation via the
* {@link #lookupImplementation(String)} method.
*
* @return The name of this memory managers implementation.
*/
String getImplementationName();
}
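A hedged sketch of selecting a MemoryManagers implementation by name and scoping it with using(...); the fallback to the default managers is this example's own decision, and the implementation name is whatever the caller passes in:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.MemoryManagers;

class ManagersExample {
    static Buffer allocateWith(String implementationName, int size) {
        MemoryManagers managers = MemoryManagers.lookupImplementation(implementationName)
                .orElseGet(MemoryManagers::getManagers); // fall back to the default managers
        // BufferAllocator.heap() consults MemoryManagers.getManagers(), so it sees the override.
        return MemoryManagers.using(managers, () -> BufferAllocator.heap().allocate(size));
    }
}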

View File

@ -1,38 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* This interface encapsulates the ownership of a {@link Resource}, and exposes a method that may be used to transfer
* this ownership to the specified recipient thread.
*
* @param <T> The concrete type of {@link Resource} that is owned.
*/
@SuppressWarnings("InterfaceMayBeAnnotatedFunctional")
public interface Owned<T> {
/**
* Transfer the ownership of the resource to the calling thread. The resource instance is invalidated but without
* disposing of its internal state. Then a new resource instance with the given owner is produced in its stead.
* <p>
* This method is called by {@link Send} implementations. These implementations will ensure that the transfer of
* ownership (the calling of this method) happens-before the new owner begins accessing the new object. This ensures
* that the new resource instance is safely published to the new owner.
*
* @param drop The drop object that knows how to dispose of the state represented by this {@link Resource}.
* @return A new resource instance that is exactly the same as this resource.
*/
T transferOwnership(Drop<T> drop);
}

View File

@ -1,100 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.nio.ByteBuffer;
/**
* A view onto the buffer component being processed in a given iteration of
* {@link Buffer#forEachReadable(int, ReadableComponentProcessor)}.
*/
public interface ReadableComponent {
/**
* Check if this component is backed by a cached byte array that can be accessed cheaply.
* <p>
* <strong>Note</strong> that regardless of what this method returns, the array should not be used to modify the
* contents of this buffer component.
*
* @return {@code true} if {@link #readableArray()} is a cheap operation, otherwise {@code false}.
*/
boolean hasReadableArray();
/**
* Get a byte array of the contents of this component.
* <p>
* <strong>Note</strong> that the array is meant to be read-only. It may either be a direct reference to the
* concrete array instance that is backing this component, or a fresh copy. Writing to the array may produce
* undefined behaviour.
*
* @return A byte array of the contents of this component.
* @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}.
* @see #readableArrayOffset()
* @see #readableArrayLength()
*/
byte[] readableArray();
/**
* An offset into the {@link #readableArray()} where this component starts.
*
* @return An offset into {@link #readableArray()}.
* @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}.
*/
int readableArrayOffset();
/**
* The number of bytes in the {@link #readableArray()} that belong to this component.
*
* @return The number of bytes, from the {@link #readableArrayOffset()} into the {@link #readableArray()},
* that belong to this component.
* @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}.
*/
int readableArrayLength();
/**
* Give the native memory address backing this buffer, or return 0 if this buffer has no native memory address.
* <p>
* <strong>Note</strong> that the address should not be used for writing to the buffer memory, and doing so may
* produce undefined behaviour.
*
* @return The native memory address, if any, otherwise 0.
*/
long readableNativeAddress();
/**
* Get a {@link ByteBuffer} instance for this memory component.
* <p>
* <strong>Note</strong> that the {@link ByteBuffer} is read-only, to prevent write accesses to the memory,
* when the buffer component is obtained through {@link Buffer#forEachReadable(int, ReadableComponentProcessor)}.
*
* @return A new {@link ByteBuffer}, with its own position and limit, for this memory component.
*/
ByteBuffer readableBuffer();
/**
* Open a cursor to iterate the readable bytes of this component.
* Any offsets internal to the component are not modified by the cursor.
* <p>
* Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that
* the internal offsets of the component (such as {@link Buffer#readerOffset()} and {@link Buffer#writerOffset()})
* are not modified while the iteration takes place. Otherwise, unpredictable behaviour might result.
*
* @return A {@link ByteCursor} for iterating the readable bytes of this buffer.
* @see Buffer#openCursor()
*/
ByteCursor openCursor();
// todo for Unsafe-based impl, DBB.attachment needs to keep underlying memory alive
}

View File

@ -1,40 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.nio.ByteBuffer;
/**
* A processor of {@linkplain ReadableComponent readable components}.
*/
@FunctionalInterface
public interface ReadableComponentProcessor<E extends Exception> {
/**
* Process the given component at the given index in the
* {@link Buffer#forEachReadable(int, ReadableComponentProcessor) iteration}.
* <p>
* The component object itself is only valid during this call, but the {@link ByteBuffer byte buffers}, arrays, and
* native address pointers obtained from it, will be valid until any operation that changes the internal memory
* is performed on the buffer.
*
* @param index The current index of the given buffer component, based on the initial index passed to the
* {@link Buffer#forEachReadable(int, ReadableComponentProcessor)} method.
* @param component The current buffer component being processed.
* @return {@code true} if the iteration should continue and more components should be processed, otherwise
* {@code false} to stop the iteration early.
*/
boolean process(int index, ReadableComponent component) throws E;
}
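A hypothetical processor that counts the readable bytes across all components; it assumes that forEachReadable accepts a starting index and a processor, as the references above suggest, and it ignores the method's return value:

import io.netty.buffer.api.Buffer;
import java.util.concurrent.atomic.AtomicInteger;

class CountingProcessorExample {
    static int countReadableBytes(Buffer buffer) {
        AtomicInteger total = new AtomicInteger();
        buffer.forEachReadable(0, (index, component) -> {
            total.addAndGet(component.readableBuffer().remaining()); // read-only view of this component
            return true; // keep iterating over the remaining components
        });
        return total.get();
    }
}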

View File

@ -1,56 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* A resource that has a life-time, and can be {@linkplain #close() closed}.
* Resources are initially {@linkplain #isAccessible() accessible}, but closing them makes them inaccessible.
*/
public interface Resource<T extends Resource<T>> extends AutoCloseable {
/**
* Send this object instance to another Thread, transferring the ownership to the recipient.
* <p>
* The object must be in a state where it can be sent, which includes at least being
* {@linkplain #isAccessible() accessible}.
* <p>
* When sent, this instance will immediately become inaccessible, as if by {@linkplain #close() closing} it.
* All attempts at accessing an object that has been sent, even if that object has not yet been received, should
* cause an exception to be thrown.
* <p>
* Calling {@link #close()} on an object that has been sent will have no effect, so this method is safe to call
* within a try-with-resources statement.
*/
Send<T> send();
/**
* Close the resource, making it inaccessible.
* <p>
* Note that this method is not thread-safe unless otherwise specified.
*
* @throws IllegalStateException If this {@code Resource} has already been closed.
*/
@Override
void close();
/**
* Check if this object is accessible.
*
* @return {@code true} if this object is still valid and can be accessed,
* otherwise {@code false} if, for instance, this object has been dropped/deallocated,
* or been {@linkplain #send() sent} elsewhere.
*/
boolean isAccessible();
}

View File

@ -1,23 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
/**
* Thrown when resource disposal fails while closing a resource pool.
*/
public class ResourceDisposeFailedException extends RuntimeException {
private static final long serialVersionUID = -1413426368835341993L;
}

View File

@ -1,60 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.util.ArrayDeque;
/**
* A scope is a convenient mechanism for capturing the life cycles of multiple reference counted objects. Once the scope
* is closed, all the added objects will also be closed in reverse insertion order. That is, the most recently added
* object will be closed first.
* <p>
* Scopes can be reused. After a scope has been closed, new objects can be added to it, and they will be closed when the
* scope is closed again.
* <p>
* Objects will not be closed multiple times if the scope is closed multiple times, unless said objects are also added
* multiple times.
* <p>
* Note that scopes are not thread-safe. They are intended to be used from a single thread.
*/
public final class Scope implements AutoCloseable {
private final ArrayDeque<Resource<?>> deque = new ArrayDeque<>();
/**
* Add the given reference counted object to this scope, so that it will be {@linkplain Resource#close() closed}
* when this scope is {@linkplain #close() closed}.
*
* @param obj The reference counted object to add to this scope.
* @param <T> The type of the reference counted object.
* @return The same exact object that was added; further operations can be chained on the object after this method
* call.
*/
public <T extends Resource<T>> T add(T obj) {
deque.addLast(obj);
return obj;
}
/**
* Close this scope and all the reference counted objects it contains.
*/
@Override
public void close() {
Resource<?> obj;
while ((obj = deque.pollLast()) != null) {
obj.close();
}
}
}
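A minimal sketch of tying several buffers to one lexical scope; the allocator variable is assumed to be a BufferAllocator obtained elsewhere:

try (Scope scope = new Scope()) {
    Buffer header = scope.add(allocator.allocate(16));
    Buffer body = scope.add(allocator.allocate(1024));
    header.writeInt(body.capacity());
    // ... fill and use the buffers ...
} // 'body' is closed first, then 'header' (reverse insertion order)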

View File

@ -1,129 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* A {@code Send} object is a temporary holder of a {@link Resource}, used for transferring the ownership of the
* resource from one thread to another.
* <p>
* Prior to the {@code Send} being created, the originating resource is invalidated, to prevent access while it is being
* sent. This means it cannot be accessed, closed, or disposed of, while it is in-flight. Once the resource is
* {@linkplain #receive() received}, the new ownership is established.
* <p>
* Care must be taken to ensure that the resource is always received by some thread.
* Failure to do so can result in a resource leak.
*
* @param <T> The type of {@link Resource} being sent.
*/
public interface Send<T extends Resource<T>> {
/**
* Construct a {@link Send} based on the given {@link Supplier}.
* The supplier will be called only once, in the receiving thread.
*
* @param concreteObjectType The concrete type of the object being sent. Specifically, the object returned from the
* {@link Supplier#get()} method must be an instance of this class.
* @param supplier The supplier of the object being sent, which will be called when the object is ready to be
* received.
* @param <T> The type of object being sent.
* @return A {@link Send} which will deliver an object of the given type, from the supplier.
*/
static <T extends Resource<T>> Send<T> sending(Class<T> concreteObjectType, Supplier<? extends T> supplier) {
return new Send<T>() {
private final AtomicBoolean gate = new AtomicBoolean();
@Override
public T receive() {
if (gate.getAndSet(true)) {
throw new IllegalStateException("This object has already been received.");
}
return supplier.get();
}
@Override
public boolean referentIsInstanceOf(Class<?> cls) {
return cls.isAssignableFrom(concreteObjectType);
}
@Override
public void discard() {
if (!gate.getAndSet(true)) {
supplier.get().close();
}
}
};
}
/**
* Determine if the given candidate object is an instance of a {@link Send} from which an object of the given type
* can be received.
*
* @param type The type of object we wish to receive.
* @param candidate The candidate object that might be a {@link Send} of an object of the given type.
* @return {@code true} if the candidate object is a {@link Send} that would deliver an object of the given type,
* otherwise {@code false}.
*/
static boolean isSendOf(Class<?> type, Object candidate) {
return candidate instanceof Send && ((Send<?>) candidate).referentIsInstanceOf(type);
}
/**
* Receive the {@link Resource} instance being sent, and bind its ownership to the calling thread.
* The invalidation of the sent resource in the sending thread happens-before the return of this method.
* <p>
* This method can only be called once, and will throw an exception on subsequent calls.
*
* @return The sent resource instance.
* @throws IllegalStateException If this method is called more than once.
*/
T receive();
/**
* Apply a mapping function to the object being sent. The mapping will occur when the object is received.
*
* @param type The result type of the mapping function.
* @param mapper The mapping function to apply to the object being sent.
* @param <R> The result type of the mapping function.
* @return A new {@link Send} instance that will deliver an object that is the result of the mapping.
*/
default <R extends Resource<R>> Send<R> map(Class<R> type, Function<T, R> mapper) {
return sending(type, () -> mapper.apply(receive()));
}
/**
* Discard this {@link Send} and the object it contains.
* This has no effect if the send object has already been received.
*/
default void discard() {
try {
receive().close();
} catch (IllegalStateException ignore) {
// Don't do anything if the "Send" has already been consumed.
}
}
/**
* Determine if the object received from this {@code Send} is an instance of the given class.
*
* @param cls The type to check.
* @return {@code true} if the object received from this {@code Send} can be assigned to fields or variables of the
* given type, otherwise {@code false}.
*/
boolean referentIsInstanceOf(Class<?> cls);
}
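A sketch of handing a buffer off to a worker thread; the executor and the process method are assumptions of this example:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import java.util.concurrent.ExecutorService;

class SendExample {
    static void handOff(Buffer buffer, ExecutorService executor) {
        Send<Buffer> send = buffer.send(); // 'buffer' becomes inaccessible in this thread
        executor.submit(() -> {
            try (Buffer received = send.receive()) { // ownership is bound to the worker thread
                process(received);
            }
        });
    }

    private static void process(Buffer buffer) {
        // placeholder for whatever work the receiving thread does with the buffer
    }
}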

View File

@ -1,74 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.nio.ByteBuffer;
/**
* A view onto the buffer component being processed in a given iteration of
* {@link Buffer#forEachWritable(int, WritableComponentProcessor)}.
*/
public interface WritableComponent {
/**
* Check if this component is backed by a cached byte array that can be accessed cheaply.
*
* @return {@code true} if {@link #writableArray()} is a cheap operation, otherwise {@code false}.
*/
boolean hasWritableArray();
/**
* Get a byte array of the contents of this component.
*
* @return A byte array of the contents of this component.
* @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}.
* @see #writableArrayOffset()
* @see #writableArrayLength()
*/
byte[] writableArray();
/**
* An offset into the {@link #writableArray()} where this component starts.
*
* @return An offset into {@link #writableArray()}.
* @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}.
*/
int writableArrayOffset();
/**
* The number of bytes in the {@link #writableArray()} that belong to this component.
*
* @return The number of bytes, from the {@link #writableArrayOffset()} into the {@link #writableArray()},
* that belong to this component.
* @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}.
*/
int writableArrayLength();
/**
* Give the native memory address backing this buffer, or return 0 if this buffer has no native memory address.
*
* @return The native memory address, if any, otherwise 0.
*/
long writableNativeAddress();
/**
* Get a {@link ByteBuffer} instance for this memory component, which can be used for modifying the buffer
* contents.
*
* @return A new {@link ByteBuffer}, with its own position and limit, for this memory component.
*/
ByteBuffer writableBuffer();
}

View File

@ -1,40 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.nio.ByteBuffer;
/**
* A processor of {@linkplain WritableComponent writable components}.
*/
@FunctionalInterface
public interface WritableComponentProcessor<E extends Exception> {
/**
* Process the given component at the given index in the
* {@link Buffer#forEachWritable(int, WritableComponentProcessor) iteration}.
* <p>
* The component object itself is only valid during this call, but the {@link ByteBuffer byte buffers}, arrays, and
* native address pointers obtained from it, will be valid until any {@link Buffer#isOwned() ownership}-requiring
* operation is performed on the buffer.
*
* @param index The current index of the given buffer component, based on the initial index passed to the
* {@link Buffer#forEachWritable(int, WritableComponentProcessor)} method.
* @param component The current buffer component being processed.
* @return {@code true} if the iteration should continue and more components should be processed, otherwise
* {@code false} to stop the iteration early.
*/
boolean process(int index, WritableComponent component) throws E;
}
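
A minimal sketch of driving the iteration with a lambda, assuming BufferAllocator.pooledHeap(), allocate(int) and try-with-resources behave as used elsewhere in this change; returning true continues with the next component, false stops early.

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

final class ForEachWritableSketch {
    public static void main(String[] args) throws Exception {
        try (BufferAllocator allocator = BufferAllocator.pooledHeap();
             Buffer buffer = allocator.allocate(64)) {
            int[] zeroed = {0};
            buffer.forEachWritable(0, (index, component) -> {
                java.nio.ByteBuffer dest = component.writableBuffer();
                zeroed[0] += dest.remaining();
                while (dest.hasRemaining()) {
                    dest.put((byte) 0); // Zero-fill this component.
                }
                return true; // Keep visiting the remaining components.
            });
            System.out.println("Zero-filled " + zeroed[0] + " writable bytes.");
        }
    }
}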

View File

@ -1,25 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.adaptor;
import io.netty.buffer.ByteBufConvertible;
import io.netty.util.ReferenceCounted;
/**
* Interfaces that are required for an object to stand in for a {@link io.netty.buffer.ByteBuf} in Netty.
*/
public interface BufferIntegratable extends ByteBufConvertible, ReferenceCounted {
}

View File

@ -1,159 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.adaptor;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.api.BufferAllocator;
import static java.nio.ByteOrder.BIG_ENDIAN;
public class ByteBufAllocatorAdaptor implements ByteBufAllocator, AutoCloseable {
private final BufferAllocator onheap;
private final BufferAllocator offheap;
private boolean closed;
public ByteBufAllocatorAdaptor() {
this(BufferAllocator.pooledHeap(), BufferAllocator.pooledDirect());
}
public ByteBufAllocatorAdaptor(BufferAllocator onheap, BufferAllocator offheap) {
this.onheap = onheap;
this.offheap = offheap;
}
@Override
public ByteBuf buffer() {
return buffer(256);
}
public BufferAllocator getOnHeap() {
return onheap;
}
public BufferAllocator getOffHeap() {
return offheap;
}
public boolean isClosed() {
return closed;
}
@Override
public ByteBuf buffer(int initialCapacity) {
return new ByteBufAdaptor(this, onheap.allocate(initialCapacity).order(BIG_ENDIAN));
}
@Override
public ByteBuf buffer(int initialCapacity, int maxCapacity) {
return buffer(maxCapacity);
}
@Override
public ByteBuf ioBuffer() {
return directBuffer();
}
@Override
public ByteBuf ioBuffer(int initialCapacity) {
return directBuffer(initialCapacity);
}
@Override
public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) {
return directBuffer(initialCapacity, maxCapacity);
}
@Override
public ByteBuf heapBuffer() {
return buffer();
}
@Override
public ByteBuf heapBuffer(int initialCapacity) {
return buffer(initialCapacity);
}
@Override
public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) {
return buffer(initialCapacity, maxCapacity);
}
@Override
public ByteBuf directBuffer() {
return directBuffer(256);
}
@Override
public ByteBuf directBuffer(int initialCapacity) {
return new ByteBufAdaptor(this, offheap.allocate(initialCapacity).order(BIG_ENDIAN));
}
@Override
public ByteBuf directBuffer(int initialCapacity, int maxCapacity) {
return directBuffer(maxCapacity);
}
@Override
public CompositeByteBuf compositeBuffer() {
return compositeHeapBuffer();
}
@Override
public CompositeByteBuf compositeBuffer(int maxNumComponents) {
return compositeHeapBuffer(maxNumComponents);
}
@Override
public CompositeByteBuf compositeHeapBuffer() {
return compositeHeapBuffer(1024);
}
@Override
public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) {
return new CompositeByteBuf(this, false, maxNumComponents, heapBuffer());
}
@Override
public CompositeByteBuf compositeDirectBuffer() {
return compositeDirectBuffer(1024);
}
@Override
public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) {
return new CompositeByteBuf(this, true, maxNumComponents, directBuffer());
}
@Override
public boolean isDirectBufferPooled() {
return true;
}
@Override
public int calculateNewCapacity(int minNewCapacity, int maxCapacity) {
return 0;
}
@Override
public void close() throws Exception {
try (onheap) {
try (offheap) {
closed = true;
}
}
}
}
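
A minimal bridging sketch for code that still expects a ByteBufAllocator, assuming ByteBufAdaptor implements the usual ByteBuf read/write/release contract:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.adaptor.ByteBufAllocatorAdaptor;

final class AdaptorSketch {
    public static void main(String[] args) throws Exception {
        try (ByteBufAllocatorAdaptor allocator = new ByteBufAllocatorAdaptor()) {
            ByteBuf byteBuf = allocator.directBuffer(64); // Backed by the off-heap BufferAllocator.
            try {
                byteBuf.writeInt(42);
                System.out.println(byteBuf.readInt());
            } finally {
                byteBuf.release(); // Releases the underlying Buffer through the adaptor.
            }
        }
    }
}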

View File

@ -1,20 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* Helpers for integrating with the existing {@link io.netty.buffer.ByteBuf} API.
*/
package io.netty.buffer.api.adaptor;

View File

@ -1,90 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.bytebuffer;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.Statics;
import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import static io.netty.buffer.api.internal.Statics.bbslice;
import static io.netty.buffer.api.internal.Statics.convert;
public class ByteBufferMemoryManager implements MemoryManager {
private final boolean direct;
public ByteBufferMemoryManager(boolean direct) {
this.direct = direct;
}
@Override
public boolean isNative() {
return direct;
}
@Override
public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner) {
int capacity = Math.toIntExact(size);
ByteBuffer buffer = direct? ByteBuffer.allocateDirect(capacity) : ByteBuffer.allocate(capacity);
buffer.order(ByteOrder.nativeOrder());
return new NioBuffer(buffer, buffer, allocatorControl, convert(drop));
}
@Override
public Buffer allocateConstChild(Buffer readOnlyConstParent) {
assert readOnlyConstParent.readOnly();
NioBuffer buf = (NioBuffer) readOnlyConstParent;
return new NioBuffer(buf);
}
@Override
public Drop<Buffer> drop() {
return Statics.NO_OP_DROP;
}
@Override
public Object unwrapRecoverableMemory(Buffer buf) {
return ((NioBuffer) buf).recoverable();
}
@Override
public int capacityOfRecoverableMemory(Object memory) {
//noinspection OverlyStrongTypeCast
return ((ByteBuffer) memory).capacity();
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
// ByteBuffers have their memory released by the GC, so there is nothing for us to do.
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
ByteBuffer memory = (ByteBuffer) recoverableMemory;
return new NioBuffer(memory, memory, allocatorControl, convert(drop));
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
var buffer = (ByteBuffer) memory;
return bbslice(buffer, offset, length);
}
}

View File

@ -1,41 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.bytebuffer;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.MemoryManagers;
public class ByteBufferMemoryManagers implements MemoryManagers {
@Override
public MemoryManager getHeapMemoryManager() {
return new ByteBufferMemoryManager(false);
}
@Override
public MemoryManager getNativeMemoryManager() {
return new ByteBufferMemoryManager(true);
}
@Override
public String getImplementationName() {
return "ByteBuffer";
}
@Override
public String toString() {
return "BB";
}
}

View File

@ -1,20 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* Safe ByteBuffer based implementation.
*/
package io.netty.buffer.api.bytebuffer;

View File

@ -1,115 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
public final class ArcDrop<T> implements Drop<T> {
private static final VarHandle COUNT;
static {
try {
COUNT = MethodHandles.lookup().findVarHandle(ArcDrop.class, "count", int.class);
} catch (Exception e) {
throw new ExceptionInInitializerError(e);
}
}
private final Drop<T> delegate;
@SuppressWarnings("FieldMayBeFinal")
private volatile int count;
public ArcDrop(Drop<T> delegate) {
this.delegate = delegate;
count = 1;
}
public static <X> Drop<X> wrap(Drop<X> drop) {
if (drop.getClass() == ArcDrop.class) {
return drop;
}
return new ArcDrop<X>(drop);
}
public static <X> Drop<X> acquire(Drop<X> drop) {
if (drop.getClass() == ArcDrop.class) {
((ArcDrop<X>) drop).increment();
return drop;
}
return new ArcDrop<X>(drop);
}
public ArcDrop<T> increment() {
int c;
do {
c = count;
checkValidState(c);
} while (!COUNT.compareAndSet(this, c, c + 1));
return this;
}
@Override
public void drop(T obj) {
int c;
int n;
do {
c = count;
n = c - 1;
checkValidState(c);
} while (!COUNT.compareAndSet(this, c, n));
if (n == 0) {
delegate.drop(obj);
}
}
@Override
public void attach(T obj) {
delegate.attach(obj);
}
public boolean isOwned() {
return count <= 1;
}
public int countBorrows() {
return count - 1;
}
public Drop<T> unwrap() {
return delegate;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder()
.append("ArcDrop@")
.append(Integer.toHexString(System.identityHashCode(this)))
.append('(').append(count).append(", ");
Drop<T> drop = this;
while ((drop = ((ArcDrop<T>) drop).unwrap()) instanceof ArcDrop) {
builder.append(((ArcDrop<T>) drop).count).append(", ");
}
return builder.append(drop).append(')').toString();
}
private static void checkValidState(int count) {
if (count == 0) {
throw new IllegalStateException("Underlying resources have already been freed.");
}
}
}
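
A minimal sketch of the counting contract (ArcDrop is internal; the anonymous Drop is only there to observe the delegation): the delegate fires exactly once, when the atomically maintained count reaches zero.

import io.netty.buffer.api.Drop;
import io.netty.buffer.api.internal.ArcDrop;

final class ArcDropSketch {
    public static void main(String[] args) {
        Drop<String> delegate = new Drop<String>() {
            @Override public void drop(String obj) { System.out.println("really dropped: " + obj); }
            @Override public void attach(String obj) { }
        };
        Drop<String> arc = ArcDrop.wrap(delegate); // count = 1
        ArcDrop.acquire(arc);                      // count = 2
        arc.drop("payload");                       // count = 1, delegate not called yet
        arc.drop("payload");                       // count = 0, delegate fires exactly once
    }
}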

View File

@ -1,79 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import java.lang.ref.Cleaner;
import java.util.concurrent.atomic.AtomicReference;
/**
* A drop implementation that delegates to another drop instance, either when called directly, or when it becomes
* cleanable. This ensures that objects are dropped even if they leak.
*/
public final class CleanerDrop<T> implements Drop<T> {
private Cleaner.Cleanable cleanable;
private GatedRunner<T> runner;
/**
* Wrap the given drop instance, and produce a new drop instance that will also call the delegate drop instance if
* it becomes cleanable.
*/
public static <T> Drop<T> wrap(Drop<T> drop) {
CleanerDrop<T> cleanerDrop = new CleanerDrop<>();
GatedRunner<T> runner = new GatedRunner<>(drop);
cleanerDrop.cleanable = Statics.CLEANER.register(cleanerDrop, runner);
cleanerDrop.runner = runner;
return cleanerDrop;
}
private CleanerDrop() {
}
@Override
public void attach(T obj) {
runner.set(obj);
runner.drop.attach(obj);
}
@Override
public void drop(T obj) {
attach(obj);
cleanable.clean();
}
@Override
public String toString() {
return "CleanerDrop(" + runner.drop + ')';
}
private static final class GatedRunner<T> extends AtomicReference<T> implements Runnable {
private static final long serialVersionUID = 2685535951915798850L;
final Drop<T> drop;
private GatedRunner(Drop<T> drop) {
this.drop = drop;
}
@Override
public void run() {
T obj = getAndSet(null); // Make absolutely sure we only delegate once.
if (obj != null) {
drop.drop(obj);
}
}
}
}
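
A minimal sketch: the delegate runs at most once, either from an explicit drop() or, if the wrapper leaks, from the shared Cleaner at some GC-determined time.

import io.netty.buffer.api.Drop;
import io.netty.buffer.api.internal.CleanerDrop;

final class CleanerDropSketch {
    public static void main(String[] args) {
        Drop<String> delegate = new Drop<String>() {
            @Override public void drop(String obj) { System.out.println("dropped: " + obj); }
            @Override public void attach(String obj) { }
        };
        Drop<String> guarded = CleanerDrop.wrap(delegate);
        guarded.attach("payload"); // Remember the object for the cleaner and forward attach.
        guarded.drop("payload");   // Delegates once; the registered cleaner becomes a no-op afterwards.
        // Had drop() never been called, the Cleaner would eventually invoke the delegate instead.
    }
}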

View File

@ -1,195 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.Owned;
import io.netty.buffer.api.Resource;
import java.util.ArrayDeque;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Stream;
public abstract class LifecycleTracer {
public static LifecycleTracer get() {
if (Trace.TRACE_LIFECYCLE_DEPTH == 0) {
return NoOpTracer.INSTANCE;
}
StackTracer stackTracer = new StackTracer();
stackTracer.addTrace(StackTracer.WALKER.walk(new Trace("allocate", 0)));
return stackTracer;
}
public abstract void acquire(int acquires);
public abstract void drop(int acquires);
public abstract void close(int acquires);
public abstract <I extends Resource<I>, T extends ResourceSupport<I, T>> Owned<T> send(Owned<T> send, int acquires);
public abstract <E extends Throwable> E attachTrace(E throwable);
private static final class NoOpTracer extends LifecycleTracer {
private static final NoOpTracer INSTANCE = new NoOpTracer();
@Override
public void acquire(int acquires) {
}
@Override
public void drop(int acquires) {
}
@Override
public void close(int acquires) {
}
@Override
public <I extends Resource<I>, T extends ResourceSupport<I, T>> Owned<T> send(Owned<T> send, int acquires) {
return send;
}
@Override
public <E extends Throwable> E attachTrace(E throwable) {
return throwable;
}
}
private static final class StackTracer extends LifecycleTracer {
private static final int MAX_TRACE_POINTS = Math.min(Integer.getInteger("MAX_TRACE_POINTS", 50), 1000);
private static final StackWalker WALKER;
static {
int depth = Trace.TRACE_LIFECYCLE_DEPTH;
WALKER = depth > 0 ? StackWalker.getInstance(Set.of(), depth + 2) : null;
}
private final ArrayDeque<Trace> traces = new ArrayDeque<>();
private boolean dropped;
@Override
public void acquire(int acquires) {
Trace trace = WALKER.walk(new Trace("acquire", acquires));
addTrace(trace);
}
void addTrace(Trace trace) {
synchronized (traces) {
if (traces.size() == MAX_TRACE_POINTS) {
traces.pollFirst();
}
traces.addLast(trace);
}
}
@Override
public void drop(int acquires) {
dropped = true;
addTrace(WALKER.walk(new Trace("drop", acquires)));
}
@Override
public void close(int acquires) {
if (!dropped) {
addTrace(WALKER.walk(new Trace("close", acquires)));
}
}
@Override
public <I extends Resource<I>, T extends ResourceSupport<I, T>> Owned<T> send(Owned<T> send, int acquires) {
Trace sendTrace = new Trace("send", acquires);
sendTrace.sent = true;
addTrace(WALKER.walk(sendTrace));
return new Owned<T>() {
@Override
public T transferOwnership(Drop<T> drop) {
sendTrace.received = WALKER.walk(new Trace("received", acquires));
return send.transferOwnership(drop);
}
};
}
@Override
public <E extends Throwable> E attachTrace(E throwable) {
synchronized (traces) {
long timestamp = System.nanoTime();
for (Trace trace : traces) {
trace.attach(throwable, timestamp);
}
}
return throwable;
}
}
private static final class Trace implements Function<Stream<StackWalker.StackFrame>, Trace> {
private static final int TRACE_LIFECYCLE_DEPTH;
static {
int traceDefault = 0;
//noinspection AssertWithSideEffects
assert (traceDefault = 10) > 0;
TRACE_LIFECYCLE_DEPTH = Math.max(Integer.getInteger("TRACE_LIFECYCLE_DEPTH", traceDefault), 0);
}
final String name;
final int acquires;
final long timestamp;
boolean sent;
volatile Trace received;
StackWalker.StackFrame[] frames;
Trace(String name, int acquires) {
this.name = name;
this.acquires = acquires;
timestamp = System.nanoTime();
}
@Override
public Trace apply(Stream<StackWalker.StackFrame> frames) {
this.frames = frames.limit(TRACE_LIFECYCLE_DEPTH + 1).toArray(StackWalker.StackFrame[]::new);
return this;
}
public <E extends Throwable> void attach(E throwable, long timestamp) {
Trace recv = received;
String message = sent && recv == null ? name + " (sent but not received)" : name;
message += " (current acquires = " + acquires + ") T" + (this.timestamp - timestamp) / 1000 + "µs.";
Traceback exception = new Traceback(message);
StackTraceElement[] stackTrace = new StackTraceElement[frames.length];
for (int i = 0; i < frames.length; i++) {
stackTrace[i] = frames[i].toStackTraceElement();
}
exception.setStackTrace(stackTrace);
if (recv != null) {
recv.attach(exception, timestamp);
}
throwable.addSuppressed(exception);
}
}
private static final class Traceback extends Throwable {
private static final long serialVersionUID = 941453986194634605L;
Traceback(String message) {
super(message);
}
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
}
}
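
Tracing is off unless TRACE_LIFECYCLE_DEPTH resolves to a positive value; the assertion in the static initializer defaults it to 10 when assertions are enabled. A minimal sketch of recording events and attaching them to an exception as suppressed trace points:

import io.netty.buffer.api.internal.LifecycleTracer;

final class LifecycleTracerSketch {
    public static void main(String[] args) {
        // Run with -ea or -DTRACE_LIFECYCLE_DEPTH=10; otherwise get() returns the no-op tracer
        // and nothing is printed below.
        LifecycleTracer tracer = LifecycleTracer.get();
        tracer.acquire(1);
        tracer.close(0);
        IllegalStateException error = tracer.attachTrace(new IllegalStateException("life-cycle demo"));
        for (Throwable tracePoint : error.getSuppressed()) {
            System.out.println(tracePoint.getMessage()); // "allocate", "acquire" and "close" entries.
        }
    }
}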

View File

@ -1,53 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.MemoryManagers;
import io.netty.buffer.api.bytebuffer.ByteBufferMemoryManagers;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
public final class MemoryManagersOverride {
private static final MemoryManagers DEFAULT = new ByteBufferMemoryManagers();
private static final AtomicInteger OVERRIDES_AVAILABLE = new AtomicInteger();
private static final Map<Thread, MemoryManagers> OVERRIDES = Collections.synchronizedMap(new IdentityHashMap<>());
private MemoryManagersOverride() {
}
public static MemoryManagers getManagers() {
if (OVERRIDES_AVAILABLE.get() > 0) {
return OVERRIDES.getOrDefault(Thread.currentThread(), DEFAULT);
}
return DEFAULT;
}
public static <T> T using(MemoryManagers managers, Supplier<T> supplier) {
Thread thread = Thread.currentThread();
OVERRIDES.put(thread, managers);
OVERRIDES_AVAILABLE.incrementAndGet();
try {
return supplier.get();
} finally {
OVERRIDES_AVAILABLE.decrementAndGet();
OVERRIDES.remove(thread);
}
}
}
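
A minimal sketch of the thread-scoped override; whether a given allocator factory consults getManagers() during construction is an assumption here, the sketch only shows the scoping:

import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.bytebuffer.ByteBufferMemoryManagers;
import io.netty.buffer.api.internal.MemoryManagersOverride;

final class OverrideSketch {
    public static void main(String[] args) throws Exception {
        BufferAllocator allocator = MemoryManagersOverride.using(
                new ByteBufferMemoryManagers(),      // Force the safe ByteBuffer implementation...
                () -> BufferAllocator.pooledHeap()); // ...for allocators created inside the supplier (assumption).
        try (allocator) {
            // Only the current thread saw the override, and only for the duration of the supplier call.
        }
    }
}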

View File

@ -1,231 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.Owned;
import io.netty.buffer.api.Resource;
import io.netty.buffer.api.Send;
import java.util.Objects;
/**
* Internal support class for resources.
*
* @param <I> The public interface for the resource.
* @param <T> The concrete implementation of the resource.
*/
public abstract class ResourceSupport<I extends Resource<I>, T extends ResourceSupport<I, T>> implements Resource<I> {
private int acquires; // Closed if negative.
private Drop<T> drop;
private final LifecycleTracer tracer;
protected ResourceSupport(Drop<T> drop) {
this.drop = drop;
tracer = LifecycleTracer.get();
}
/**
* Encapsulation bypass for calling {@link #acquire()} on the given object.
* <p>
* Note: this {@code acquire} method does not check the type of the return value from acquire at compile time.
* The type is instead checked at runtime, and a {@link ClassCastException} will be thrown if the target type is
* incorrect.
*
* @param obj The object we wish to acquire (increment reference count) on.
* @param <T> The type of the acquired object, given by target-typing.
* @return The acquired object.
*/
@SuppressWarnings("unchecked")
static <T> T acquire(ResourceSupport<?, ?> obj) {
return (T) obj.acquire();
}
/**
* Increment the reference count.
* <p>
* Note, this method is not thread-safe because Resources are meant to be thread-confined.
*
* @return This {@link Resource} instance.
*/
protected final I acquire() {
if (acquires < 0) {
throw attachTrace(createResourceClosedException());
}
if (acquires == Integer.MAX_VALUE) {
throw new IllegalStateException("Cannot acquire more references; counter would overflow.");
}
acquires++;
tracer.acquire(acquires);
return self();
}
protected abstract RuntimeException createResourceClosedException();
/**
* Decrement the reference count, and dispose of the resource if the last reference is closed.
* <p>
* Note, this method is not thread-safe because Resources are meant to be thread-confined.
*
* @throws IllegalStateException If this Resource has already been closed.
*/
@Override
public final void close() {
if (acquires == -1) {
throw attachTrace(new IllegalStateException("Double-free: Resource already closed and dropped."));
}
if (acquires == 0) {
tracer.drop(acquires);
drop.drop(impl());
}
acquires--;
tracer.close(acquires);
}
/**
* Send this Resource instance to another Thread, transferring the ownership to the recipient.
* This method can be used when the receiving thread is not known up front.
* <p>
* This instance immediately becomes inaccessible, and all attempts at accessing this resource will throw.
* Calling {@link #close()} will have no effect, so this method is safe to call within a try-with-resources
* statement.
*
* @throws IllegalStateException if this object has any outstanding acquires; that is, if this object has been
* {@link #acquire() acquired} more times than it has been {@link #close() closed}.
*/
@Override
public final Send<I> send() {
if (acquires < 0) {
throw attachTrace(createResourceClosedException());
}
if (!isOwned()) {
throw notSendableException();
}
var owned = tracer.send(prepareSend(), acquires);
acquires = -2; // Close without dropping. This also ignores future double-free attempts.
return new TransferSend<I, T>(owned, drop, getClass());
}
/**
* Attach a trace of the life-cycle of this object as suppressed exceptions to the given throwable.
*
* @param throwable The throwable to attach a life-cycle trace to.
* @param <E> The concrete exception type.
* @return The given exception, which can then be thrown.
*/
protected <E extends Throwable> E attachTrace(E throwable) {
return tracer.attachTrace(throwable);
}
/**
* Create an {@link IllegalStateException} with a custom message, tailored to this particular
* {@link Resource} instance, for when the object cannot be sent for some reason.
* @return An {@link IllegalStateException} to be thrown when this object cannot be sent.
*/
protected IllegalStateException notSendableException() {
return new IllegalStateException(
"Cannot send() a reference counted object with " + countBorrows() + " borrows: " + this + '.');
}
/**
* Encapsulation bypass to call {@link #isOwned()} on the given object.
*
* @param obj The object to query the ownership state on.
* @return {@code true} if the given object is owned, otherwise {@code false}.
*/
static boolean isOwned(ResourceSupport<?, ?> obj) {
return obj.isOwned();
}
/**
* Query if this object is in an "owned" state, which means no other references have been
* {@linkplain #acquire() acquired} to it.
*
* This would usually be the case, since there are no public methods for acquiring references to these objects.
*
* @return {@code true} if this object is in an owned state, otherwise {@code false}.
*/
protected boolean isOwned() {
return acquires == 0;
}
/**
* Encapsulation bypass to call {@link #countBorrows()} on the given object.
*
* @param obj The object to count borrows on.
* @return The number of borrows, or outstanding {@linkplain #acquire() acquires}, if any, of the given object.
*/
static int countBorrows(ResourceSupport<?, ?> obj) {
return obj.countBorrows();
}
/**
* Count the number of borrows of this object.
* Note that even if the number of borrows is {@code 0}, this object might not be {@linkplain #isOwned() owned}
* because there could be other restrictions involved in ownership.
*
* @return The number of borrows, if any, of this object.
*/
protected int countBorrows() {
return Math.max(acquires, 0);
}
@Override
public boolean isAccessible() {
return acquires >= 0;
}
/**
* Prepare this instance for ownership transfer. This method is called from {@link #send()} in the sending thread.
* This method should put this resource in a deactivated state where it is no longer accessible from the currently
* owning thread.
* In this state, the resource instance should only allow a call to {@link Owned#transferOwnership(Drop)} in the
* recipient thread.
*
* @return This resource instance in a deactivated state.
*/
protected abstract Owned<T> prepareSend();
/**
* Get access to the underlying {@link Drop} object.
* This method is unsafe because it opens the possibility of bypassing and overriding resource lifetimes.
*
* @return The {@link Drop} object used by this reference counted object.
*/
protected Drop<T> unsafeGetDrop() {
return drop;
}
/**
* Replace the current underlying {@link Drop} object with the given one.
* This method is unsafe because it opens the possibility of bypassing and overriding resource lifetimes.
*
* @param replacement The new {@link Drop} object to use instead of the current one.
*/
protected void unsafeSetDrop(Drop<T> replacement) {
drop = Objects.requireNonNull(replacement, "Replacement drop cannot be null.");
}
@SuppressWarnings("unchecked")
private I self() {
return (I) this;
}
@SuppressWarnings("unchecked")
private T impl() {
return (T) this;
}
}
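
A minimal sketch of the send() protocol described above, assuming Buffer is a Resource backed by ResourceSupport (as the Buffer#isOwned references earlier suggest) and that its close() declares no checked exceptions:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import java.util.concurrent.CompletableFuture;

final class SendSketch {
    public static void main(String[] args) throws Exception {
        try (BufferAllocator allocator = BufferAllocator.pooledHeap()) {
            Buffer buffer = allocator.allocate(16);
            Send<Buffer> send = buffer.send(); // The sending thread loses access from this point on.
            CompletableFuture.runAsync(() -> {
                try (Buffer received = send.receive()) { // Ownership transfers to the receiving thread.
                    System.out.println("received: " + received);
                }
            }).join();
        }
    }
}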

View File

@ -1,190 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferClosedException;
import io.netty.buffer.api.BufferReadOnlyException;
import io.netty.buffer.api.Drop;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodHandles.Lookup;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;
import java.lang.ref.Cleaner;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.concurrent.atomic.LongAdder;
public interface Statics {
LongAdder MEM_USAGE_NATIVE = new LongAdder();
Cleaner CLEANER = Cleaner.create();
Drop<Buffer> NO_OP_DROP = new Drop<Buffer>() {
@Override
public void drop(Buffer obj) {
}
@Override
public String toString() {
return "NO_OP_DROP";
}
};
MethodHandle BB_SLICE_OFFSETS = getByteBufferSliceOffsetsMethodHandle();
MethodHandle BB_PUT_OFFSETS = getByteBufferPutOffsetsMethodHandle();
static MethodHandle getByteBufferSliceOffsetsMethodHandle() {
try {
Lookup lookup = MethodHandles.lookup();
MethodType type = MethodType.methodType(ByteBuffer.class, int.class, int.class);
return lookup.findVirtual(ByteBuffer.class, "slice", type);
} catch (Exception ignore) {
return null;
}
}
@SuppressWarnings("JavaLangInvokeHandleSignature")
static MethodHandle getByteBufferPutOffsetsMethodHandle() {
try {
Lookup lookup = MethodHandles.lookup();
MethodType type = MethodType.methodType(ByteBuffer.class, int.class, ByteBuffer.class, int.class, int.class);
return lookup.findVirtual(ByteBuffer.class, "put", type);
} catch (Exception ignore) {
return null;
}
}
@SuppressWarnings({"unchecked", "unused"})
static <T extends Buffer> Drop<T> noOpDrop() {
return (Drop<T>) NO_OP_DROP;
}
static VarHandle findVarHandle(Lookup lookup, Class<?> recv, String name, Class<?> type) {
try {
return lookup.findVarHandle(recv, name, type);
} catch (Exception e) {
throw new ExceptionInInitializerError(e);
}
}
@SuppressWarnings("unchecked")
static <T, R> Drop<R> convert(Drop<T> drop) {
return (Drop<R>) drop;
}
static void copyToViaReverseCursor(Buffer src, int srcPos, Buffer dest, int destPos, int length) {
// Iterate in reverse to account for src and dest buffer overlap.
var itr = src.openReverseCursor(srcPos + length - 1, length);
ByteOrder prevOrder = dest.order();
// We read longs in BE, in reverse, so they need to be flipped for writing.
dest.order(ByteOrder.LITTLE_ENDIAN);
try {
while (itr.readLong()) {
long val = itr.getLong();
length -= Long.BYTES;
dest.setLong(destPos + length, val);
}
while (itr.readByte()) {
dest.setByte(destPos + --length, itr.getByte());
}
} finally {
dest.order(prevOrder);
}
}
/**
* The ByteBuffer slice-with-offset-and-length method is only available from Java 13 and onwards, but we need to
* support Java 11.
*/
static ByteBuffer bbslice(ByteBuffer buffer, int fromOffset, int length) {
if (BB_SLICE_OFFSETS != null) {
return bbsliceJdk13(buffer, fromOffset, length);
}
return bbsliceFallback(buffer, fromOffset, length);
}
private static ByteBuffer bbsliceJdk13(ByteBuffer buffer, int fromOffset, int length) {
try {
return (ByteBuffer) BB_SLICE_OFFSETS.invokeExact(buffer, fromOffset, length);
} catch (RuntimeException re) {
throw re;
} catch (Throwable throwable) {
throw new LinkageError("Unexpected exception from ByteBuffer.slice(int,int).", throwable);
}
}
private static ByteBuffer bbsliceFallback(ByteBuffer buffer, int fromOffset, int length) {
if (fromOffset < 0) {
throw new IndexOutOfBoundsException("The fromOffset must be positive: " + fromOffset + '.');
}
int newLimit = fromOffset + length;
if (newLimit > buffer.capacity()) {
throw new IndexOutOfBoundsException(
"The limit of " + newLimit + " would be greater than capacity: " + buffer.capacity() + '.');
}
try {
return buffer.position(fromOffset).limit(newLimit).slice();
} finally {
buffer.clear();
}
}
/**
* The ByteBuffer put-buffer-with-offset-and-length method is not available in Java 11.
*/
static void bbput(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) {
if (BB_PUT_OFFSETS != null) {
bbputJdk16(dest, destPos, src, srcPos, length);
} else {
bbputFallback(dest, destPos, src, srcPos, length);
}
}
private static void bbputJdk16(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) {
try {
@SuppressWarnings("unused") // We need to cast the return type in order to invokeExact.
ByteBuffer ignore = (ByteBuffer) BB_PUT_OFFSETS.invokeExact(dest, destPos, src, srcPos, length);
} catch (RuntimeException re) {
throw re;
} catch (Throwable throwable) {
throw new LinkageError("Unexpected exception from ByteBuffer.put(int,ByteBuffer,int,int).", throwable);
}
}
private static void bbputFallback(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) {
dest.position(destPos).put(bbslice(src, srcPos, length));
}
static BufferClosedException bufferIsClosed(Buffer buffer) {
return new BufferClosedException("This buffer is closed: " + buffer);
}
static BufferReadOnlyException bufferIsReadOnly(Buffer buffer) {
return new BufferReadOnlyException("This buffer is read-only: " + buffer);
}
static <T> T acquire(ResourceSupport<?, ?> obj) {
return ResourceSupport.acquire(obj);
}
static boolean isOwned(ResourceSupport<?, ?> obj) {
return ResourceSupport.isOwned(obj);
}
static int countBorrows(ResourceSupport<?, ?> obj) {
return ResourceSupport.countBorrows(obj);
}
}
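
A minimal sketch of bbslice, which behaves like the Java 13 ByteBuffer.slice(int, int) overload on every supported JDK; the Java 11 fallback emulates it with position/limit/slice and then clear()s the source buffer:

import io.netty.buffer.api.internal.Statics;
import java.nio.ByteBuffer;

final class BbSliceSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(8);
        ByteBuffer slice = Statics.bbslice(buffer, 2, 4);
        System.out.println(slice.capacity()); // 4 -- a view of bytes [2, 6) of the source buffer.
    }
}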

View File

@ -1,69 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.Owned;
import io.netty.buffer.api.Resource;
import io.netty.buffer.api.Send;
import java.lang.invoke.VarHandle;
import static java.lang.invoke.MethodHandles.lookup;
public class TransferSend<I extends Resource<I>, T extends ResourceSupport<I, T>> implements Send<I> {
private static final VarHandle RECEIVED = Statics.findVarHandle(lookup(), TransferSend.class, "received", boolean.class);
private final Owned<T> outgoing;
private final Drop<T> drop;
private final Class<?> concreteType;
@SuppressWarnings("unused")
private volatile boolean received; // Accessed via VarHandle
public TransferSend(Owned<T> outgoing, Drop<T> drop, Class<?> concreteType) {
this.outgoing = outgoing;
this.drop = drop;
this.concreteType = concreteType;
}
@SuppressWarnings("unchecked")
@Override
public I receive() {
gateReception();
var copy = outgoing.transferOwnership(drop);
drop.attach(copy);
return (I) copy;
}
private void gateReception() {
if ((boolean) RECEIVED.getAndSet(this, true)) {
throw new IllegalStateException("This object has already been received.");
}
}
@Override
public boolean referentIsInstanceOf(Class<?> cls) {
return cls.isAssignableFrom(concreteType);
}
@Override
public void discard() {
if (!(boolean) RECEIVED.getAndSet(this, true)) {
var copy = outgoing.transferOwnership(drop);
drop.attach(copy);
copy.close();
}
}
}

View File

@ -1,20 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* Internal implementation details that can be shared among Buffer implementations.
*/
package io.netty.buffer.api.internal;

View File

@ -1,20 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* Incubating {@code Buffer} API, as a proposed alternative to {@code ByteBuf}.
*/
package io.netty.buffer.api;

View File

@ -1,25 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.BufferAllocator;
public interface BufferAllocatorMetric {
/**
* Returns the number of bytes of memory used by a {@link BufferAllocator}, or {@code -1} if unknown.
*/
long usedMemory();
}

View File

@ -1,26 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.BufferAllocator;
public interface BufferAllocatorMetricProvider {
/**
* Returns a {@link BufferAllocatorMetric} for a {@link BufferAllocator}.
*/
BufferAllocatorMetric metric();
}

View File

@ -1,129 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Internal primitive map implementation that is specifically optimised for the runs availability map use case in
* {@link PoolChunk}.
*/
final class LongLongHashMap {
private static final int MASK_TEMPLATE = ~1;
private int mask;
private long[] array;
private int maxProbe;
private long zeroVal;
private final long emptyVal;
LongLongHashMap(long emptyVal) {
this.emptyVal = emptyVal;
zeroVal = emptyVal;
int initialSize = 32;
array = new long[initialSize];
mask = initialSize - 1;
computeMaskAndProbe();
}
public long put(long key, long value) {
if (key == 0) {
long prev = zeroVal;
zeroVal = value;
return prev;
}
for (;;) {
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key || existing == 0) {
long prev = existing == 0? emptyVal : array[index + 1];
array[index] = key;
array[index + 1] = value;
for (; i < maxProbe; i++) { // Nerf any existing misplaced entries.
index = index + 2 & mask;
if (array[index] == key) {
array[index] = 0;
prev = array[index + 1];
break;
}
}
return prev;
}
index = index + 2 & mask;
}
expand(); // Grow array and re-hash.
}
}
public void remove(long key) {
if (key == 0) {
zeroVal = emptyVal;
return;
}
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key) {
array[index] = 0;
break;
}
index = index + 2 & mask;
}
}
public long get(long key) {
if (key == 0) {
return zeroVal;
}
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key) {
return array[index + 1];
}
index = index + 2 & mask;
}
return emptyVal;
}
private int index(long key) {
// Hash with murmur64, and mask.
key ^= key >>> 33;
key *= 0xff51afd7ed558ccdL;
key ^= key >>> 33;
key *= 0xc4ceb9fe1a85ec53L;
key ^= key >>> 33;
return (int) key & mask;
}
private void expand() {
long[] prev = array;
array = new long[prev.length * 2];
computeMaskAndProbe();
for (int i = 0; i < prev.length; i += 2) {
long key = prev[i];
if (key != 0) {
long val = prev[i + 1];
put(key, val);
}
}
}
private void computeMaskAndProbe() {
int length = array.length;
mask = length - 1 & MASK_TEMPLATE;
maxProbe = (int) Math.log(length);
}
}
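
A minimal usage sketch (the class is package-private, so it has to live in io.netty.buffer.api.pool); the constructor's emptyVal is the sentinel returned for absent keys, and key 0 is held in the dedicated zeroVal field rather than the open-addressed array:

package io.netty.buffer.api.pool;

final class LongLongHashMapSketch {
    public static void main(String[] args) {
        LongLongHashMap map = new LongLongHashMap(-1); // -1 is returned for keys that are not present.
        map.put(42L, 1024L);
        map.put(0L, 7L);                               // Key 0 goes into the zeroVal slot.
        System.out.println(map.get(42L));              // 1024
        System.out.println(map.get(99L));              // -1 (emptyVal)
        map.remove(42L);
        System.out.println(map.get(42L));              // -1 again
    }
}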

View File

@ -1,107 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import java.util.Arrays;
/**
* Internal primitive priority queue, used by {@link PoolChunk}.
* The implementation is based on the binary heap, as described in Algorithms by Sedgewick and Wayne.
*/
final class LongPriorityQueue {
public static final int NO_VALUE = -1;
private long[] array = new long[9];
private int size;
public void offer(long handle) {
if (handle == NO_VALUE) {
throw new IllegalArgumentException("The NO_VALUE (" + NO_VALUE + ") cannot be added to the queue.");
}
size++;
if (size == array.length) {
// Grow queue capacity.
array = Arrays.copyOf(array, 1 + (array.length - 1) * 2);
}
array[size] = handle;
lift(size);
}
public void remove(long value) {
for (int i = 1; i <= size; i++) {
if (array[i] == value) {
array[i] = array[size--];
lift(i);
sink(i);
return;
}
}
}
public long peek() {
if (size == 0) {
return NO_VALUE;
}
return array[1];
}
public long poll() {
if (size == 0) {
return NO_VALUE;
}
long val = array[1];
array[1] = array[size];
array[size] = 0;
size--;
sink(1);
return val;
}
public boolean isEmpty() {
return size == 0;
}
private void lift(int index) {
int parentIndex;
while (index > 1 && subord(parentIndex = index >> 1, index)) {
swap(index, parentIndex);
index = parentIndex;
}
}
private void sink(int index) {
int child;
while ((child = index << 1) <= size) {
if (child < size && subord(child, child + 1)) {
child++;
}
if (!subord(index, child)) {
break;
}
swap(index, child);
index = child;
}
}
private boolean subord(int a, int b) {
return array[a] > array[b];
}
private void swap(int a, int b) {
long value = array[a];
array[a] = array[b];
array[b] = value;
}
}
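
A minimal usage sketch (again package-private); the heap orders handles ascending, so poll() always yields the smallest handle, and an empty queue answers with NO_VALUE:

package io.netty.buffer.api.pool;

final class LongPriorityQueueSketch {
    public static void main(String[] args) {
        LongPriorityQueue queue = new LongPriorityQueue();
        queue.offer(50);
        queue.offer(10);
        queue.offer(30);
        System.out.println(queue.poll()); // 10 -- smallest handle first (binary min-heap).
        queue.remove(30);                 // Arbitrary removal scans the heap linearly.
        System.out.println(queue.poll()); // 50
        System.out.println(queue.poll()); // -1 (LongPriorityQueue.NO_VALUE): the queue is empty.
    }
}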

View File

@ -1,471 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.MemoryManager;
import io.netty.util.internal.StringUtil;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder;
import static io.netty.buffer.api.pool.PoolChunk.isSubpage;
import static java.lang.Math.max;
class PoolArena extends SizeClasses implements PoolArenaMetric, AllocatorControl {
private static final VarHandle SUBPAGE_ARRAY = MethodHandles.arrayElementVarHandle(PoolSubpage[].class);
enum SizeClass {
Small,
Normal
}
final PooledBufferAllocator parent;
final MemoryManager manager;
final int numSmallSubpagePools;
final int directMemoryCacheAlignment;
private final PoolSubpage[] smallSubpagePools;
private final PoolChunkList q050;
private final PoolChunkList q025;
private final PoolChunkList q000;
private final PoolChunkList qInit;
private final PoolChunkList q075;
private final PoolChunkList q100;
private final List<PoolChunkListMetric> chunkListMetrics;
// Metrics for allocations and deallocations
private long allocationsNormal;
// We need to use the LongAdder here as this is not guarded by a synchronized block.
private final LongAdder allocationsSmall = new LongAdder();
private final LongAdder allocationsHuge = new LongAdder();
private final LongAdder activeBytesHuge = new LongAdder();
private long deallocationsSmall;
private long deallocationsNormal;
// We need to use the LongAdder here as this is not guarded by a synchronized block.
private final LongAdder deallocationsHuge = new LongAdder();
// Number of thread caches backed by this arena.
final AtomicInteger numThreadCaches = new AtomicInteger();
protected PoolArena(PooledBufferAllocator parent, MemoryManager manager, int pageSize,
int pageShifts, int chunkSize, int cacheAlignment) {
super(pageSize, pageShifts, chunkSize, cacheAlignment);
this.parent = parent;
this.manager = manager;
directMemoryCacheAlignment = cacheAlignment;
numSmallSubpagePools = nSubpages;
smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools);
q100 = new PoolChunkList(this, null, 100, Integer.MAX_VALUE, chunkSize);
q075 = new PoolChunkList(this, q100, 75, 100, chunkSize);
q050 = new PoolChunkList(this, q075, 50, 100, chunkSize);
q025 = new PoolChunkList(this, q050, 25, 75, chunkSize);
q000 = new PoolChunkList(this, q025, 1, 50, chunkSize);
qInit = new PoolChunkList(this, q000, Integer.MIN_VALUE, 25, chunkSize);
q100.prevList(q075);
q075.prevList(q050);
q050.prevList(q025);
q025.prevList(q000);
q000.prevList(null);
qInit.prevList(qInit);
chunkListMetrics = List.of(qInit, q000, q025, q050, q075, q100);
}
private static PoolSubpage newSubpagePoolHead() {
PoolSubpage head = new PoolSubpage();
head.prev = head;
head.next = head;
return head;
}
private static PoolSubpage[] newSubpagePoolArray(int size) {
return new PoolSubpage[size];
}
UntetheredMemory allocate(PooledAllocatorControl control, PoolThreadCache cache, int size) {
final int sizeIdx = size2SizeIdx(size);
if (sizeIdx <= smallMaxSizeIdx) {
return tcacheAllocateSmall(control, cache, size, sizeIdx);
} else if (sizeIdx < nSizes) {
return tcacheAllocateNormal(control, cache, size, sizeIdx);
} else {
int normCapacity = directMemoryCacheAlignment > 0
? normalizeSize(size) : size;
// Huge allocations are never served via the cache so just call allocateHuge
return allocateHuge(normCapacity);
}
}
private UntetheredMemory tcacheAllocateSmall(PooledAllocatorControl control, PoolThreadCache cache, final int size,
final int sizeIdx) {
UntetheredMemory memory = cache.allocateSmall(control, size, sizeIdx);
if (memory != null) {
// was able to allocate out of the cache so move on
return memory;
}
/*
* Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
* {@link PoolChunk#free(long)} may modify the doubly linked list as well.
*/
PoolSubpage head = findSubpagePoolHead(sizeIdx);
final boolean needsNormalAllocation;
synchronized (head) {
final PoolSubpage s = head.next;
needsNormalAllocation = s == head;
if (!needsNormalAllocation) {
assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx);
long handle = s.allocate();
assert handle >= 0;
memory = s.chunk.allocateBufferWithSubpage(handle, size, cache, control);
}
}
if (needsNormalAllocation) {
synchronized (this) {
memory = allocateNormal(size, sizeIdx, cache, control);
}
}
incSmallAllocation();
return memory;
}
private UntetheredMemory tcacheAllocateNormal(
PooledAllocatorControl control, PoolThreadCache cache, int size, int sizeIdx) {
UntetheredMemory memory = cache.allocateNormal(this, control, size, sizeIdx);
if (memory != null) {
// was able to allocate out of the cache so move on
return memory;
}
synchronized (this) {
memory = allocateNormal(size, sizeIdx, cache, control);
allocationsNormal++;
}
return memory;
}
// Method must be called inside synchronized(this) { ... } block
private UntetheredMemory allocateNormal(
int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) {
UntetheredMemory memory = q050.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q025.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q000.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = qInit.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q075.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
// Add a new chunk.
PoolChunk c = newChunk(pageSize, nPSizes, pageShifts, chunkSize);
memory = c.allocate(size, sizeIdx, threadCache, control);
assert memory != null;
qInit.add(c);
return memory;
}
private void incSmallAllocation() {
allocationsSmall.increment();
}
private UntetheredMemory allocateHuge(int size) {
activeBytesHuge.add(size);
allocationsHuge.increment();
return new UnpooledUnthetheredMemory(parent, manager, size);
}
void free(PoolChunk chunk, long handle, int normCapacity, PoolThreadCache cache) {
SizeClass sizeClass = sizeClass(handle);
if (cache != null && cache.add(this, chunk, handle, normCapacity, sizeClass)) {
// cached, so do not free it here.
return;
}
freeChunk(chunk, handle, normCapacity, sizeClass);
}
private static SizeClass sizeClass(long handle) {
return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
}
void freeChunk(PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass) {
final boolean destroyChunk;
synchronized (this) {
if (sizeClass == SizeClass.Normal) {
++deallocationsNormal;
} else if (sizeClass == SizeClass.Small) {
++deallocationsSmall;
} else {
throw new AssertionError("Unexpected size class: " + sizeClass);
}
destroyChunk = !chunk.parent.free(chunk, handle, normCapacity);
}
if (destroyChunk) {
// destroyChunk does not need to be called while holding the synchronized lock.
chunk.destroy();
}
}
PoolSubpage findSubpagePoolHead(int sizeIdx) {
PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, sizeIdx);
if (head == null) {
head = newSubpagePoolHead();
if (!SUBPAGE_ARRAY.compareAndSet(smallSubpagePools, sizeIdx, null, head)) {
// We lost the race. Read the winning value.
head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, sizeIdx);
}
}
return head;
}
@Override
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
throw new AssertionError("PoolChunk base buffers should never need to reallocate.");
}
@Override
public void recoverMemory(Object memory) {
// This means we've lost all strong references to a PoolChunk.
// Probably means we don't need it anymore, so just free its memory.
manager.discardRecoverableMemory(memory);
}
@Override
public int numThreadCaches() {
return numThreadCaches.get();
}
@Override
public int numSmallSubpages() {
return smallSubpagePools.length;
}
@Override
public int numChunkLists() {
return chunkListMetrics.size();
}
@Override
public List<PoolSubpageMetric> smallSubpages() {
return subPageMetricList(smallSubpagePools);
}
@Override
public List<PoolChunkListMetric> chunkLists() {
return chunkListMetrics;
}
private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage[] pages) {
List<PoolSubpageMetric> metrics = new ArrayList<>();
for (int i = 0, len = pages.length; i < len; i++) {
PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(pages, i);
if (head == null || head.next == head) {
continue;
}
PoolSubpage s = head.next;
do {
metrics.add(s);
s = s.next;
} while (s != head);
}
return metrics;
}
@Override
public long numAllocations() {
final long allocsNormal;
synchronized (this) {
allocsNormal = allocationsNormal;
}
return allocationsSmall.longValue() + allocsNormal + allocationsHuge.longValue();
}
@Override
public long numSmallAllocations() {
return allocationsSmall.longValue();
}
@Override
public synchronized long numNormalAllocations() {
return allocationsNormal;
}
@Override
public long numDeallocations() {
final long deallocs;
synchronized (this) {
deallocs = deallocationsSmall + deallocationsNormal;
}
return deallocs + deallocationsHuge.longValue();
}
@Override
public synchronized long numSmallDeallocations() {
return deallocationsSmall;
}
@Override
public synchronized long numNormalDeallocations() {
return deallocationsNormal;
}
@Override
public long numHugeAllocations() {
return allocationsHuge.longValue();
}
@Override
public long numHugeDeallocations() {
return deallocationsHuge.longValue();
}
@Override
public long numActiveAllocations() {
long val = allocationsSmall.longValue() + allocationsHuge.longValue()
- deallocationsHuge.longValue();
synchronized (this) {
val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
}
return max(val, 0);
}
@Override
public long numActiveSmallAllocations() {
return max(numSmallAllocations() - numSmallDeallocations(), 0);
}
@Override
public long numActiveNormalAllocations() {
final long val;
synchronized (this) {
val = allocationsNormal - deallocationsNormal;
}
return max(val, 0);
}
@Override
public long numActiveHugeAllocations() {
return max(numHugeAllocations() - numHugeDeallocations(), 0);
}
@Override
public long numActiveBytes() {
long val = activeBytesHuge.longValue();
synchronized (this) {
for (int i = 0; i < chunkListMetrics.size(); i++) {
for (PoolChunkMetric m: chunkListMetrics.get(i)) {
val += m.chunkSize();
}
}
}
return max(0, val);
}
protected final PoolChunk newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
return new PoolChunk(this, pageSize, pageShifts, chunkSize, maxPageIdx);
}
@Override
public synchronized String toString() {
StringBuilder buf = new StringBuilder()
.append("Chunk(s) at 0~25%:")
.append(StringUtil.NEWLINE)
.append(qInit)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 0~50%:")
.append(StringUtil.NEWLINE)
.append(q000)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 25~75%:")
.append(StringUtil.NEWLINE)
.append(q025)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 50~100%:")
.append(StringUtil.NEWLINE)
.append(q050)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 75~100%:")
.append(StringUtil.NEWLINE)
.append(q075)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 100%:")
.append(StringUtil.NEWLINE)
.append(q100)
.append(StringUtil.NEWLINE)
.append("small subpages:");
appendPoolSubPages(buf, smallSubpagePools);
buf.append(StringUtil.NEWLINE);
return buf.toString();
}
private static void appendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages) {
for (int i = 0; i < subpages.length; i ++) {
PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(subpages, i);
if (head == null || head.next == head) {
continue;
}
buf.append(StringUtil.NEWLINE)
.append(i)
.append(": ");
PoolSubpage s = head.next;
do {
buf.append(s);
s = s.next;
} while (s != head);
}
}
public void close() {
for (int i = 0, len = smallSubpagePools.length; i < len; i++) {
PoolSubpage page = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, i);
if (page != null) {
page.destroy();
}
}
for (PoolChunkList list : new PoolChunkList[] {qInit, q000, q025, q050, q075, q100}) {
list.destroy();
}
}
}

View File

@ -1,114 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import java.util.List;
/**
* Expose metrics for an arena.
*/
public interface PoolArenaMetric extends SizeClassesMetric {
/**
* Returns the number of thread caches backed by this arena.
*/
int numThreadCaches();
/**
* Returns the number of small sub-pages for the arena.
*/
int numSmallSubpages();
/**
* Returns the number of chunk lists for the arena.
*/
int numChunkLists();
/**
* Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for small sub-pages.
*/
List<PoolSubpageMetric> smallSubpages();
/**
* Returns an unmodifiable {@link List} which holds {@link PoolChunkListMetric}s.
*/
List<PoolChunkListMetric> chunkLists();
/**
* Return the number of allocations done via the arena. This includes all sizes.
*/
long numAllocations();
/**
* Return the number of small allocations done via the arena.
*/
long numSmallAllocations();
/**
* Return the number of normal allocations done via the arena.
*/
long numNormalAllocations();
/**
* Return the number of huge allocations done via the arena.
*/
long numHugeAllocations();
/**
* Return the number of deallocations done via the arena. This includes all sizes.
*/
long numDeallocations();
/**
* Return the number of small deallocations done via the arena.
*/
long numSmallDeallocations();
/**
* Return the number of normal deallocations done via the arena.
*/
long numNormalDeallocations();
/**
* Return the number of huge deallocations done via the arena.
*/
long numHugeDeallocations();
/**
* Return the number of currently active allocations.
*/
long numActiveAllocations();
/**
* Return the number of currently active small allocations.
*/
long numActiveSmallAllocations();
/**
* Return the number of currently active normal allocations.
*/
long numActiveNormalAllocations();
/**
* Return the number of currently active huge allocations.
*/
long numActiveHugeAllocations();
/**
* Return the number of active bytes that are currently allocated by the arena.
*/
long numActiveBytes();
}

View File

@ -1,662 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.internal.CleanerDrop;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.ArcDrop;
import io.netty.buffer.api.internal.Statics;
import java.util.PriorityQueue;
/**
* Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
*
* Notation: The following terms are important to understand the code
* > page - a page is the smallest unit of memory chunk that can be allocated
* > run - a run is a collection of pages
* > chunk - a chunk is a collection of runs
* > in this code chunkSize = maxPages * pageSize
*
* To begin we allocate a byte array of size = chunkSize
* Whenever a ByteBuf of given size needs to be created we search for the first position
* in the byte array that has enough empty space to accommodate the requested size and
* return a (long) handle that encodes this offset information, (this memory segment is then
* marked as reserved, so it is always used by exactly one ByteBuf and no more)
*
* For simplicity all sizes are normalized according to the {@link PoolArena#size2SizeIdx(int)} method.
* This ensures that when we request memory segments of size > pageSize, the normalizedCapacity
* equals the next nearest size in {@link SizeClasses}.
*
*
* A chunk has the following layout:
*
* /-----------------\
* | run |
* | |
* | |
* |-----------------|
* | run |
* | |
* |-----------------|
 * |  unallocated    |
* | (freed) |
* | |
* |-----------------|
* | subpage |
* |-----------------|
* | unallocated |
* | (freed) |
* | ... |
* | ... |
* | ... |
* | |
* | |
* | |
* \-----------------/
*
*
* handle:
* -------
* a handle is a long number; the bit layout of a run looks like:
*
* oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb
*
* o: runOffset (page offset in the chunk), 15bit
* s: size (number of pages) of this run, 15bit
* u: isUsed?, 1bit
* e: isSubpage?, 1bit
* b: bitmapIdx of subpage, zero if it's not subpage, 32bit
*
* runsAvailMap:
* ------
* a map which manages all runs (used and not in use).
* For each run, the first runOffset and last runOffset are stored in runsAvailMap.
* key: runOffset
* value: handle
*
* runsAvail:
* ----------
* an array of {@link PriorityQueue}.
* Each queue manages runs of the same size.
* Runs are sorted by offset, so that we always allocate runs with smaller offset.
*
*
* Algorithm:
* ----------
*
* As we allocate runs, we update values stored in runsAvailMap and runsAvail so that the property is maintained.
*
* Initialization -
* In the beginning we store the initial run which is the whole chunk.
* The initial run:
* runOffset = 0
* size = chunkSize
* isUsed = no
* isSubpage = no
* bitmapIdx = 0
*
*
* Algorithm: [allocateRun(size)]
* ----------
* 1) find the first available run in runsAvail according to the requested size
* 2) if the run has more pages than requested, split it and save the tailing run
*    for later use
*
* Algorithm: [allocateSubpage(size)]
* ----------
* 1) find a subpage that is not full for the given size.
*    if one already exists just use it, otherwise allocate a new PoolSubpage and call init();
*    note that this subpage object is added to the subpagesPool in the PoolArena when we init() it
* 2) call subpage.allocate()
*
* Algorithm: [free(handle, length, nioBuffer)]
* ----------
* 1) if it is a subpage, return the slab back into this subpage
* 2) if the subpage is not used, or it is a run, then start freeing this run
* 3) merge continuous avail runs
* 4) save the merged run
*
*/
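// A worked example of the handle layout above, for illustration (the values are hypothetical),
// using the shift constants defined below (RUN_OFFSET_SHIFT = 49, SIZE_SHIFT = 34,
// IS_USED_SHIFT = 33, IS_SUBPAGE_SHIFT = 32). A used, non-subpage run at page offset 3
// spanning 4 pages is encoded as:
//   long handle = (3L << RUN_OFFSET_SHIFT) | (4L << SIZE_SHIFT) | (1L << IS_USED_SHIFT);
// and decodes back as:
//   runOffset(handle) == 3, runPages(handle) == 4, isUsed(handle), !isSubpage(handle),
//   bitmapIdx(handle) == 0  // zero because this is a plain run, not a subpage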
final class PoolChunk implements PoolChunkMetric {
private static final int SIZE_BIT_LENGTH = 15;
private static final int INUSED_BIT_LENGTH = 1;
private static final int SUBPAGE_BIT_LENGTH = 1;
private static final int BITMAP_IDX_BIT_LENGTH = 32;
static final int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH;
static final int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT;
static final int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT;
static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT;
final PoolArena arena;
final Buffer base; // The buffer that is the source of the memory. Closing it will free the memory.
final Object memory;
final Drop<Buffer> baseDrop; // An ArcDrop that manages references to the base Buffer.
/**
* store the first page and last page of each avail run
*/
private final LongLongHashMap runsAvailMap;
/**
* manage all avail runs
*/
private final LongPriorityQueue[] runsAvail;
/**
* manage all subpages in this chunk
*/
private final PoolSubpage[] subpages;
private final int pageSize;
private final int pageShifts;
private final int chunkSize;
int freeBytes;
PoolChunkList parent;
PoolChunk prev;
PoolChunk next;
PoolChunk(PoolArena arena, int pageSize, int pageShifts, int chunkSize,
int maxPageIdx) {
this.arena = arena;
MemoryManager manager = arena.manager;
base = manager.allocateShared(arena, chunkSize, manager.drop(), Statics.CLEANER);
memory = manager.unwrapRecoverableMemory(base);
baseDrop = ArcDrop.wrap(Buffer::close);
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
freeBytes = chunkSize;
runsAvail = newRunsAvailqueueArray(maxPageIdx);
runsAvailMap = new LongLongHashMap(-1);
subpages = new PoolSubpage[chunkSize >> pageShifts];
//insert initial run, offset = 0, pages = chunkSize / pageSize
int pages = chunkSize >> pageShifts;
long initHandle = (long) pages << SIZE_SHIFT;
insertAvailRun(0, pages, initHandle);
}
private static LongPriorityQueue[] newRunsAvailqueueArray(int size) {
LongPriorityQueue[] queueArray = new LongPriorityQueue[size];
for (int i = 0; i < queueArray.length; i++) {
queueArray[i] = new LongPriorityQueue();
}
return queueArray;
}
private void insertAvailRun(int runOffset, int pages, long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(pages);
LongPriorityQueue queue = runsAvail[pageIdxFloor];
queue.offer(handle);
//insert first page of run
insertAvailRun0(runOffset, handle);
if (pages > 1) {
//insert last page of run
insertAvailRun0(lastPage(runOffset, pages), handle);
}
}
private void insertAvailRun0(int runOffset, long handle) {
long pre = runsAvailMap.put(runOffset, handle);
assert pre == -1;
}
private void removeAvailRun(long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(runPages(handle));
LongPriorityQueue queue = runsAvail[pageIdxFloor];
removeAvailRun(queue, handle);
}
private void removeAvailRun(LongPriorityQueue queue, long handle) {
queue.remove(handle);
int runOffset = runOffset(handle);
int pages = runPages(handle);
//remove first page of run
runsAvailMap.remove(runOffset);
if (pages > 1) {
//remove last page of run
runsAvailMap.remove(lastPage(runOffset, pages));
}
}
private static int lastPage(int runOffset, int pages) {
return runOffset + pages - 1;
}
private long getAvailRunByOffset(int runOffset) {
return runsAvailMap.get(runOffset);
}
@Override
public int usage() {
final int freeBytes;
synchronized (arena) {
freeBytes = this.freeBytes;
}
return usage(freeBytes);
}
private int usage(int freeBytes) {
if (freeBytes == 0) {
return 100;
}
int freePercentage = (int) (freeBytes * 100L / chunkSize);
if (freePercentage == 0) {
return 99;
}
return 100 - freePercentage;
}
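// For illustration: a 16 MiB chunk with 4 MiB free has freePercentage = 4 MiB * 100 / 16 MiB = 25,
// so usage() reports 75. A nearly full chunk (say, 100 bytes free) has freePercentage = 0 and
// reports 99 rather than 100; only a chunk with freeBytes == 0 reports 100% usage.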
UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache cache, PooledAllocatorControl control) {
final long handle;
if (sizeIdx <= arena.smallMaxSizeIdx) {
// small
handle = allocateSubpage(sizeIdx);
if (handle < 0) {
return null;
}
assert isSubpage(handle);
} else {
// normal
// runSize must be multiple of pageSize
int runSize = arena.sizeIdx2size(sizeIdx);
handle = allocateRun(runSize);
if (handle < 0) {
return null;
}
}
return allocateBuffer(handle, size, cache, control);
}
private long allocateRun(int runSize) {
int pages = runSize >> pageShifts;
int pageIdx = arena.pages2pageIdx(pages);
synchronized (runsAvail) {
//find first queue which has at least one big enough run
int queueIdx = runFirstBestFit(pageIdx);
if (queueIdx == -1) {
return -1;
}
//get run with min offset in this queue
LongPriorityQueue queue = runsAvail[queueIdx];
long handle = queue.poll();
assert handle != LongPriorityQueue.NO_VALUE && !isUsed(handle) : "invalid handle: " + handle;
removeAvailRun(queue, handle);
if (handle != -1) {
handle = splitLargeRun(handle, pages);
}
freeBytes -= runSize(pageShifts, handle);
return handle;
}
}
private int calculateRunSize(int sizeIdx) {
int maxElements = 1 << pageShifts - SizeClasses.LOG2_QUANTUM;
int runSize = 0;
int nElements;
final int elemSize = arena.sizeIdx2size(sizeIdx);
// Find the lowest common multiple of pageSize and elemSize
do {
runSize += pageSize;
nElements = runSize / elemSize;
} while (nElements < maxElements && runSize != nElements * elemSize);
while (nElements > maxElements) {
runSize -= pageSize;
nElements = runSize / elemSize;
}
assert nElements > 0;
assert runSize <= chunkSize;
assert runSize >= elemSize;
return runSize;
}
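// Worked example (assuming SizeClasses.LOG2_QUANTUM == 4, as in upstream Netty): with
// pageSize = 8192, pageShifts = 13 and elemSize = 1280, maxElements = 1 << (13 - 4) = 512.
// The loop stops at runSize = 40960 (5 pages), since 40960 / 1280 = 32 elements and
// 32 * 1280 == 40960, i.e. the smallest run that is a whole multiple of both the page size
// and the element size.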
private int runFirstBestFit(int pageIdx) {
if (freeBytes == chunkSize) {
return arena.nPSizes - 1;
}
for (int i = pageIdx; i < arena.nPSizes; i++) {
LongPriorityQueue queue = runsAvail[i];
if (queue != null && !queue.isEmpty()) {
return i;
}
}
return -1;
}
private long splitLargeRun(long handle, int needPages) {
assert needPages > 0;
int totalPages = runPages(handle);
assert needPages <= totalPages;
int remPages = totalPages - needPages;
if (remPages > 0) {
int runOffset = runOffset(handle);
// keep track of trailing unused pages for later use
int availOffset = runOffset + needPages;
long availRun = toRunHandle(availOffset, remPages, 0);
insertAvailRun(availOffset, remPages, availRun);
// not avail
return toRunHandle(runOffset, needPages, 1);
}
//mark it as used
handle |= 1L << IS_USED_SHIFT;
return handle;
}
/**
* Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to the
* subpage pool in the PoolArena that owns this PoolChunk.
*
* @param sizeIdx sizeIdx of the normalized size
*
* @return the handle of the subpage allocation, or -1 if the allocation failed
*/
private long allocateSubpage(int sizeIdx) {
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
PoolSubpage head = arena.findSubpagePoolHead(sizeIdx);
synchronized (head) {
//allocate a new run
int runSize = calculateRunSize(sizeIdx);
//runSize must be a multiple of pageSize
long runHandle = allocateRun(runSize);
if (runHandle < 0) {
return -1;
}
int runOffset = runOffset(runHandle);
assert subpages[runOffset] == null;
int elemSize = arena.sizeIdx2size(sizeIdx);
PoolSubpage subpage = new PoolSubpage(head, this, pageShifts, runOffset,
runSize(pageShifts, runHandle), elemSize);
subpages[runOffset] = subpage;
return subpage.allocate();
}
}
/**
* Free a subpage or a run of pages. When a subpage is freed from a PoolSubpage, it might be added back to the
* subpage pool of the owning PoolArena. If the subpage pool in the PoolArena has at least one other PoolSubpage of
* the given elemSize, we can completely free the owning page, so it is available for subsequent allocations.
*
* @param handle handle to free
*/
void free(long handle, int normCapacity) {
baseDrop.drop(base); // Decrement reference count.
if (isSubpage(handle)) {
int sizeIdx = arena.size2SizeIdx(normCapacity);
PoolSubpage head = arena.findSubpagePoolHead(sizeIdx);
int sIdx = runOffset(handle);
PoolSubpage subpage = subpages[sIdx];
assert subpage != null && subpage.doNotDestroy;
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
synchronized (head) {
if (subpage.free(head, bitmapIdx(handle))) {
//the subpage is still used, do not free it
return;
}
assert !subpage.doNotDestroy;
// Null out slot in the array as it was freed, and we should not use it anymore.
subpages[sIdx] = null;
}
}
//start free run
int pages = runPages(handle);
synchronized (runsAvail) {
// collapse continuous runs, successfully collapsed runs
// will be removed from runsAvail and runsAvailMap
long finalRun = collapseRuns(handle);
//set run as not used
finalRun &= ~(1L << IS_USED_SHIFT);
//if it is a subpage, set it to run
finalRun &= ~(1L << IS_SUBPAGE_SHIFT);
insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun);
freeBytes += pages << pageShifts;
}
}
private long collapseRuns(long handle) {
return collapseNext(collapsePast(handle));
}
private long collapsePast(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long pastRun = getAvailRunByOffset(runOffset - 1);
if (pastRun == -1) {
return handle;
}
int pastOffset = runOffset(pastRun);
int pastPages = runPages(pastRun);
//is continuous
if (pastRun != handle && pastOffset + pastPages == runOffset) {
//remove past run
removeAvailRun(pastRun);
handle = toRunHandle(pastOffset, pastPages + runPages, 0);
} else {
return handle;
}
}
}
private long collapseNext(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long nextRun = getAvailRunByOffset(runOffset + runPages);
if (nextRun == -1) {
return handle;
}
int nextOffset = runOffset(nextRun);
int nextPages = runPages(nextRun);
//is continuous
if (nextRun != handle && runOffset + runPages == nextOffset) {
//remove next run
removeAvailRun(nextRun);
handle = toRunHandle(runOffset, runPages + nextPages, 0);
} else {
return handle;
}
}
}
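// For illustration: if the freed run covers pages [5, 7] (runOffset = 5, 3 pages) and
// runsAvailMap also holds a free run covering pages [8, 11], collapseNext finds it via
// getAvailRunByOffset(5 + 3), removes it, and folds both into a single handle for offset 5
// spanning 7 pages; collapsePast performs the symmetric merge towards lower offsets.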
private static long toRunHandle(int runOffset, int runPages, int inUsed) {
return (long) runOffset << RUN_OFFSET_SHIFT
| (long) runPages << SIZE_SHIFT
| (long) inUsed << IS_USED_SHIFT;
}
UntetheredMemory allocateBuffer(long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
if (isRun(handle)) {
int offset = runOffset(handle) << pageShifts;
int maxLength = runSize(pageShifts, handle);
PoolThreadCache poolThreadCache = arena.parent.threadCache();
initAllocatorControl(control, poolThreadCache, handle, maxLength);
ArcDrop.acquire(baseDrop);
return new UntetheredChunkAllocation(
memory, this, poolThreadCache, handle, maxLength, offset, size);
} else {
return allocateBufferWithSubpage(handle, size, threadCache, control);
}
}
UntetheredMemory allocateBufferWithSubpage(long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
int runOffset = runOffset(handle);
int bitmapIdx = bitmapIdx(handle);
PoolSubpage s = subpages[runOffset];
assert s.doNotDestroy;
assert size <= s.elemSize;
int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize;
initAllocatorControl(control, threadCache, handle, s.elemSize);
ArcDrop.acquire(baseDrop);
return new UntetheredChunkAllocation(memory, this, threadCache, handle, s.elemSize, offset, size);
}
@SuppressWarnings("unchecked")
private static final class UntetheredChunkAllocation implements UntetheredMemory {
private final Object memory;
private final PoolChunk chunk;
private final PoolThreadCache threadCache;
private final long handle;
private final int maxLength;
private final int offset;
private final int size;
private UntetheredChunkAllocation(
Object memory, PoolChunk chunk, PoolThreadCache threadCache,
long handle, int maxLength, int offset, int size) {
this.memory = memory;
this.chunk = chunk;
this.threadCache = threadCache;
this.handle = handle;
this.maxLength = maxLength;
this.offset = offset;
this.size = size;
}
@Override
public <Memory> Memory memory() {
return (Memory) chunk.arena.manager.sliceMemory(memory, offset, size);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
PooledDrop pooledDrop = new PooledDrop(chunk.arena, chunk, threadCache, handle, maxLength);
return (Drop<BufferType>) CleanerDrop.wrap(pooledDrop);
}
}
private void initAllocatorControl(PooledAllocatorControl control, PoolThreadCache threadCache, long handle,
int normSize) {
control.arena = arena;
control.chunk = this;
control.threadCache = threadCache;
control.handle = handle;
control.normSize = normSize;
}
@Override
public int chunkSize() {
return chunkSize;
}
@Override
public int freeBytes() {
synchronized (arena) {
return freeBytes;
}
}
@Override
public String toString() {
final int freeBytes;
synchronized (arena) {
freeBytes = this.freeBytes;
}
return new StringBuilder()
.append("Chunk(")
.append(Integer.toHexString(System.identityHashCode(this)))
.append(": ")
.append(usage(freeBytes))
.append("%, ")
.append(chunkSize - freeBytes)
.append('/')
.append(chunkSize)
.append(')')
.toString();
}
void destroy() {
baseDrop.drop(base); // Decrement reference count from the chunk (allocated buffers may keep the base alive)
}
static int runOffset(long handle) {
return (int) (handle >> RUN_OFFSET_SHIFT);
}
static int runSize(int pageShifts, long handle) {
return runPages(handle) << pageShifts;
}
static int runPages(long handle) {
return (int) (handle >> SIZE_SHIFT & 0x7fff);
}
static boolean isUsed(long handle) {
return (handle >> IS_USED_SHIFT & 1) == 1L;
}
static boolean isRun(long handle) {
return !isSubpage(handle);
}
static boolean isSubpage(long handle) {
return (handle >> IS_SUBPAGE_SHIFT & 1) == 1L;
}
static int bitmapIdx(long handle) {
return (int) handle;
}
}

View File

@ -1,250 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.util.internal.StringUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import static java.lang.Math.max;
import static java.lang.Math.min;
final class PoolChunkList implements PoolChunkListMetric {
private static final Iterator<PoolChunkMetric> EMPTY_METRICS = Collections.emptyIterator();
private final PoolArena arena;
private final PoolChunkList nextList;
private final int minUsage;
private final int maxUsage;
private final int maxCapacity;
private PoolChunk head;
private final int freeMinThreshold;
private final int freeMaxThreshold;
// This is only updated once, when the linked list of PoolChunkLists is created in the PoolArena constructor.
private PoolChunkList prevList;
PoolChunkList(PoolArena arena, PoolChunkList nextList, int minUsage, int maxUsage, int chunkSize) {
assert minUsage <= maxUsage;
this.arena = arena;
this.nextList = nextList;
this.minUsage = minUsage;
this.maxUsage = maxUsage;
maxCapacity = calculateMaxCapacity(minUsage, chunkSize);
// the thresholds are aligned with PoolChunk.usage() logic:
// 1) basic logic: usage() = 100 - freeBytes * 100L / chunkSize
// so, for example: (usage() >= maxUsage) condition can be transformed in the following way:
// 100 - freeBytes * 100L / chunkSize >= maxUsage
// freeBytes <= chunkSize * (100 - maxUsage) / 100
// let freeMinThreshold = chunkSize * (100 - maxUsage) / 100, then freeBytes <= freeMinThreshold
//
// 2) usage() returns an int value and has a floor rounding during a calculation,
// to stay aligned, the absolute thresholds have to be shifted by "the rounding step":
// freeBytes * 100 / chunkSize < 1
// the condition can be converted to: freeBytes < 1 * chunkSize / 100
// this is why we have the + 0.99999999 shift. An example of why just a +1 shift cannot be used:
// freeBytes = 16777216 == freeMaxThreshold: 16777216, usage = 0 < minUsage: 1, chunkSize: 16777216
// At the same time we want to have zero thresholds in case of (maxUsage == 100) and (minUsage == 100).
//
freeMinThreshold = maxUsage == 100 ? 0 : (int) (chunkSize * (100.0 - maxUsage + 0.99999999) / 100L);
freeMaxThreshold = minUsage == 100 ? 0 : (int) (chunkSize * (100.0 - minUsage + 0.99999999) / 100L);
}
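// For illustration, assuming the default 4 MiB chunk (chunkSize = 4194304) and a list with
// minUsage = 25, maxUsage = 75 (such as q025):
//   freeMinThreshold = (int) (4194304 * (100.0 - 75 + 0.99999999) / 100) = 1090519 (~26%)
//   freeMaxThreshold = (int) (4194304 * (100.0 - 25 + 0.99999999) / 100) = 3187671 (~76%)
// so a chunk moves towards the next list once its free space drops to ~26% of the chunk size,
// and moves back towards the previous list once its free space exceeds ~76%.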
/**
* Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s
* that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings.
*/
private static int calculateMaxCapacity(int minUsage, int chunkSize) {
minUsage = minUsage0(minUsage);
if (minUsage == 100) {
// If the minUsage is 100 we can not allocate anything out of this list.
return 0;
}
// Calculate the maximum amount of bytes that can be allocated from a PoolChunk in this PoolChunkList.
//
// As an example:
// - If a PoolChunkList has minUsage == 25 we are allowed to allocate at most 75% of the chunkSize because
// this is the maximum amount available in any PoolChunk in this PoolChunkList.
return (int) (chunkSize * (100L - minUsage) / 100L);
}
void prevList(PoolChunkList prevList) {
assert this.prevList == null;
this.prevList = prevList;
}
UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) {
int normCapacity = arena.sizeIdx2size(sizeIdx);
if (normCapacity > maxCapacity) {
// Either this PoolChunkList is empty, or the requested capacity is larger than the capacity which can
// be handled by the PoolChunks that are contained in this PoolChunkList.
return null;
}
for (PoolChunk cur = head; cur != null; cur = cur.next) {
UntetheredMemory memory = cur.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
if (cur.freeBytes <= freeMinThreshold) {
remove(cur);
nextList.add(cur);
}
return memory;
}
}
return null;
}
boolean free(PoolChunk chunk, long handle, int normCapacity) {
chunk.free(handle, normCapacity);
if (chunk.freeBytes > freeMaxThreshold) {
remove(chunk);
// Move the PoolChunk down the PoolChunkList linked-list.
return move0(chunk);
}
return true;
}
private boolean move(PoolChunk chunk) {
if (chunk.freeBytes > freeMaxThreshold) {
// Move the PoolChunk down the PoolChunkList linked-list.
return move0(chunk);
}
// PoolChunk fits into this PoolChunkList, adding it here.
add0(chunk);
return true;
}
/**
* Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list, so it will end up in the right
* {@link PoolChunkList} that has the correct minUsage / maxUsage with respect to {@link PoolChunk#usage()}.
*/
private boolean move0(PoolChunk chunk) {
if (prevList == null) {
// There is no previous PoolChunkList, so return false, which results in the PoolChunk being destroyed and
// all memory associated with the PoolChunk being released.
return false;
}
return prevList.move(chunk);
}
void add(PoolChunk chunk) {
if (chunk.freeBytes <= freeMinThreshold) {
nextList.add(chunk);
return;
}
add0(chunk);
}
/**
* Adds the {@link PoolChunk} to this {@link PoolChunkList}.
*/
void add0(PoolChunk chunk) {
chunk.parent = this;
if (head == null) {
head = chunk;
chunk.prev = null;
chunk.next = null;
} else {
chunk.prev = null;
chunk.next = head;
head.prev = chunk;
head = chunk;
}
}
private void remove(PoolChunk cur) {
if (cur == head) {
head = cur.next;
if (head != null) {
head.prev = null;
}
} else {
PoolChunk next = cur.next;
cur.prev.next = next;
if (next != null) {
next.prev = cur.prev;
}
}
}
@Override
public int minUsage() {
return minUsage0(minUsage);
}
@Override
public int maxUsage() {
return min(maxUsage, 100);
}
private static int minUsage0(int value) {
return max(1, value);
}
@Override
public Iterator<PoolChunkMetric> iterator() {
synchronized (arena) {
if (head == null) {
return EMPTY_METRICS;
}
List<PoolChunkMetric> metrics = new ArrayList<>();
for (PoolChunk cur = head;;) {
metrics.add(cur);
cur = cur.next;
if (cur == null) {
break;
}
}
return metrics.iterator();
}
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
synchronized (arena) {
if (head == null) {
return "none";
}
for (PoolChunk cur = head;;) {
buf.append(cur);
cur = cur.next;
if (cur == null) {
break;
}
buf.append(StringUtil.NEWLINE);
}
}
return buf.toString();
}
void destroy() {
PoolChunk chunk = head;
while (chunk != null) {
chunk.destroy();
chunk = chunk.next;
}
head = null;
}
}

View File

@ -1,32 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a list of chunks.
*/
public interface PoolChunkListMetric extends Iterable<PoolChunkMetric> {
/**
* Return the minimum usage of the chunk list before which chunks are promoted to the previous list.
*/
int minUsage();
/**
* Return the maximum usage of the chunk list after which chunks are promoted to the next list.
*/
int maxUsage();
}

View File

@ -1,37 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a chunk.
*/
public interface PoolChunkMetric {
/**
* Return the percentage of the current usage of the chunk.
*/
int usage();
/**
* Return the size of the chunk in bytes. This is the maximum number of bytes that can be served out of the chunk.
*/
int chunkSize();
/**
* Return the number of free bytes in the chunk.
*/
int freeBytes();
}

View File

@ -1,287 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import static io.netty.buffer.api.pool.PoolChunk.RUN_OFFSET_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.SIZE_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.IS_USED_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.IS_SUBPAGE_SHIFT;
import static io.netty.buffer.api.pool.SizeClasses.LOG2_QUANTUM;
final class PoolSubpage implements PoolSubpageMetric {
final PoolChunk chunk;
private final int pageShifts;
private final int runOffset;
private final int runSize;
private final long[] bitmap;
PoolSubpage prev;
PoolSubpage next;
boolean doNotDestroy;
int elemSize;
private int maxNumElems;
private int bitmapLength;
private int nextAvail;
private int numAvail;
/** Special constructor that creates a linked list head */
PoolSubpage() {
chunk = null;
pageShifts = -1;
runOffset = -1;
elemSize = -1;
runSize = -1;
bitmap = null;
}
PoolSubpage(PoolSubpage head, PoolChunk chunk, int pageShifts, int runOffset, int runSize, int elemSize) {
this.chunk = chunk;
this.pageShifts = pageShifts;
this.runOffset = runOffset;
this.runSize = runSize;
this.elemSize = elemSize;
bitmap = new long[runSize >>> 6 + LOG2_QUANTUM]; // runSize / 64 / QUANTUM
doNotDestroy = true;
if (elemSize != 0) {
maxNumElems = numAvail = runSize / elemSize;
nextAvail = 0;
bitmapLength = maxNumElems >>> 6;
if ((maxNumElems & 63) != 0) {
bitmapLength ++;
}
for (int i = 0; i < bitmapLength; i ++) {
bitmap[i] = 0;
}
}
addToPool(head);
}
/**
* Returns the handle of the subpage allocation (encoding the bitmap index), or -1 if no element is available.
*/
long allocate() {
if (numAvail == 0 || !doNotDestroy) {
return -1;
}
final int bitmapIdx = getNextAvail();
int q = bitmapIdx >>> 6;
int r = bitmapIdx & 63;
assert (bitmap[q] >>> r & 1) == 0;
bitmap[q] |= 1L << r;
if (-- numAvail == 0) {
removeFromPool();
}
return toHandle(bitmapIdx);
}
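// For illustration: with bitmapIdx = 70, q = 70 >>> 6 = 1 and r = 70 & 63 = 6, so the
// allocation sets bit 6 of bitmap[1]. The returned handle also carries the owning run's
// offset, page count and the IS_USED / IS_SUBPAGE flags (see toHandle below), so the arena
// can route a later free() back to this subpage.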
/**
* @return {@code true} if this subpage is in use.
* {@code false} if this subpage is not used by its chunk and thus it's OK to be released.
*/
boolean free(PoolSubpage head, int bitmapIdx) {
if (elemSize == 0) {
return true;
}
int q = bitmapIdx >>> 6;
int r = bitmapIdx & 63;
assert (bitmap[q] >>> r & 1) != 0;
bitmap[q] ^= 1L << r;
setNextAvail(bitmapIdx);
if (numAvail++ == 0) {
addToPool(head);
// When maxNumElems == 1, the maximum numAvail is also 1.
// Each of these PoolSubpages will come through here when it performs a free operation.
// If we returned true directly from here, the rest of the code would be unreachable
// and they would never actually be recycled, so only return true when maxNumElems > 1.
if (maxNumElems > 1) {
return true;
}
}
if (numAvail != maxNumElems) {
return true;
} else {
// Subpage not in use (numAvail == maxNumElems)
if (prev == next) {
// Do not remove if this subpage is the only one left in the pool.
return true;
}
// Remove this subpage from the pool if there are other subpages left in the pool.
doNotDestroy = false;
removeFromPool();
return false;
}
}
private void addToPool(PoolSubpage head) {
assert prev == null && next == null;
prev = head;
next = head.next;
next.prev = this;
head.next = this;
}
private void removeFromPool() {
assert prev != null && next != null;
prev.next = next;
next.prev = prev;
next = null;
prev = null;
}
private void setNextAvail(int bitmapIdx) {
nextAvail = bitmapIdx;
}
private int getNextAvail() {
int nextAvail = this.nextAvail;
if (nextAvail >= 0) {
this.nextAvail = -1;
return nextAvail;
}
return findNextAvail();
}
private int findNextAvail() {
final long[] bitmap = this.bitmap;
final int bitmapLength = this.bitmapLength;
for (int i = 0; i < bitmapLength; i ++) {
long bits = bitmap[i];
if (~bits != 0) {
return findNextAvail0(i, bits);
}
}
return -1;
}
private int findNextAvail0(int i, long bits) {
final int maxNumElems = this.maxNumElems;
final int baseVal = i << 6;
for (int j = 0; j < 64; j ++) {
if ((bits & 1) == 0) {
int val = baseVal | j;
if (val < maxNumElems) {
return val;
} else {
break;
}
}
bits >>>= 1;
}
return -1;
}
private long toHandle(int bitmapIdx) {
int pages = runSize >> pageShifts;
return (long) runOffset << RUN_OFFSET_SHIFT
| (long) pages << SIZE_SHIFT
| 1L << IS_USED_SHIFT
| 1L << IS_SUBPAGE_SHIFT
| bitmapIdx;
}
@Override
public String toString() {
final boolean doNotDestroy;
final int maxNumElems;
final int numAvail;
final int elemSize;
if (chunk == null) {
// This is the head so there is no need to synchronize at all as these never change.
doNotDestroy = true;
maxNumElems = 0;
numAvail = 0;
elemSize = -1;
} else {
synchronized (chunk.arena) {
if (!this.doNotDestroy) {
doNotDestroy = false;
// Not used for creating the String.
maxNumElems = numAvail = elemSize = -1;
} else {
doNotDestroy = true;
maxNumElems = this.maxNumElems;
numAvail = this.numAvail;
elemSize = this.elemSize;
}
}
}
if (!doNotDestroy) {
return "(" + runOffset + ": not in use)";
}
return "(" + runOffset + ": " + (maxNumElems - numAvail) + '/' + maxNumElems +
", offset: " + runOffset + ", length: " + runSize + ", elemSize: " + elemSize + ')';
}
@Override
public int maxNumElements() {
if (chunk == null) {
// It's the head.
return 0;
}
synchronized (chunk.arena) {
return maxNumElems;
}
}
@Override
public int numAvailable() {
if (chunk == null) {
// It's the head.
return 0;
}
synchronized (chunk.arena) {
return numAvail;
}
}
@Override
public int elementSize() {
if (chunk == null) {
// It's the head.
return -1;
}
synchronized (chunk.arena) {
return elemSize;
}
}
@Override
public int pageSize() {
return 1 << pageShifts;
}
void destroy() {
if (chunk != null) {
chunk.destroy();
}
}
}

View File

@ -1,42 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a sub-page.
*/
public interface PoolSubpageMetric {
/**
* Return the maximum number of elements that can be allocated out of the sub-page.
*/
int maxNumElements();
/**
* Return the number of available elements to be allocated.
*/
int numAvailable();
/**
* Return the size (in bytes) of the elements that will be allocated.
*/
int elementSize();
/**
* Return the page size (in bytes) of this page.
*/
int pageSize();
}

View File

@ -1,393 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.pool.PoolArena.SizeClass;
import io.netty.util.internal.MathUtil;
import io.netty.util.internal.ObjectPool;
import io.netty.util.internal.ObjectPool.Handle;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import static io.netty.buffer.api.pool.PoolArena.SizeClass.Normal;
import static io.netty.buffer.api.pool.PoolArena.SizeClass.Small;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
/**
* Acts as a thread cache for allocations. This implementation is modelled after
* <a href="https://people.freebsd.org/~jasone/jemalloc/bsdcan2006/jemalloc.pdf">jemalloc</a> and the described
* techniques of
* <a href="https://www.facebook.com/notes/facebook-engineering/scalable-memory-allocation-using-jemalloc/480222803919">
* Scalable memory allocation using jemalloc</a>.
*/
final class PoolThreadCache {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(PoolThreadCache.class);
private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1;
final PoolArena arena;
// Hold the caches for the different size classes, which are small and normal.
private final MemoryRegionCache[] smallSubPageCaches;
private final MemoryRegionCache[] normalCaches;
private final int freeSweepAllocationThreshold;
private int allocations;
PoolThreadCache(PoolArena arena,
int smallCacheSize, int normalCacheSize, int maxCachedBufferCapacity,
int freeSweepAllocationThreshold) {
checkPositiveOrZero(maxCachedBufferCapacity, "maxCachedBufferCapacity");
this.freeSweepAllocationThreshold = freeSweepAllocationThreshold;
this.arena = arena;
if (arena != null) {
// Create the caches for the heap allocations
smallSubPageCaches = createSubPageCaches(
smallCacheSize, arena.numSmallSubpagePools);
normalCaches = createNormalCaches(
normalCacheSize, maxCachedBufferCapacity, arena);
arena.numThreadCaches.getAndIncrement();
} else {
// No arena is configured, so just null out all caches
smallSubPageCaches = null;
normalCaches = null;
}
// Only check if there are caches in use.
if ((smallSubPageCaches != null || normalCaches != null)
&& freeSweepAllocationThreshold < 1) {
throw new IllegalArgumentException("freeSweepAllocationThreshold: "
+ freeSweepAllocationThreshold + " (expected: > 0)");
}
}
private static MemoryRegionCache[] createSubPageCaches(
int cacheSize, int numCaches) {
if (cacheSize > 0 && numCaches > 0) {
MemoryRegionCache[] cache = new MemoryRegionCache[numCaches];
for (int i = 0; i < cache.length; i++) {
// TODO: maybe use cacheSize / cache.length
cache[i] = new SubPageMemoryRegionCache(cacheSize);
}
return cache;
} else {
return null;
}
}
private static MemoryRegionCache[] createNormalCaches(
int cacheSize, int maxCachedBufferCapacity, PoolArena area) {
if (cacheSize > 0 && maxCachedBufferCapacity > 0) {
int max = Math.min(area.chunkSize, maxCachedBufferCapacity);
// Create as many normal caches as we support based on how many sizeIdx we have and what the upper
// bound is that we want to cache in general.
List<MemoryRegionCache> cache = new ArrayList<>();
for (int idx = area.numSmallSubpagePools; idx < area.nSizes && area.sizeIdx2size(idx) <= max; idx++) {
cache.add(new NormalMemoryRegionCache(cacheSize));
}
return cache.toArray(MemoryRegionCache[]::new);
} else {
return null;
}
}
// val > 0
static int log2(int val) {
return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val);
}
/**
* Try to allocate a small buffer out of the cache. Returns the allocated memory if successful, {@code null} otherwise.
*/
UntetheredMemory allocateSmall(PooledAllocatorControl control, int size, int sizeIdx) {
return allocate(cacheForSmall(sizeIdx), control, size);
}
/**
* Try to allocate a normal buffer out of the cache. Returns the allocated memory if successful, {@code null} otherwise.
*/
UntetheredMemory allocateNormal(PoolArena area, PooledAllocatorControl control, int size, int sizeIdx) {
return allocate(cacheForNormal(area, sizeIdx), control, size);
}
private UntetheredMemory allocate(MemoryRegionCache cache, PooledAllocatorControl control, int size) {
if (cache == null) {
// no cache found so just return null here
return null;
}
UntetheredMemory allocated = cache.allocate(size, this, control);
if (++allocations >= freeSweepAllocationThreshold) {
allocations = 0;
trim();
}
return allocated;
}
/**
* Add {@link PoolChunk} and {@code handle} to the cache if there is enough room.
* Returns {@code true} if it fit into the cache, {@code false} otherwise.
*/
boolean add(PoolArena area, PoolChunk chunk,
long handle, int normCapacity, SizeClass sizeClass) {
int sizeIdx = area.size2SizeIdx(normCapacity);
MemoryRegionCache cache = cache(area, sizeIdx, sizeClass);
if (cache == null) {
return false;
}
return cache.add(chunk, handle, normCapacity);
}
private MemoryRegionCache cache(PoolArena area, int sizeIdx, SizeClass sizeClass) {
if (sizeClass == Normal) {
return cacheForNormal(area, sizeIdx);
}
if (sizeClass == Small) {
return cacheForSmall(sizeIdx);
}
throw new AssertionError("Unexpected size class: " + sizeClass);
}
/**
* Should be called when the Thread that uses this cache is about to exit, to release resources out of the cache
*/
void free() {
int numFreed = free(smallSubPageCaches) + free(normalCaches);
if (numFreed > 0 && logger.isDebugEnabled()) {
logger.debug("Freed {} thread-local buffer(s) from thread: {}", numFreed,
Thread.currentThread().getName());
}
if (arena != null) {
arena.numThreadCaches.getAndDecrement();
}
}
private static int free(MemoryRegionCache[] caches) {
if (caches == null) {
return 0;
}
int numFreed = 0;
for (MemoryRegionCache c: caches) {
numFreed += free(c);
}
return numFreed;
}
private static int free(MemoryRegionCache cache) {
if (cache == null) {
return 0;
}
return cache.free();
}
void trim() {
trim(smallSubPageCaches);
trim(normalCaches);
}
private static void trim(MemoryRegionCache[] caches) {
if (caches == null) {
return;
}
for (MemoryRegionCache c: caches) {
trim(c);
}
}
private static void trim(MemoryRegionCache cache) {
if (cache == null) {
return;
}
cache.trim();
}
private MemoryRegionCache cacheForSmall(int sizeIdx) {
return cache(smallSubPageCaches, sizeIdx);
}
private MemoryRegionCache cacheForNormal(PoolArena area, int sizeIdx) {
// We need to subtract area.numSmallSubpagePools as sizeIdx is the overall index for all sizes.
int idx = sizeIdx - area.numSmallSubpagePools;
return cache(normalCaches, idx);
}
private static MemoryRegionCache cache(MemoryRegionCache[] cache, int sizeIdx) {
if (cache == null || sizeIdx > cache.length - 1) {
return null;
}
return cache[sizeIdx];
}
/**
* Cache used for buffers which are backed by SMALL size.
*/
private static final class SubPageMemoryRegionCache extends MemoryRegionCache {
SubPageMemoryRegionCache(int size) {
super(size, Small);
}
@Override
protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
return chunk.allocateBufferWithSubpage(handle, size, threadCache, control);
}
}
/**
* Cache used for buffers which are backed by NORMAL size.
*/
private static final class NormalMemoryRegionCache extends MemoryRegionCache {
NormalMemoryRegionCache(int size) {
super(size, Normal);
}
@Override
protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
return chunk.allocateBuffer(handle, size, threadCache, control);
}
}
private abstract static class MemoryRegionCache {
private final int size;
private final Queue<Entry> queue;
private final SizeClass sizeClass;
private int allocations;
MemoryRegionCache(int size, SizeClass sizeClass) {
this.size = MathUtil.safeFindNextPositivePowerOfTwo(size);
queue = PlatformDependent.newFixedMpscQueue(this.size);
this.sizeClass = sizeClass;
}
/**
* Allocate a new {@link UntetheredMemory} using the provided chunk and handle with the capacity restrictions.
*/
protected abstract UntetheredMemory allocBuf(
PoolChunk chunk, long handle, int size, PoolThreadCache threadCache, PooledAllocatorControl control);
/**
* Add to cache if not already full.
*/
public final boolean add(PoolChunk chunk, long handle, int normCapacity) {
Entry entry = newEntry(chunk, handle, normCapacity);
boolean queued = queue.offer(entry);
if (!queued) {
// If it was not possible to cache the chunk, immediately recycle the entry
entry.recycle();
}
return queued;
}
/**
* Allocate something out of the cache if possible and remove the entry from the cache.
*/
public final UntetheredMemory allocate(int size, PoolThreadCache threadCache, PooledAllocatorControl control) {
Entry entry = queue.poll();
if (entry == null) {
return null;
}
UntetheredMemory buffer = allocBuf(entry.chunk, entry.handle, size, threadCache, control);
entry.recycle();
// allocations is not thread-safe, which is fine as it is only updated from the same thread all the time.
allocations++;
return buffer;
}
/**
* Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s.
*/
public final int free() {
return free(Integer.MAX_VALUE);
}
private int free(int max) {
int numFreed = 0;
for (; numFreed < max; numFreed++) {
Entry entry = queue.poll();
if (entry != null) {
freeEntry(entry);
} else {
// all cleared
return numFreed;
}
}
return numFreed;
}
/**
* Free up cached {@link PoolChunk}s if not allocated frequently enough.
*/
public final void trim() {
int free = size - allocations;
allocations = 0;
// We did not even allocate all of the cached entries, so free the ones that were not used.
if (free > 0) {
free(free);
}
}
private void freeEntry(Entry entry) {
PoolChunk chunk = entry.chunk;
long handle = entry.handle;
entry.recycle();
chunk.arena.freeChunk(chunk, handle, entry.normCapacity, sizeClass);
}
static final class Entry {
final Handle<Entry> recyclerHandle;
PoolChunk chunk;
long handle = -1;
int normCapacity;
Entry(Handle<Entry> recyclerHandle) {
this.recyclerHandle = recyclerHandle;
}
void recycle() {
chunk = null;
handle = -1;
recyclerHandle.recycle(this);
}
}
private static Entry newEntry(PoolChunk chunk, long handle, int normCapacity) {
Entry entry = RECYCLER.get();
entry.chunk = chunk;
entry.handle = handle;
entry.normCapacity = normCapacity;
return entry;
}
private static final ObjectPool<Entry> RECYCLER = ObjectPool.newPool(handle -> new Entry(handle));
}
}

View File

@ -1,38 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
class PooledAllocatorControl implements AllocatorControl {
public PooledBufferAllocator parent;
public PoolArena arena;
public PoolChunk chunk;
public PoolThreadCache threadCache;
public long handle;
public int normSize;
@Override
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
return parent.allocate(this, size);
}
@Override
public void recoverMemory(Object memory) {
arena.free(chunk, handle, normSize, threadCache);
}
}

View File

@ -1,558 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.MemoryManager;
import io.netty.util.NettyRuntime;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.concurrent.FastThreadLocalThread;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.ThreadExecutorMap;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
import static java.util.Objects.requireNonNull;
public class PooledBufferAllocator implements BufferAllocator, BufferAllocatorMetricProvider {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(PooledBufferAllocator.class);
private static final int DEFAULT_NUM_HEAP_ARENA;
private static final int DEFAULT_NUM_DIRECT_ARENA;
private static final int DEFAULT_PAGE_SIZE;
private static final int DEFAULT_MAX_ORDER; // 8192 << 9 = 4 MiB per chunk
private static final int DEFAULT_SMALL_CACHE_SIZE;
private static final int DEFAULT_NORMAL_CACHE_SIZE;
static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
private static final int DEFAULT_CACHE_TRIM_INTERVAL;
private static final long DEFAULT_CACHE_TRIM_INTERVAL_MILLIS;
private static final boolean DEFAULT_USE_CACHE_FOR_ALL_THREADS;
private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT;
static final int DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK;
private static final int MIN_PAGE_SIZE = 4096;
private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE + 1) / 2);
private final Runnable trimTask = this::trimCurrentThreadCache;
static {
int defaultAlignment = SystemPropertyUtil.getInt(
"io.netty.allocator.directMemoryCacheAlignment", 0);
int defaultPageSize = SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192);
Throwable pageSizeFallbackCause = null;
try {
validateAndCalculatePageShifts(defaultPageSize, defaultAlignment);
} catch (Throwable t) {
pageSizeFallbackCause = t;
defaultPageSize = 8192;
defaultAlignment = 0;
}
DEFAULT_PAGE_SIZE = defaultPageSize;
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment;
int defaultMaxOrder = SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 9);
Throwable maxOrderFallbackCause = null;
try {
validateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
} catch (Throwable t) {
maxOrderFallbackCause = t;
defaultMaxOrder = 11;
}
DEFAULT_MAX_ORDER = defaultMaxOrder;
// Determine reasonable default for nHeapArena and nDirectArena.
// Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.
final Runtime runtime = Runtime.getRuntime();
/*
* We use 2 * available processors by default to reduce contention as we use 2 * available processors for the
* number of EventLoops in NIO and EPOLL as well. If we choose a smaller number we will run into hot spots as
* allocation and de-allocation needs to be synchronized on the PoolArena.
*
* See https://github.com/netty/netty/issues/3888.
*/
final int defaultMinNumArena = NettyRuntime.availableProcessors() * 2;
final int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
DEFAULT_NUM_HEAP_ARENA = Math.max(0,
SystemPropertyUtil.getInt(
"io.netty.allocator.numArenas",
(int) Math.min(
defaultMinNumArena,
runtime.maxMemory() / defaultChunkSize / 2 / 3)));
DEFAULT_NUM_DIRECT_ARENA = Math.max(0,
SystemPropertyUtil.getInt(
"io.netty.allocator.numDirectArenas",
(int) Math.min(
defaultMinNumArena,
PlatformDependent.maxDirectMemory() / defaultChunkSize / 2 / 3)));
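// For illustration (hypothetical machine): with 8 available processors and a 4 GiB max heap,
// defaultMinNumArena = 16 and maxMemory / defaultChunkSize / 2 / 3
// = 4294967296 / 4194304 / 2 / 3 = 170, so Math.min(16, 170) yields 16 arenas; the memory
// term caps the arena count so that three chunks per arena stay below half of max memory.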
// cache sizes
DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.smallCacheSize", 256);
DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.normalCacheSize", 64);
// 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in
// 'Scalable memory allocation using jemalloc'
DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.getInt(
"io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);
// the threshold (number of allocations) after which cached entries will be freed up if not frequently used
DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.getInt(
"io.netty.allocator.cacheTrimInterval", 8192);
DEFAULT_CACHE_TRIM_INTERVAL_MILLIS = SystemPropertyUtil.getLong(
"io.netty.allocator.cacheTrimIntervalMillis", 0);
DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean(
"io.netty.allocator.useCacheForAllThreads", false);
// Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array
// of 1024 elements. Otherwise, we would allocate 2048 and only use 1024 which is wasteful.
DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt(
"io.netty.allocator.maxCachedByteBuffersPerChunk", 1023);
if (logger.isDebugEnabled()) {
logger.debug("-Dio.netty.allocator.numArenas: {}", DEFAULT_NUM_HEAP_ARENA);
logger.debug("-Dio.netty.allocator.numDirectArenas: {}", DEFAULT_NUM_DIRECT_ARENA);
if (pageSizeFallbackCause == null) {
logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
} else {
logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
}
if (maxOrderFallbackCause == null) {
logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
} else {
logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
}
logger.debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
logger.debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
logger.debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
logger.debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
logger.debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
logger.debug("-Dio.netty.allocator.cacheTrimIntervalMillis: {}", DEFAULT_CACHE_TRIM_INTERVAL_MILLIS);
logger.debug("-Dio.netty.allocator.useCacheForAllThreads: {}", DEFAULT_USE_CACHE_FOR_ALL_THREADS);
logger.debug("-Dio.netty.allocator.maxCachedByteBuffersPerChunk: {}",
DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK);
}
}
private final MemoryManager manager;
private final PoolArena[] arenas;
private final int smallCacheSize;
private final int normalCacheSize;
private final List<PoolArenaMetric> arenaMetrics;
private final List<PoolArenaMetric> arenaMetricsView;
private final PoolThreadLocalCache threadCache;
private final int chunkSize;
private final PooledBufferAllocatorMetric metric;
public PooledBufferAllocator(MemoryManager manager) {
this(manager, manager.isNative()? DEFAULT_NUM_DIRECT_ARENA : DEFAULT_NUM_HEAP_ARENA,
DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER, DEFAULT_SMALL_CACHE_SIZE,
DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS,
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder) {
this(manager, numArenas, pageSize, maxOrder, DEFAULT_SMALL_CACHE_SIZE,
DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS,
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder,
int smallCacheSize, int normalCacheSize,
boolean useCacheForAllThreads) {
this(manager, numArenas, pageSize, maxOrder,
smallCacheSize, normalCacheSize,
useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder,
int smallCacheSize, int normalCacheSize,
boolean useCacheForAllThreads, int directMemoryCacheAlignment) {
this.manager = requireNonNull(manager, "MemoryManager");
threadCache = new PoolThreadLocalCache(useCacheForAllThreads);
this.smallCacheSize = smallCacheSize;
this.normalCacheSize = normalCacheSize;
if (directMemoryCacheAlignment != 0) {
if (!PlatformDependent.hasAlignDirectByteBuffer()) {
throw new UnsupportedOperationException("Buffer alignment is not supported. " +
"Either Unsafe or ByteBuffer.alignSlice() must be available.");
}
// Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple.
pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment);
}
chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder);
checkPositiveOrZero(numArenas, "numArenas");
checkPositiveOrZero(directMemoryCacheAlignment, "directMemoryCacheAlignment");
if (directMemoryCacheAlignment > 0 && !isDirectMemoryCacheAlignmentSupported()) {
throw new IllegalArgumentException("directMemoryCacheAlignment is not supported");
}
if ((directMemoryCacheAlignment & -directMemoryCacheAlignment) != directMemoryCacheAlignment) {
throw new IllegalArgumentException("directMemoryCacheAlignment: "
+ directMemoryCacheAlignment + " (expected: power of two)");
}
int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment);
if (numArenas > 0) {
arenas = newArenaArray(numArenas);
List<PoolArenaMetric> metrics = new ArrayList<>(arenas.length);
for (int i = 0; i < arenas.length; i ++) {
PoolArena arena = new PoolArena(this, manager,
pageSize, pageShifts, chunkSize,
directMemoryCacheAlignment);
arenas[i] = arena;
metrics.add(arena);
}
arenaMetrics = metrics;
arenaMetricsView = Collections.unmodifiableList(metrics);
} else {
arenas = null;
arenaMetrics = new ArrayList<>(1);
arenaMetricsView = Collections.emptyList();
}
metric = new PooledBufferAllocatorMetric(this);
}
private static PoolArena[] newArenaArray(int size) {
return new PoolArena[size];
}
private static int validateAndCalculatePageShifts(int pageSize, int alignment) {
if (pageSize < MIN_PAGE_SIZE) {
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')');
}
if ((pageSize & pageSize - 1) != 0) {
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)");
}
if (pageSize < alignment) {
throw new IllegalArgumentException("Alignment cannot be greater than page size. " +
"Alignment: " + alignment + ", page size: " + pageSize + '.');
}
// Logarithm base 2. At this point we know that pageSize is a power of two.
return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize);
}
private static int validateAndCalculateChunkSize(int pageSize, int maxOrder) {
if (maxOrder > 14) {
throw new IllegalArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)");
}
// Ensure the resulting chunkSize does not overflow.
int chunkSize = pageSize;
for (int i = maxOrder; i > 0; i--) {
if (chunkSize > MAX_CHUNK_SIZE / 2) {
throw new IllegalArgumentException(String.format(
"pageSize (%d) << maxOrder (%d) must not exceed %d", pageSize, maxOrder, MAX_CHUNK_SIZE));
}
chunkSize <<= 1;
}
return chunkSize;
}
@Override
public Buffer allocate(int size) {
if (size < 1) {
throw new IllegalArgumentException("Allocation size must be positive, but was " + size + '.');
}
PooledAllocatorControl control = new PooledAllocatorControl();
control.parent = this;
UntetheredMemory memory = allocate(control, size);
Buffer buffer = manager.recoverMemory(control, memory.memory(), memory.drop());
return buffer.fill((byte) 0).order(ByteOrder.nativeOrder());
}
UntetheredMemory allocate(PooledAllocatorControl control, int size) {
PoolThreadCache cache = threadCache.get();
PoolArena arena = cache.arena;
if (arena != null) {
return arena.allocate(control, cache, size);
}
return allocateUnpooled(size);
}
private UntetheredMemory allocateUnpooled(int size) {
return new UnpooledUnthetheredMemory(this, manager, size);
}
@Override
public void close() {
trimCurrentThreadCache();
threadCache.remove();
        if (arenas != null) {
            for (int i = 0, arenasLength = arenas.length; i < arenasLength; i++) {
                PoolArena arena = arenas[i];
                if (arena != null) {
                    arena.close();
                    arenas[i] = null;
                }
            }
        }
arenaMetrics.clear();
}
/**
     * Default number of heap arenas - System Property: io.netty.allocator.numArenas - default 2 * cores
*/
public static int defaultNumHeapArena() {
return DEFAULT_NUM_HEAP_ARENA;
}
/**
* Default number of direct arenas - System Property: io.netty.allocator.numDirectArenas - default 2 * cores
*/
public static int defaultNumDirectArena() {
return DEFAULT_NUM_DIRECT_ARENA;
}
/**
* Default buffer page size - System Property: io.netty.allocator.pageSize - default 8192
*/
public static int defaultPageSize() {
return DEFAULT_PAGE_SIZE;
}
/**
     * Default maximum order - System Property: io.netty.allocator.maxOrder - default 9
*/
public static int defaultMaxOrder() {
return DEFAULT_MAX_ORDER;
}
/**
     * Default thread caching behavior - System Property: io.netty.allocator.useCacheForAllThreads - default false
*/
public static boolean defaultUseCacheForAllThreads() {
return DEFAULT_USE_CACHE_FOR_ALL_THREADS;
}
/**
* Default prefer direct - System Property: io.netty.noPreferDirect - default false
*/
public static boolean defaultPreferDirect() {
return PlatformDependent.directBufferPreferred();
}
/**
* Default small cache size - System Property: io.netty.allocator.smallCacheSize - default 256
*/
public static int defaultSmallCacheSize() {
return DEFAULT_SMALL_CACHE_SIZE;
}
/**
* Default normal cache size - System Property: io.netty.allocator.normalCacheSize - default 64
*/
public static int defaultNormalCacheSize() {
return DEFAULT_NORMAL_CACHE_SIZE;
}
/**
* Return {@code true} if direct memory cache alignment is supported, {@code false} otherwise.
*/
public static boolean isDirectMemoryCacheAlignmentSupported() {
return PlatformDependent.hasUnsafe();
}
public boolean isDirectBufferPooled() {
return manager.isNative();
}
public int numArenas() {
return arenas.length;
}
final class PoolThreadLocalCache extends FastThreadLocal<PoolThreadCache> {
private final boolean useCacheForAllThreads;
PoolThreadLocalCache(boolean useCacheForAllThreads) {
this.useCacheForAllThreads = useCacheForAllThreads;
}
@Override
protected synchronized PoolThreadCache initialValue() {
final PoolArena arena = leastUsedArena(arenas);
final Thread current = Thread.currentThread();
if (useCacheForAllThreads || current instanceof FastThreadLocalThread) {
final PoolThreadCache cache = new PoolThreadCache(
arena, smallCacheSize, normalCacheSize,
DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL);
if (DEFAULT_CACHE_TRIM_INTERVAL_MILLIS > 0) {
final EventExecutor executor = ThreadExecutorMap.currentExecutor();
if (executor != null) {
executor.scheduleAtFixedRate(trimTask, DEFAULT_CACHE_TRIM_INTERVAL_MILLIS,
DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
}
}
return cache;
}
// No caching so just use 0 as sizes.
return new PoolThreadCache(null, 0, 0, 0, 0);
}
@Override
protected void onRemoval(PoolThreadCache threadCache) {
threadCache.free();
}
}
static PoolArena leastUsedArena(PoolArena[] arenas) {
if (arenas == null || arenas.length == 0) {
return null;
}
PoolArena minArena = arenas[0];
for (int i = 1; i < arenas.length; i++) {
PoolArena arena = arenas[i];
if (arena.numThreadCaches.get() < minArena.numThreadCaches.get()) {
minArena = arena;
}
}
return minArena;
}
@Override
public PooledBufferAllocatorMetric metric() {
return metric;
}
/**
     * Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool.
*/
List<PoolArenaMetric> arenaMetrics() {
return arenaMetricsView;
}
/**
* Return the number of thread local caches used by this {@link PooledBufferAllocator}.
*/
int numThreadLocalCaches() {
if (arenas == null) {
return 0;
}
int total = 0;
for (PoolArena arena : arenas) {
total += arena.numThreadCaches.get();
}
return total;
}
/**
* Return the size of the small cache.
*/
int smallCacheSize() {
return smallCacheSize;
}
/**
* Return the size of the normal cache.
*/
int normalCacheSize() {
return normalCacheSize;
}
/**
* Return the chunk size for an arena.
*/
final int chunkSize() {
return chunkSize;
}
final long usedMemory() {
return usedMemory(arenas);
}
private static long usedMemory(PoolArena[] arenas) {
if (arenas == null) {
return -1;
}
long used = 0;
for (PoolArena arena : arenas) {
used += arena.numActiveBytes();
if (used < 0) {
return Long.MAX_VALUE;
}
}
return used;
}
final PoolThreadCache threadCache() {
PoolThreadCache cache = threadCache.get();
assert cache != null;
return cache;
}
/**
* Trim thread local cache for the current {@link Thread}, which will give back any cached memory that was not
* allocated frequently since the last trim operation.
*
     * Returns {@code true} if a cache exists for the current {@link Thread} and was trimmed, {@code false} otherwise.
*/
public boolean trimCurrentThreadCache() {
PoolThreadCache cache = threadCache.getIfExists();
if (cache != null) {
cache.trim();
return true;
}
return false;
}
/**
     * Returns the status of the allocator (which contains all metrics) as a string. Be aware this may be expensive
* and so should not be called too frequently.
*/
public String dumpStats() {
int heapArenasLen = arenas == null ? 0 : arenas.length;
StringBuilder buf = new StringBuilder(512)
.append(heapArenasLen)
.append(" arena(s):")
.append(StringUtil.NEWLINE);
if (heapArenasLen > 0) {
for (PoolArena a: arenas) {
buf.append(a);
}
}
return buf.toString();
}
}
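As a usage sketch, not part of this change set: the allocator wraps a MemoryManager and hands out zero-filled Buffer instances from its arenas, while defaults such as arena count, page size and cache sizes come from the io.netty.allocator.* system properties read in the static initializer above (they must be set before the class is loaded, for example -Dio.netty.allocator.pageSize=16384). The snippet below assumes the pool and unsafe packages are reachable from the caller (the module descriptor in this change does not export them) and that Buffer is a closeable resource.
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.pool.PooledBufferAllocator;
import io.netty.buffer.api.unsafe.UnsafeMemoryManager;
public final class PooledAllocatorExample {
    public static void main(String[] args) {
        // Off-heap (native) memory manager; pass false for an on-heap pool.
        PooledBufferAllocator allocator = new PooledBufferAllocator(new UnsafeMemoryManager(true));
        Buffer buffer = allocator.allocate(256); // 256 bytes, zero-filled by allocate()
        try {
            // ... write to and read from the buffer here ...
        } finally {
            buffer.close();                     // assumed: Buffer is a closeable resource in this API
            allocator.trimCurrentThreadCache(); // give back rarely used cached memory
            allocator.close();                  // closes the arenas
        }
    }
}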

View File

@ -1,92 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.util.internal.StringUtil;
import java.util.List;
/**
* Exposed metric for {@link PooledBufferAllocator}.
*/
final class PooledBufferAllocatorMetric implements BufferAllocatorMetric {
private final PooledBufferAllocator allocator;
PooledBufferAllocatorMetric(PooledBufferAllocator allocator) {
this.allocator = allocator;
}
/**
* Return the number of arenas.
*/
public int numArenas() {
return allocator.numArenas();
}
/**
* Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool.
*/
public List<PoolArenaMetric> arenaMetrics() {
return allocator.arenaMetrics();
}
/**
* Return the number of thread local caches used by this {@link PooledBufferAllocator}.
*/
public int numThreadLocalCaches() {
return allocator.numThreadLocalCaches();
}
/**
* Return the size of the small cache.
*/
public int smallCacheSize() {
return allocator.smallCacheSize();
}
/**
* Return the size of the normal cache.
*/
public int normalCacheSize() {
return allocator.normalCacheSize();
}
/**
* Return the chunk size for an arena.
*/
public int chunkSize() {
return allocator.chunkSize();
}
@Override
public long usedMemory() {
return allocator.usedMemory();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(256);
sb.append(StringUtil.simpleClassName(this))
.append("(usedMemory: ").append(usedMemory())
.append("; numArenas: ").append(numArenas())
.append("; smallCacheSize: ").append(smallCacheSize())
.append("; normalCacheSize: ").append(normalCacheSize())
.append("; numThreadLocalCaches: ").append(numThreadLocalCaches())
.append("; chunkSize: ").append(chunkSize()).append(')');
return sb.toString();
}
}
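A small illustration of reading these metrics, written as if it lived in the io.netty.buffer.api.pool package itself, since the metric class is package-private:
// Assuming `allocator` is a PooledBufferAllocator instance:
PooledBufferAllocatorMetric metric = allocator.metric();
long used = metric.usedMemory();        // -1 when the allocator has no arenas
int arenas = metric.numArenas();
int caches = metric.numThreadLocalCaches();
System.out.println(metric);             // toString() summarises the same values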

View File

@ -1,40 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
class PooledDrop implements Drop<Buffer> {
private final PoolArena arena;
private final PoolChunk chunk;
private final PoolThreadCache threadCache;
private final long handle;
private final int normSize;
PooledDrop(PoolArena arena, PoolChunk chunk, PoolThreadCache threadCache, long handle, int normSize) {
this.arena = arena;
this.chunk = chunk;
this.threadCache = threadCache;
this.handle = handle;
this.normSize = normSize;
}
@Override
public void drop(Buffer obj) {
arena.free(chunk, handle, normSize, threadCache);
}
}

View File

@ -1,478 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import java.util.concurrent.ConcurrentHashMap;
/**
* SizeClasses requires {@code pageShifts} to be defined prior to inclusion,
* and it in turn defines:
* <p>
* LOG2_SIZE_CLASS_GROUP: Log of size class count for each size doubling.
* LOG2_MAX_LOOKUP_SIZE: Log of max size class in the lookup table.
* sizeClasses: Complete table of [index, log2Group, log2Delta, nDelta, isMultiPageSize,
* isSubPage, log2DeltaLookup] tuples.
* index: Size class index.
* log2Group: Log of group base size (no deltas added).
* log2Delta: Log of delta to previous size class.
* nDelta: Delta multiplier.
* isMultiPageSize: 'yes' if a multiple of the page size, 'no' otherwise.
* isSubPage: 'yes' if a subpage size class, 'no' otherwise.
* log2DeltaLookup: Same as log2Delta if a lookup table size class, 'no'
* otherwise.
* <p>
* nSubpages: Number of subpages size classes.
* nSizes: Number of size classes.
* nPSizes: Number of size classes that are multiples of pageSize.
*
* smallMaxSizeIdx: Maximum small size class index.
*
 * lookupMaxSize: Maximum size class included in the lookup table.
* log2NormalMinClass: Log of minimum normal size class.
* <p>
* The first size class and spacing are 1 << LOG2_QUANTUM.
* Each group has 1 << LOG2_SIZE_CLASS_GROUP of size classes.
*
* size = 1 << log2Group + nDelta * (1 << log2Delta)
*
* The first size class has an unusual encoding, because the size has to be
* split between group and delta*nDelta.
*
* If pageShift = 13, sizeClasses looks like this:
*
* (index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup)
* <p>
* ( 0, 4, 4, 0, no, yes, 4)
* ( 1, 4, 4, 1, no, yes, 4)
* ( 2, 4, 4, 2, no, yes, 4)
* ( 3, 4, 4, 3, no, yes, 4)
* <p>
* ( 4, 6, 4, 1, no, yes, 4)
* ( 5, 6, 4, 2, no, yes, 4)
* ( 6, 6, 4, 3, no, yes, 4)
* ( 7, 6, 4, 4, no, yes, 4)
* <p>
* ( 8, 7, 5, 1, no, yes, 5)
* ( 9, 7, 5, 2, no, yes, 5)
* ( 10, 7, 5, 3, no, yes, 5)
* ( 11, 7, 5, 4, no, yes, 5)
* ...
* ...
* ( 72, 23, 21, 1, yes, no, no)
* ( 73, 23, 21, 2, yes, no, no)
* ( 74, 23, 21, 3, yes, no, no)
* ( 75, 23, 21, 4, yes, no, no)
* <p>
* ( 76, 24, 22, 1, yes, no, no)
*/
abstract class SizeClasses implements SizeClassesMetric {
private static final ConcurrentHashMap<SizeClassKey, SizeClassValue> CACHE =
new ConcurrentHashMap<SizeClassKey, SizeClassValue>();
static final int LOG2_QUANTUM = 4;
private static final int LOG2_SIZE_CLASS_GROUP = 2;
private static final int LOG2_MAX_LOOKUP_SIZE = 12;
private static final int LOG2GROUP_IDX = 1;
private static final int LOG2DELTA_IDX = 2;
private static final int NDELTA_IDX = 3;
private static final int PAGESIZE_IDX = 4;
private static final int SUBPAGE_IDX = 5;
private static final int LOG2_DELTA_LOOKUP_IDX = 6;
private static final byte no = 0, yes = 1;
protected SizeClasses(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) {
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
this.directMemoryCacheAlignment = directMemoryCacheAlignment;
SizeClassValue value = CACHE.computeIfAbsent(
new SizeClassKey(pageSize, pageShifts, chunkSize, directMemoryCacheAlignment),
SizeClassValue::new);
nSizes = value.nSizes;
nSubpages = value.nSubpages;
nPSizes = value.nPSizes;
smallMaxSizeIdx = value.smallMaxSizeIdx;
lookupMaxSize = value.lookupMaxSize;
pageIdx2sizeTab = value.pageIdx2sizeTab;
sizeIdx2sizeTab = value.sizeIdx2sizeTab;
size2idxTab = value.size2idxTab;
}
protected final int pageSize;
protected final int pageShifts;
protected final int chunkSize;
protected final int directMemoryCacheAlignment;
final int nSizes;
final int nSubpages;
final int nPSizes;
final int smallMaxSizeIdx;
private final int lookupMaxSize;
private final int[] pageIdx2sizeTab;
// lookup table for sizeIdx <= smallMaxSizeIdx
private final int[] sizeIdx2sizeTab;
    // lookup table used for size <= lookupMaxSize
    // spacing is 1 << LOG2_QUANTUM, so the size of the array is lookupMaxSize >> LOG2_QUANTUM
private final int[] size2idxTab;
@Override
public int sizeIdx2size(int sizeIdx) {
return sizeIdx2sizeTab[sizeIdx];
}
@Override
public int sizeIdx2sizeCompute(int sizeIdx) {
int group = sizeIdx >> LOG2_SIZE_CLASS_GROUP;
int mod = sizeIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1;
int groupSize = group == 0? 0 :
1 << LOG2_QUANTUM + LOG2_SIZE_CLASS_GROUP - 1 << group;
int shift = group == 0? 1 : group;
int lgDelta = shift + LOG2_QUANTUM - 1;
int modSize = mod + 1 << lgDelta;
return groupSize + modSize;
}
@Override
public long pageIdx2size(int pageIdx) {
return pageIdx2sizeTab[pageIdx];
}
@Override
public long pageIdx2sizeCompute(int pageIdx) {
int group = pageIdx >> LOG2_SIZE_CLASS_GROUP;
int mod = pageIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1;
long groupSize = group == 0? 0 :
1L << pageShifts + LOG2_SIZE_CLASS_GROUP - 1 << group;
int shift = group == 0? 1 : group;
int log2Delta = shift + pageShifts - 1;
int modSize = mod + 1 << log2Delta;
return groupSize + modSize;
}
@Override
public int size2SizeIdx(int size) {
if (size == 0) {
return 0;
}
if (size > chunkSize) {
return nSizes;
}
if (directMemoryCacheAlignment > 0) {
size = alignSize(size);
}
if (size <= lookupMaxSize) {
//size-1 / MIN_TINY
return size2idxTab[size - 1 >> LOG2_QUANTUM];
}
int x = PoolThreadCache.log2((size << 1) - 1);
int shift = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? 0 : x - (LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM);
int group = shift << LOG2_SIZE_CLASS_GROUP;
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1;
int deltaInverseMask = -1 << log2Delta;
int mod = (size - 1 & deltaInverseMask) >> log2Delta &
(1 << LOG2_SIZE_CLASS_GROUP) - 1;
return group + mod;
}
@Override
public int pages2pageIdx(int pages) {
return pages2pageIdxCompute(pages, false);
}
@Override
public int pages2pageIdxFloor(int pages) {
return pages2pageIdxCompute(pages, true);
}
private int pages2pageIdxCompute(int pages, boolean floor) {
int pageSize = pages << pageShifts;
if (pageSize > chunkSize) {
return nPSizes;
}
int x = PoolThreadCache.log2((pageSize << 1) - 1);
int shift = x < LOG2_SIZE_CLASS_GROUP + pageShifts
? 0 : x - (LOG2_SIZE_CLASS_GROUP + pageShifts);
int group = shift << LOG2_SIZE_CLASS_GROUP;
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + pageShifts + 1?
pageShifts : x - LOG2_SIZE_CLASS_GROUP - 1;
int deltaInverseMask = -1 << log2Delta;
int mod = (pageSize - 1 & deltaInverseMask) >> log2Delta &
(1 << LOG2_SIZE_CLASS_GROUP) - 1;
int pageIdx = group + mod;
if (floor && pageIdx2sizeTab[pageIdx] > pages << pageShifts) {
pageIdx--;
}
return pageIdx;
}
// Round size up to the nearest multiple of alignment.
private int alignSize(int size) {
int delta = size & directMemoryCacheAlignment - 1;
return delta == 0? size : size + directMemoryCacheAlignment - delta;
}
@Override
public int normalizeSize(int size) {
if (size == 0) {
return sizeIdx2sizeTab[0];
}
if (directMemoryCacheAlignment > 0) {
size = alignSize(size);
}
if (size <= lookupMaxSize) {
int ret = sizeIdx2sizeTab[size2idxTab[size - 1 >> LOG2_QUANTUM]];
assert ret == normalizeSizeCompute(size);
return ret;
}
return normalizeSizeCompute(size);
}
private static int normalizeSizeCompute(int size) {
int x = PoolThreadCache.log2((size << 1) - 1);
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1;
int delta = 1 << log2Delta;
int delta_mask = delta - 1;
return size + delta_mask & ~delta_mask;
}
private static final class SizeClassKey {
final int pageSize;
final int pageShifts;
final int chunkSize;
final int directMemoryCacheAlignment;
private SizeClassKey(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) {
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
this.directMemoryCacheAlignment = directMemoryCacheAlignment;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SizeClassKey that = (SizeClassKey) o;
if (pageSize != that.pageSize) {
return false;
}
if (pageShifts != that.pageShifts) {
return false;
}
if (chunkSize != that.chunkSize) {
return false;
}
return directMemoryCacheAlignment == that.directMemoryCacheAlignment;
}
@Override
public int hashCode() {
int result = pageSize;
result = 31 * result + pageShifts;
result = 31 * result + chunkSize;
result = 31 * result + directMemoryCacheAlignment;
return result;
}
}
private static final class SizeClassValue {
final SizeClassKey key;
final int nSizes;
int nSubpages;
int nPSizes;
int smallMaxSizeIdx;
int lookupMaxSize;
final short[][] sizeClasses;
final int[] pageIdx2sizeTab;
final int[] sizeIdx2sizeTab;
final int[] size2idxTab;
SizeClassValue(SizeClassKey key) {
this.key = key;
int group = PoolThreadCache.log2(key.chunkSize) + 1 - LOG2_QUANTUM;
//generate size classes
//[index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup]
sizeClasses = new short[group << LOG2_SIZE_CLASS_GROUP][7];
nSizes = sizeClasses();
//generate lookup table
sizeIdx2sizeTab = new int[nSizes];
pageIdx2sizeTab = new int[nPSizes];
idx2SizeTab(sizeIdx2sizeTab, pageIdx2sizeTab);
size2idxTab = new int[lookupMaxSize >> LOG2_QUANTUM];
size2idxTab(size2idxTab);
}
private int sizeClasses() {
int normalMaxSize = -1;
int index = 0;
int size = 0;
int log2Group = LOG2_QUANTUM;
int log2Delta = LOG2_QUANTUM;
int ndeltaLimit = 1 << LOG2_SIZE_CLASS_GROUP;
//First small group, nDelta start at 0.
//first size class is 1 << LOG2_QUANTUM
int nDelta = 0;
while (nDelta < ndeltaLimit) {
size = sizeClass(index++, log2Group, log2Delta, nDelta++);
}
log2Group += LOG2_SIZE_CLASS_GROUP;
//All remaining groups, nDelta start at 1.
while (size < key.chunkSize) {
nDelta = 1;
while (nDelta <= ndeltaLimit && size < key.chunkSize) {
size = sizeClass(index++, log2Group, log2Delta, nDelta++);
normalMaxSize = size;
}
log2Group++;
log2Delta++;
}
//chunkSize must be normalMaxSize
assert key.chunkSize == normalMaxSize;
//return number of size index
return index;
}
//calculate size class
private int sizeClass(int index, int log2Group, int log2Delta, int nDelta) {
short isMultiPageSize;
if (log2Delta >= key.pageShifts) {
isMultiPageSize = yes;
} else {
int pageSize = 1 << key.pageShifts;
int size = (1 << log2Group) + (1 << log2Delta) * nDelta;
isMultiPageSize = size == size / pageSize * pageSize? yes : no;
}
int log2Ndelta = nDelta == 0? 0 : PoolThreadCache.log2(nDelta);
byte remove = 1 << log2Ndelta < nDelta? yes : no;
int log2Size = log2Delta + log2Ndelta == log2Group? log2Group + 1 : log2Group;
if (log2Size == log2Group) {
remove = yes;
}
short isSubpage = log2Size < key.pageShifts + LOG2_SIZE_CLASS_GROUP? yes : no;
int log2DeltaLookup = log2Size < LOG2_MAX_LOOKUP_SIZE ||
log2Size == LOG2_MAX_LOOKUP_SIZE && remove == no
? log2Delta : no;
short[] sz = {
(short) index, (short) log2Group, (short) log2Delta,
(short) nDelta, isMultiPageSize, isSubpage, (short) log2DeltaLookup
};
sizeClasses[index] = sz;
int size = (1 << log2Group) + (nDelta << log2Delta);
if (sz[PAGESIZE_IDX] == yes) {
nPSizes++;
}
if (sz[SUBPAGE_IDX] == yes) {
nSubpages++;
smallMaxSizeIdx = index;
}
if (sz[LOG2_DELTA_LOOKUP_IDX] != no) {
lookupMaxSize = size;
}
return size;
}
private void idx2SizeTab(int[] sizeIdx2sizeTab, int[] pageIdx2sizeTab) {
int pageIdx = 0;
for (int i = 0; i < nSizes; i++) {
short[] sizeClass = sizeClasses[i];
int log2Group = sizeClass[LOG2GROUP_IDX];
int log2Delta = sizeClass[LOG2DELTA_IDX];
int nDelta = sizeClass[NDELTA_IDX];
int size = (1 << log2Group) + (nDelta << log2Delta);
sizeIdx2sizeTab[i] = size;
if (sizeClass[PAGESIZE_IDX] == yes) {
pageIdx2sizeTab[pageIdx++] = size;
}
}
}
private void size2idxTab(int[] size2idxTab) {
int idx = 0;
int size = 0;
for (int i = 0; size <= lookupMaxSize; i++) {
int log2Delta = sizeClasses[i][LOG2DELTA_IDX];
int times = 1 << log2Delta - LOG2_QUANTUM;
while (size <= lookupMaxSize && times-- > 0) {
size2idxTab[idx++] = i;
size = idx + 1 << LOG2_QUANTUM;
}
}
}
}
}
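For intuition, the table in the class javadoc can be reproduced with plain arithmetic. The following standalone snippet, not part of Netty, evaluates the documented formula size = (1 << log2Group) + nDelta * (1 << log2Delta) for a few of the listed rows:
public final class SizeClassFormulaExample {
    // size = (1 << log2Group) + nDelta * (1 << log2Delta)
    static int size(int log2Group, int log2Delta, int nDelta) {
        return (1 << log2Group) + nDelta * (1 << log2Delta);
    }
    public static void main(String[] args) {
        System.out.println(size(4, 4, 1));   // index 1  -> 32 bytes
        System.out.println(size(6, 4, 4));   // index 7  -> 128 bytes
        System.out.println(size(7, 5, 4));   // index 11 -> 256 bytes
        System.out.println(size(23, 21, 4)); // index 75 -> 16 MiB
    }
}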

View File

@ -1,87 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
 * Exposes metrics for a {@link SizeClasses} implementation.
*/
public interface SizeClassesMetric {
/**
* Computes size from lookup table according to sizeIdx.
*
* @return size
*/
int sizeIdx2size(int sizeIdx);
/**
* Computes size according to sizeIdx.
*
* @return size
*/
int sizeIdx2sizeCompute(int sizeIdx);
/**
* Computes size from lookup table according to pageIdx.
*
* @return size which is multiples of pageSize.
*/
long pageIdx2size(int pageIdx);
/**
* Computes size according to pageIdx.
*
* @return size which is multiples of pageSize
*/
long pageIdx2sizeCompute(int pageIdx);
/**
* Normalizes request size up to the nearest size class.
*
* @param size request size
*
* @return sizeIdx of the size class
*/
int size2SizeIdx(int size);
/**
* Normalizes request size up to the nearest pageSize class.
*
* @param pages multiples of pageSizes
*
* @return pageIdx of the pageSize class
*/
int pages2pageIdx(int pages);
/**
* Normalizes request size down to the nearest pageSize class.
*
* @param pages multiples of pageSizes
*
* @return pageIdx of the pageSize class
*/
int pages2pageIdxFloor(int pages);
/**
* Normalizes usable size that would result from allocating an object with the
* specified size and alignment.
*
* @param size request size
*
* @return normalized size
*/
int normalizeSize(int size);
}
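As a hypothetical usage note: given any SizeClassesMetric (a pool arena, for instance), a request size can be mapped to its size class and back, which is how allocation sizes get rounded up. A hedged sketch, with `metric` standing in for some SizeClassesMetric implementation:
int requested = 5000;
int sizeIdx = metric.size2SizeIdx(requested);     // index of the owning size class
int capacity = metric.sizeIdx2size(sizeIdx);      // capacity actually reserved for the request
assert capacity >= requested;                     // rounding is upwards for sizes up to the chunk size
int normalized = metric.normalizeSize(requested); // shortcut that yields the rounded-up size directly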

View File

@ -1,45 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.Statics;
@SuppressWarnings("unchecked")
class UnpooledUnthetheredMemory implements AllocatorControl.UntetheredMemory {
private final MemoryManager manager;
private final Buffer buffer;
UnpooledUnthetheredMemory(PooledBufferAllocator allocator, MemoryManager manager, int size) {
this.manager = manager;
PooledAllocatorControl allocatorControl = new PooledAllocatorControl();
allocatorControl.parent = allocator;
buffer = manager.allocateShared(allocatorControl, size, manager.drop(), Statics.CLEANER);
}
@Override
public <Memory> Memory memory() {
return (Memory) manager.unwrapRecoverableMemory(buffer);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
return (Drop<BufferType>) manager.drop();
}
}

View File

@ -1,19 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* A pooling {@link io.netty.buffer.api.BufferAllocator} implementation based on jemalloc.
*/
package io.netty.buffer.api.pool;

View File

@ -1,55 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.unsafe;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.util.internal.PlatformDependent;
import java.lang.ref.Cleaner;
public class CleanerDrop implements Drop<Buffer> {
private final Drop<Buffer> drop;
public CleanerDrop(UnsafeMemory memory, Drop<Buffer> drop, Cleaner cleaner) {
this.drop = drop;
long address = memory.address;
cleaner.register(memory, new FreeAddress(address));
}
@Override
public void drop(Buffer obj) {
drop.drop(obj);
}
@Override
public void attach(Buffer obj) {
drop.attach(obj);
}
private static class FreeAddress implements Runnable {
private final long address;
FreeAddress(long address) {
this.address = address;
}
@Override
public void run() {
PlatformDependent.freeMemory(address);
}
}
}
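The class above pairs an explicit Drop with a java.lang.ref.Cleaner safety net; note that the cleanup action captures only the raw address, never the memory object itself, otherwise that object could never become unreachable. A minimal standalone sketch of the same pattern, assuming only that PlatformDependent is available:
import io.netty.util.internal.PlatformDependent;
import java.lang.ref.Cleaner;
final class CleanerPatternSketch {
    private static final Cleaner CLEANER = Cleaner.create();
    static Object allocateTracked(int size) {
        long address = PlatformDependent.allocateMemory(size);
        Object tracked = new Object();
        // The lambda captures only the primitive address, not `tracked`.
        CLEANER.register(tracked, () -> PlatformDependent.freeMemory(address));
        return tracked;
    }
}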

View File

@ -1,32 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.unsafe;
class UnsafeMemory {
final Object base;
final long address;
final int size;
UnsafeMemory(Object base, long address, int size) {
this.base = base;
this.address = address;
this.size = size;
}
public UnsafeMemory slice(int offset, int length) {
return new UnsafeMemory(base, address + offset, length);
}
}

View File

@ -1,102 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.unsafe;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.Statics;
import io.netty.util.internal.PlatformDependent;
import java.lang.ref.Cleaner;
import static io.netty.buffer.api.internal.Statics.convert;
public class UnsafeMemoryManager implements MemoryManager {
private final boolean offheap;
public UnsafeMemoryManager(boolean offheap) {
this.offheap = offheap;
}
@Override
public boolean isNative() {
return offheap;
}
@Override
public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner) {
final Object base;
final long address;
final UnsafeMemory memory;
final int size32 = Math.toIntExact(size);
if (cleaner == null) {
cleaner = Statics.CLEANER;
}
if (offheap) {
base = null;
address = PlatformDependent.allocateMemory(size);
PlatformDependent.setMemory(address, size, (byte) 0);
memory = new UnsafeMemory(base, address, size32);
drop = new CleanerDrop(memory, drop, cleaner);
} else {
base = new byte[size32];
address = PlatformDependent.byteArrayBaseOffset();
memory = new UnsafeMemory(base, address, size32);
}
return new UnsafeBuffer(memory, 0, size32, allocatorControl, convert(drop));
}
@Override
public Buffer allocateConstChild(Buffer readOnlyConstParent) {
assert readOnlyConstParent.readOnly();
UnsafeBuffer buf = (UnsafeBuffer) readOnlyConstParent;
return new UnsafeBuffer(buf);
}
@Override
public Drop<Buffer> drop() {
// We cannot reliably drop unsafe memory. We have to rely on the cleaner to do that.
return Statics.NO_OP_DROP;
}
@Override
public Object unwrapRecoverableMemory(Buffer buf) {
return ((UnsafeBuffer) buf).recover();
}
@Override
public int capacityOfRecoverableMemory(Object memory) {
return ((UnsafeMemory) memory).size;
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
// We cannot reliably drop unsafe memory. We have to rely on the cleaner to do that.
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
UnsafeMemory memory = (UnsafeMemory) recoverableMemory;
return new UnsafeBuffer(memory, 0, memory.size, allocatorControl, convert(drop));
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
return ((UnsafeMemory) memory).slice(offset, length);
}
}

View File

@ -1,51 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.unsafe;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.MemoryManagers;
import io.netty.util.internal.PlatformDependent;
public class UnsafeMemoryManagers implements MemoryManagers {
public UnsafeMemoryManagers() {
if (!PlatformDependent.hasUnsafe()) {
throw new UnsupportedOperationException("Unsafe is not available.");
}
if (!PlatformDependent.hasDirectBufferNoCleanerConstructor()) {
throw new UnsupportedOperationException("DirectByteBuffer internal constructor is not available.");
}
}
@Override
public MemoryManager getHeapMemoryManager() {
return new UnsafeMemoryManager(false);
}
@Override
public MemoryManager getNativeMemoryManager() {
return new UnsafeMemoryManager(true);
}
@Override
public String getImplementationName() {
return "Unsafe";
}
@Override
public String toString() {
return "US";
}
}
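Because the module descriptor in this change both uses and provides MemoryManagers, implementations such as the one above can be discovered through java.util.ServiceLoader. A hedged sketch; the helper class and method names are made up for illustration:
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.MemoryManagers;
import java.util.ServiceLoader;
final class MemoryManagersLookup {
    static MemoryManager findUnsafeNativeManager() {
        for (MemoryManagers managers : ServiceLoader.load(MemoryManagers.class)) {
            if ("Unsafe".equals(managers.getImplementationName())) {
                return managers.getNativeMemoryManager();
            }
        }
        throw new IllegalStateException("Unsafe-based MemoryManagers not found");
    }
}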

View File

@ -1,20 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* A {@link io.netty.buffer.api.Buffer} implementation that is based on {@code sun.misc.Unsafe}.
*/
package io.netty.buffer.api.unsafe;

View File

@ -1,41 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
import io.netty.buffer.api.MemoryManagers;
import io.netty.buffer.api.bytebuffer.ByteBufferMemoryManagers;
import io.netty.buffer.api.unsafe.UnsafeMemoryManagers;
module netty.incubator.buffer {
requires io.netty.common;
requires io.netty.buffer;
// Optional dependencies, needed for some examples.
requires static java.logging;//todo remove
exports io.netty.buffer.api;
exports io.netty.buffer.api.adaptor;
uses MemoryManagers;
// Permit reflective access to non-public members.
// Also means we don't have to make all test methods etc. public for JUnit to access them.
opens io.netty.buffer.api;
exports io.netty.buffer.api.internal;
opens io.netty.buffer.api.internal;//todo remove
provides MemoryManagers with
ByteBufferMemoryManagers,
UnsafeMemoryManagers;
}
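For consumers on the module path, only the exported packages above are visible. A downstream module descriptor might look like the following; the module name my.application is made up for illustration:
module my.application {
    requires netty.incubator.buffer;
}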

View File

@ -1,30 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Copyright 2021 The Netty Project
~
~ The Netty Project licenses this file to you under the Apache License,
~ version 2.0 (the "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at:
~
~ https://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
~ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
~ License for the specific language governing permissions and limitations
~ under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-parent</artifactId>
<version>0.0.1.Final-SNAPSHOT</version>
</parent>
<artifactId>netty-incubator-buffer-memseg-dummy</artifactId>
<version>0.0.1.Final-SNAPSHOT</version>
</project>

View File

@ -1,19 +0,0 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
module netty.incubator.buffer.memseg {
// Java 11 compatible stand-in module for the memory segment implementation.
// We need this module in order for the tests module to pull in the memseg module.
}

View File

@ -44,10 +44,6 @@
</build> </build>
<dependencies> <dependencies>
<dependency>
<groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-api</artifactId>
</dependency>
<dependency> <dependency>
<groupId>io.netty</groupId> <groupId>io.netty</groupId>
<artifactId>netty-common</artifactId> <artifactId>netty-common</artifactId>

View File

@ -1,81 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.memseg;
import io.netty.buffer.api.internal.ArcDrop;
import io.netty.buffer.api.internal.Statics;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import java.lang.ref.Cleaner;
public abstract class AbstractMemorySegmentManager implements MemoryManager {
@Override
public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner) {
var segment = createSegment(size, cleaner);
return new MemSegBuffer(segment, segment, Statics.convert(drop), allocatorControl);
}
@Override
public Buffer allocateConstChild(Buffer readOnlyConstParent) {
assert readOnlyConstParent.readOnly();
MemSegBuffer buf = (MemSegBuffer) readOnlyConstParent;
return new MemSegBuffer(buf);
}
protected abstract MemorySegment createSegment(long size, Cleaner cleaner);
@Override
public Drop<Buffer> drop() {
return Statics.convert(MemSegBuffer.SEGMENT_CLOSE);
}
@Override
public Object unwrapRecoverableMemory(Buffer buf) {
var b = (MemSegBuffer) buf;
return b.recoverableMemory();
}
@Override
public int capacityOfRecoverableMemory(Object memory) {
return (int) ((MemorySegment) memory).byteSize();
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
var segment = (MemorySegment) recoverableMemory;
ResourceScope scope = segment.scope();
if (!scope.isImplicit()) {
scope.close();
}
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
var segment = (MemorySegment) recoverableMemory;
return new MemSegBuffer(segment, segment, Statics.convert(ArcDrop.acquire(drop)), allocatorControl);
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
var segment = (MemorySegment) memory;
return segment.asSlice(offset, length);
}
}

View File

@ -1,32 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.memseg;
import jdk.incubator.foreign.MemorySegment;
import java.lang.ref.Cleaner;
public class HeapMemorySegmentManager extends AbstractMemorySegmentManager {
@Override
protected MemorySegment createSegment(long size, Cleaner cleaner) {
return MemorySegment.ofArray(new byte[Math.toIntExact(size)]);
}
@Override
public boolean isNative() {
return false;
}
}

View File

@ -15,25 +15,21 @@
*/ */
package io.netty.buffer.api.memseg; package io.netty.buffer.api.memseg;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferReadOnlyException;
import io.netty.buffer.api.adaptor.BufferIntegratable;
import io.netty.buffer.api.adaptor.ByteBufAdaptor;
import io.netty.buffer.api.adaptor.ByteBufAllocatorAdaptor;
import io.netty.buffer.api.internal.ArcDrop;
import io.netty.buffer.api.internal.Statics;
import io.netty.buffer.api.AllocatorControl; import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer; import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferReadOnlyException;
import io.netty.buffer.api.ByteCursor; import io.netty.buffer.api.ByteCursor;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.Owned;
import io.netty.buffer.api.ReadableComponent; import io.netty.buffer.api.ReadableComponent;
import io.netty.buffer.api.ReadableComponentProcessor; import io.netty.buffer.api.ReadableComponentProcessor;
import io.netty.buffer.api.WritableComponent; import io.netty.buffer.api.WritableComponent;
import io.netty.buffer.api.WritableComponentProcessor; import io.netty.buffer.api.WritableComponentProcessor;
import io.netty.buffer.api.Drop; import io.netty.buffer.api.adaptor.BufferIntegratable;
import io.netty.buffer.api.Owned; import io.netty.buffer.api.internal.AdaptableBuffer;
import io.netty.buffer.api.internal.ResourceSupport; import io.netty.buffer.api.internal.ArcDrop;
import io.netty.util.IllegalReferenceCountException; import io.netty.buffer.api.internal.Statics;
import jdk.incubator.foreign.MemorySegment; import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope; import jdk.incubator.foreign.ResourceScope;
@ -57,7 +53,7 @@ import static jdk.incubator.foreign.MemoryAccess.setIntAtOffset;
import static jdk.incubator.foreign.MemoryAccess.setLongAtOffset; import static jdk.incubator.foreign.MemoryAccess.setLongAtOffset;
import static jdk.incubator.foreign.MemoryAccess.setShortAtOffset; import static jdk.incubator.foreign.MemoryAccess.setShortAtOffset;
class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buffer, ReadableComponent, class MemSegBuffer extends AdaptableBuffer<MemSegBuffer> implements Buffer, ReadableComponent,
WritableComponent, BufferIntegratable { WritableComponent, BufferIntegratable {
private static final MemorySegment CLOSED_SEGMENT; private static final MemorySegment CLOSED_SEGMENT;
private static final MemorySegment ZERO_OFFHEAP_SEGMENT; private static final MemorySegment ZERO_OFFHEAP_SEGMENT;
@ -105,7 +101,7 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
this.base = base; this.base = base;
seg = view; seg = view;
wseg = view; wseg = view;
order = ByteOrder.nativeOrder(); order = ByteOrder.BIG_ENDIAN;
} }
/** /**
@ -120,7 +116,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
order = parent.order; order = parent.order;
roff = parent.roff; roff = parent.roff;
woff = parent.woff; woff = parent.woff;
adaptor = null;
constBuffer = true; constBuffer = true;
} }
@ -172,17 +167,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
return bufferIsClosed(this); return bufferIsClosed(this);
} }
@Override
public Buffer order(ByteOrder order) {
this.order = order;
return this;
}
@Override
public ByteOrder order() {
return order;
}
@Override @Override
public int capacity() { public int capacity() {
return (int) seg.byteSize(); return (int) seg.byteSize();
@ -333,7 +317,7 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
MemorySegment segment = memory.memory(); MemorySegment segment = memory.memory();
Buffer copy = new MemSegBuffer(segment, segment, memory.drop(), control); Buffer copy = new MemSegBuffer(segment, segment, memory.drop(), control);
copyInto(offset, copy, 0, length); copyInto(offset, copy, 0, length);
copy.writerOffset(length).order(order()); copy.writerOffset(length);
if (readOnly()) { if (readOnly()) {
copy = copy.makeReadOnly(); copy = copy.makeReadOnly();
} }
@ -369,14 +353,13 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
@Override @Override
public void copyInto(int srcPos, Buffer dest, int destPos, int length) { public void copyInto(int srcPos, Buffer dest, int destPos, int length) {
if (dest instanceof MemSegBuffer) { if (dest instanceof MemSegBuffer memSegBuf) {
var memSegBuf = (MemSegBuffer) dest;
memSegBuf.checkSet(destPos, length); memSegBuf.checkSet(destPos, length);
copyInto(srcPos, memSegBuf.seg, destPos, length); copyInto(srcPos, memSegBuf.seg, destPos, length);
return; return;
} }
Statics.copyToViaReverseCursor(this, srcPos, dest, destPos, length); Statics.copyToViaReverseLoop(this, srcPos, dest, destPos, length);
} }
@Override @Override
@ -406,7 +389,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
long longValue = -1; long longValue = -1;
byte byteValue = -1; byte byteValue = -1;
@Override
public boolean readLong() { public boolean readLong() {
if (index + Long.BYTES <= end) { if (index + Long.BYTES <= end) {
longValue = getLongAtOffset(segment, index, ByteOrder.BIG_ENDIAN); longValue = getLongAtOffset(segment, index, ByteOrder.BIG_ENDIAN);
@ -416,7 +398,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
return false; return false;
} }
@Override
public long getLong() { public long getLong() {
return longValue; return longValue;
} }
@ -479,7 +460,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
long longValue = -1; long longValue = -1;
byte byteValue = -1; byte byteValue = -1;
@Override
public boolean readLong() { public boolean readLong() {
if (index - Long.BYTES >= end) { if (index - Long.BYTES >= end) {
index -= 7; index -= 7;
@ -490,7 +470,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
return false; return false;
} }
@Override
public long getLong() { public long getLong() {
return longValue; return longValue;
} }
@ -523,7 +502,7 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
} }
@Override @Override
public void ensureWritable(int size, int minimumGrowth, boolean allowCompaction) { public Buffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction) {
if (!isAccessible()) { if (!isAccessible()) {
throw bufferIsClosed(this); throw bufferIsClosed(this);
} }
@ -542,18 +521,17 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
} }
if (writableBytes() >= size) { if (writableBytes() >= size) {
// We already have enough space. // We already have enough space.
return; return this;
} }
if (allowCompaction && writableBytes() + readerOffset() >= size) { if (allowCompaction && writableBytes() + readerOffset() >= size) {
// We can solve this with compaction. // We can solve this with compaction.
compact(); return compact();
return;
} }
// Allocate a bigger buffer. // Allocate a bigger buffer.
long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth); long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth);
BufferAllocator.checkSize(newSize); Statics.assertValidBufferSize(newSize);
var untethered = control.allocateUntethered(this, (int) newSize); var untethered = control.allocateUntethered(this, (int) newSize);
MemorySegment newSegment = untethered.memory(); MemorySegment newSegment = untethered.memory();
@ -564,6 +542,7 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
Drop<MemSegBuffer> drop = untethered.drop(); Drop<MemSegBuffer> drop = untethered.drop();
disconnectDrop(drop); disconnectDrop(drop);
attachNewMemorySegment(newSegment, drop); attachNewMemorySegment(newSegment, drop);
return this;
} }
private void disconnectDrop(Drop<MemSegBuffer> newDrop) { private void disconnectDrop(Drop<MemSegBuffer> newDrop) {
@ -606,7 +585,6 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
var splitBuffer = new MemSegBuffer(base, splitSegment, new ArcDrop<>(drop.increment()), control); var splitBuffer = new MemSegBuffer(base, splitSegment, new ArcDrop<>(drop.increment()), control);
splitBuffer.woff = Math.min(woff, splitOffset); splitBuffer.woff = Math.min(woff, splitOffset);
splitBuffer.roff = Math.min(roff, splitOffset); splitBuffer.roff = Math.min(roff, splitOffset);
splitBuffer.order(order);
boolean readOnly = readOnly(); boolean readOnly = readOnly();
if (readOnly) { if (readOnly) {
splitBuffer.makeReadOnly(); splitBuffer.makeReadOnly();
@ -623,7 +601,7 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
} }
@Override @Override
public void compact() { public Buffer compact() {
if (!isOwned()) { if (!isOwned()) {
throw attachTrace(new IllegalStateException("Buffer must be owned in order to compact.")); throw attachTrace(new IllegalStateException("Buffer must be owned in order to compact."));
} }
@ -632,11 +610,12 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
} }
int distance = roff; int distance = roff;
if (distance == 0) { if (distance == 0) {
return; return this;
} }
seg.copyFrom(seg.asSlice(roff, woff - roff)); seg.copyFrom(seg.asSlice(roff, woff - roff));
roff -= distance; roff -= distance;
woff -= distance; woff -= distance;
return this;
} }
@Override @Override
@ -1238,67 +1217,4 @@ class MemSegBuffer extends ResourceSupport<Buffer, MemSegBuffer> implements Buff
Object recoverableMemory() { Object recoverableMemory() {
return base; return base;
} }
// <editor-fold name="BufferIntegratable methods">
private ByteBufAdaptor adaptor;
@Override
public ByteBuf asByteBuf() {
ByteBufAdaptor bba = adaptor;
if (bba == null) {
ByteBufAllocatorAdaptor alloc = new ByteBufAllocatorAdaptor(
BufferAllocator.heap(), BufferAllocator.direct());
return adaptor = new ByteBufAdaptor(alloc, this);
}
return bba;
}
@Override
public MemSegBuffer retain(int increment) {
for (int i = 0; i < increment; i++) {
acquire();
}
return this;
}
@Override
public int refCnt() {
return isAccessible()? 1 + countBorrows() : 0;
}
@Override
public MemSegBuffer retain() {
return retain(1);
}
@Override
public MemSegBuffer touch() {
return this;
}
@Override
public MemSegBuffer touch(Object hint) {
return this;
}
@Override
public boolean release() {
return release(1);
}
@Override
public boolean release(int decrement) {
int refCount = 1 + countBorrows();
if (!isAccessible() || decrement > refCount) {
throw new IllegalReferenceCountException(refCount, -decrement);
}
for (int i = 0; i < decrement; i++) {
try {
close();
} catch (RuntimeException e) {
throw new IllegalReferenceCountException(e);
}
}
return !isAccessible();
}
// </editor-fold>
} }
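The hunks above change ensureWritable(...) and compact() from void to returning Buffer, and drop the ByteBuf-adaptor and reference-counting shims (asByteBuf, retain, release, refCnt) from this class, so lifetime is now handled only through close()/send(). A minimal sketch of the resulting fluent call style, assuming the single-argument ensureWritable(int) overload is available and behaves like the three-argument form above, and using an unpooled on-heap allocator purely for illustration:

    try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
         Buffer buf = allocator.allocate(16)) {
        buf.writeLong(0x0102030405060708L);
        buf.readInt();                      // consume four bytes so compact() has work to do
        buf.compact()                       // returns this: readable bytes shifted to offset 0
           .ensureWritable(16)              // also returns this, growing or compacting as needed
           .writeLong(0x1112131415161718L);
    }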

View File

@ -1,67 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.memseg;
import io.netty.buffer.api.internal.Statics;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import java.lang.ref.Cleaner;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import static jdk.incubator.foreign.ResourceScope.newSharedScope;
public class NativeMemorySegmentManager extends AbstractMemorySegmentManager {
private static final ConcurrentHashMap<Long, Runnable> CLEANUP_ACTIONS = new ConcurrentHashMap<>();
private static final Function<Long, Runnable> CLEANUP_ACTION_MAKER = s -> new ReduceNativeMemoryUsage(s);
static Runnable getCleanupAction(long size) {
return CLEANUP_ACTIONS.computeIfAbsent(size, CLEANUP_ACTION_MAKER);
}
private static final class ReduceNativeMemoryUsage implements Runnable {
private final long size;
private ReduceNativeMemoryUsage(long size) {
this.size = size;
}
@Override
public void run() {
Statics.MEM_USAGE_NATIVE.add(-size);
}
@Override
public String toString() {
return "ReduceNativeMemoryUsage(by " + size + " bytes)";
}
}
@Override
public boolean isNative() {
return true;
}
@Override
protected MemorySegment createSegment(long size, Cleaner cleaner) {
final ResourceScope scope = cleaner == null ? newSharedScope() : newSharedScope(cleaner);
scope.addCloseAction(getCleanupAction(size));
var segment = MemorySegment.allocateNative(size, scope);
Statics.MEM_USAGE_NATIVE.add(size);
return segment;
}
}

View File

@ -0,0 +1,21 @@
package io.netty.buffer.api.memseg;
import io.netty.buffer.api.internal.Statics;
final class ReduceNativeMemoryUsage implements Runnable {
private final long size;
ReduceNativeMemoryUsage(long size) {
this.size = size;
}
@Override
public void run() {
Statics.MEM_USAGE_NATIVE.add(-size);
}
@Override
public String toString() {
return "ReduceNativeMemoryUsage(by " + size + " bytes)";
}
}

View File

@ -15,27 +15,88 @@
*/ */
package io.netty.buffer.api.memseg; package io.netty.buffer.api.memseg;
import io.netty.buffer.api.AllocationType;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager; import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.MemoryManagers; import io.netty.buffer.api.StandardAllocationTypes;
import io.netty.buffer.api.internal.ArcDrop;
import io.netty.buffer.api.internal.Statics;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
public class SegmentMemoryManagers implements MemoryManagers { import java.lang.ref.Cleaner;
@Override import java.util.concurrent.ConcurrentHashMap;
public MemoryManager getHeapMemoryManager() { import java.util.function.Function;
return new HeapMemorySegmentManager();
import static jdk.incubator.foreign.ResourceScope.newSharedScope;
public class SegmentMemoryManagers implements MemoryManager {
private static final ConcurrentHashMap<Long, Runnable> CLEANUP_ACTIONS = new ConcurrentHashMap<>();
private static final Function<Long, Runnable> CLEANUP_ACTION_MAKER = s -> new ReduceNativeMemoryUsage(s);
static Runnable getCleanupAction(long size) {
return CLEANUP_ACTIONS.computeIfAbsent(size, CLEANUP_ACTION_MAKER);
}
private static MemorySegment createHeapSegment(long size) {
return MemorySegment.ofArray(new byte[Math.toIntExact(size)]);
}
private static MemorySegment createNativeSegment(long size, Cleaner cleaner) {
final ResourceScope scope = cleaner == null ? newSharedScope() : newSharedScope(cleaner);
scope.addCloseAction(getCleanupAction(size));
var segment = MemorySegment.allocateNative(size, scope);
Statics.MEM_USAGE_NATIVE.add(size);
return segment;
} }
@Override @Override
public MemoryManager getNativeMemoryManager() { public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner,
return new NativeMemorySegmentManager(); AllocationType type) {
if (type instanceof StandardAllocationTypes stype) {
var segment = switch (stype) {
case ON_HEAP -> createHeapSegment(size);
case OFF_HEAP -> createNativeSegment(size, cleaner);
};
return new MemSegBuffer(segment, segment, Statics.convert(drop), allocatorControl);
}
throw new IllegalArgumentException("Unknown allocation type: " + type);
} }
@Override @Override
public String getImplementationName() { public Buffer allocateConstChild(Buffer readOnlyConstParent) {
assert readOnlyConstParent.readOnly();
MemSegBuffer buf = (MemSegBuffer) readOnlyConstParent;
return new MemSegBuffer(buf);
}
@Override
public Drop<Buffer> drop() {
return Statics.convert(MemSegBuffer.SEGMENT_CLOSE);
}
@Override
public Object unwrapRecoverableMemory(Buffer buf) {
var b = (MemSegBuffer) buf;
return b.recoverableMemory();
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
var segment = (MemorySegment) recoverableMemory;
return new MemSegBuffer(segment, segment, Statics.convert(ArcDrop.acquire(drop)), allocatorControl);
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
var segment = (MemorySegment) memory;
return segment.asSlice(offset, length);
}
@Override
public String implementationName() {
return "MemorySegment"; return "MemorySegment";
} }
@Override
public String toString() {
return "MS";
}
} }
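A minimal sketch of the native-memory accounting that createNativeSegment wires up above: the usage counter is bumped at allocation time, and the cached ReduceNativeMemoryUsage action undoes the bump when the resource scope closes. This is package-private machinery, so the sketch assumes it runs inside io.netty.buffer.api.memseg with the jdk.incubator.foreign imports shown above; real callers go through the MemoryManager rather than allocating segments directly.

    long size = 4096;
    ResourceScope scope = ResourceScope.newSharedScope();
    scope.addCloseAction(SegmentMemoryManagers.getCleanupAction(size)); // one cached action per size
    MemorySegment segment = MemorySegment.allocateNative(size, scope);
    Statics.MEM_USAGE_NATIVE.add(size);   // usage goes up by the allocated size
    // ... use the segment ...
    scope.close();                        // runs the close action; usage returns to its previous value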

View File

@ -13,14 +13,13 @@
* License for the specific language governing permissions and limitations * License for the specific language governing permissions and limitations
* under the License. * under the License.
*/ */
import io.netty.buffer.api.MemoryManagers; import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.memseg.SegmentMemoryManagers; import io.netty.buffer.api.memseg.SegmentMemoryManagers;
module netty.incubator.buffer.memseg { module netty.incubator.buffer.memseg {
requires jdk.incubator.foreign; requires jdk.incubator.foreign;
requires io.netty.common; requires io.netty.common;
requires io.netty.buffer; requires io.netty.buffer;
requires netty.incubator.buffer;
// Optional dependencies, needed for some examples. // Optional dependencies, needed for some examples.
requires static java.logging; requires static java.logging;
@ -29,6 +28,6 @@ module netty.incubator.buffer.memseg {
// Also means we don't have to make all test methods etc. public for JUnit to access them. // Also means we don't have to make all test methods etc. public for JUnit to access them.
opens io.netty.buffer.api.memseg; opens io.netty.buffer.api.memseg;
provides MemoryManagers with provides MemoryManager with
SegmentMemoryManagers; SegmentMemoryManagers;
} }
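With the module now providing MemoryManager directly, the MemorySegment implementation can be discovered by name through the lookup facade used elsewhere in this commit. A hedged sketch, assuming the lookup key matches implementationName() above ("MemorySegment") and that using(...) installs the manager for allocators created inside the supplier, as the benchmark further down does:

    Optional<MemoryManager> maybeManager = MemoryManager.lookupImplementation("MemorySegment");
    if (maybeManager.isPresent()) {
        try (BufferAllocator allocator = MemoryManager.using(maybeManager.get(), BufferAllocator::offHeapUnpooled);
             Buffer buf = allocator.allocate(64)) {
            buf.writeInt(42);   // buffer backed by a MemorySegment
        }
    }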

View File

@ -15,8 +15,8 @@
*/ */
package io.netty.buffer.api.memseg.benchmarks; package io.netty.buffer.api.memseg.benchmarks;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Buffer; import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Fork;
@ -43,13 +43,13 @@ public class MemSegBufAccessBenchmark {
DIRECT { DIRECT {
@Override @Override
Buffer newBuffer() { Buffer newBuffer() {
return BufferAllocator.direct().allocate(64); return BufferAllocator.offHeapUnpooled().allocate(64);
} }
}, },
HEAP { HEAP {
@Override @Override
Buffer newBuffer() { Buffer newBuffer() {
return BufferAllocator.heap().allocate(64); return BufferAllocator.onHeapUnpooled().allocate(64);
} }
}, },
// COMPOSITE { // COMPOSITE {

View File

@ -16,9 +16,9 @@
package io.netty.buffer.api.memseg.benchmarks; package io.netty.buffer.api.memseg.benchmarks;
import io.netty.buffer.api.BufferAllocator; import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.memseg.SegmentMemoryManagers; import io.netty.buffer.api.memseg.SegmentMemoryManagers;
import io.netty.buffer.api.Buffer; import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.MemoryManagers;
import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Fork;
@ -64,9 +64,9 @@ public class MemorySegmentClosedByCleanerBenchmark {
} }
} }
var allocs = MemoryManagers.using(new SegmentMemoryManagers(), () -> { var allocs = MemoryManager.using(new SegmentMemoryManagers(), () -> {
return new Allocators(BufferAllocator.heap(), BufferAllocator.pooledHeap(), return new Allocators(BufferAllocator.onHeapUnpooled(), BufferAllocator.onHeapPooled(),
BufferAllocator.direct(), BufferAllocator.pooledDirect()); BufferAllocator.offHeapUnpooled(), BufferAllocator.offHeapPooled());
}); });
heap = allocs.heap; heap = allocs.heap;
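The two benchmark hunks above capture the allocator factory renaming in one place; only the names appear to have moved to the on-heap/off-heap, pooled/unpooled scheme. A short reference sketch of the mapping:

    BufferAllocator onHeap   = BufferAllocator.onHeapUnpooled();   // was BufferAllocator.heap()
    BufferAllocator offHeap  = BufferAllocator.offHeapUnpooled();  // was BufferAllocator.direct()
    BufferAllocator onHeapP  = BufferAllocator.onHeapPooled();     // was BufferAllocator.pooledHeap()
    BufferAllocator offHeapP = BufferAllocator.offHeapPooled();    // was BufferAllocator.pooledDirect()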

View File

@ -127,9 +127,13 @@
</build> </build>
<dependencies> <dependencies>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-buffer</artifactId>
</dependency>
<dependency> <dependency>
<groupId>io.netty.incubator</groupId> <groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-api</artifactId> <artifactId>netty-incubator-buffer-memseg</artifactId>
</dependency> </dependency>
<dependency> <dependency>
<groupId>org.junit.jupiter</groupId> <groupId>org.junit.jupiter</groupId>
@ -189,31 +193,4 @@
<artifactId>jmh-generator-annprocess</artifactId> <artifactId>jmh-generator-annprocess</artifactId>
</dependency> </dependency>
</dependencies> </dependencies>
<profiles>
<profile>
<id>Java 17 support</id>
<activation>
<jdk>17</jdk>
</activation>
<dependencies>
<dependency>
<groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-memseg</artifactId>
</dependency>
</dependencies>
</profile>
<profile>
<id>Java 11 support for tests</id>
<activation>
<jdk>!17</jdk>
</activation>
<dependencies>
<dependency>
<groupId>io.netty.incubator</groupId>
<artifactId>netty-incubator-buffer-memseg-dummy</artifactId>
</dependency>
</dependencies>
</profile>
</profiles>
</project> </project>

View File

@ -1,3 +1,5 @@
import io.netty.buffer.api.MemoryManager;
/* /*
* Copyright 2021 The Netty Project * Copyright 2021 The Netty Project
* *
@ -20,8 +22,7 @@ open module netty.incubator.buffer.tests {
// Optional dependencies, needed for some examples. // Optional dependencies, needed for some examples.
requires static java.logging; requires static java.logging;
requires netty.incubator.buffer;
// We need to require memseg in order for its implementation to be service loaded. // We need to require memseg in order for its implementation to be service loaded.
// Just having it on the module path is not enough. // Just having it on the module path is not enough.
requires netty.incubator.buffer.memseg; requires static netty.incubator.buffer.memseg;
} }

View File

@ -17,15 +17,12 @@ package io.netty.buffer.api.tests;
import io.netty.buffer.api.Buffer; import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator; import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.CompositeBuffer;
import io.netty.buffer.api.Scope;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import static java.nio.ByteOrder.BIG_ENDIAN; import static io.netty.buffer.api.CompositeBuffer.compose;
import static java.nio.ByteOrder.LITTLE_ENDIAN;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
public class BufferBulkAccessTest extends BufferTestSupport { public class BufferBulkAccessTest extends BufferTestSupport {
@ -46,18 +43,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
void copyIntoByteArray(Fixture fixture) { void copyIntoByteArray(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN).writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
byte[] array = new byte[8]; byte[] array = new byte[8];
buf.copyInto(0, array, 0, array.length); buf.copyInto(0, array, 0, array.length);
assertThat(array).containsExactly(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08); assertThat(array).containsExactly(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08);
buf.writerOffset(0).order(LITTLE_ENDIAN).writeLong(0x0102030405060708L);
buf.copyInto(0, array, 0, array.length);
assertThat(array).containsExactly(0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01);
array = new byte[6]; array = new byte[6];
buf.copyInto(1, array, 1, 3); buf.copyInto(1, array, 1, 3);
assertThat(array).containsExactly(0x00, 0x07, 0x06, 0x05, 0x00, 0x00); assertThat(array).containsExactly(0x00, 0x02, 0x03, 0x04, 0x00, 0x00);
} }
} }
@ -76,26 +69,26 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoOnHeapBuf(Fixture fixture) { void copyIntoOnHeapBuf(Fixture fixture) {
testCopyIntoBuf(fixture, BufferAllocator.heap()::allocate); testCopyIntoBuf(fixture, BufferAllocator.onHeapUnpooled()::allocate);
} }
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoOffHeapBuf(Fixture fixture) { void copyIntoOffHeapBuf(Fixture fixture) {
testCopyIntoBuf(fixture, BufferAllocator.direct()::allocate); testCopyIntoBuf(fixture, BufferAllocator.offHeapUnpooled()::allocate);
} }
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOnHeapOnHeapBuf(Fixture fixture) { void copyIntoCompositeOnHeapOnHeapBuf(Fixture fixture) {
try (var a = BufferAllocator.heap(); try (var a = BufferAllocator.onHeapUnpooled();
var b = BufferAllocator.heap()) { var b = BufferAllocator.onHeapUnpooled()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send()); return compose(a, bufFirst.send(), bufSecond.send());
} }
}); });
} }
@ -104,14 +97,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOnHeapOffHeapBuf(Fixture fixture) { void copyIntoCompositeOnHeapOffHeapBuf(Fixture fixture) {
try (var a = BufferAllocator.heap(); try (var a = BufferAllocator.onHeapUnpooled();
var b = BufferAllocator.direct()) { var b = BufferAllocator.offHeapUnpooled()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send()); return compose(a, bufFirst.send(), bufSecond.send());
} }
}); });
} }
@ -120,14 +113,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOffHeapOnHeapBuf(Fixture fixture) { void copyIntoCompositeOffHeapOnHeapBuf(Fixture fixture) {
try (var a = BufferAllocator.direct(); try (var a = BufferAllocator.offHeapUnpooled();
var b = BufferAllocator.heap()) { var b = BufferAllocator.onHeapUnpooled()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send()); return compose(a, bufFirst.send(), bufSecond.send());
} }
}); });
} }
@ -136,14 +129,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOffHeapOffHeapBuf(Fixture fixture) { void copyIntoCompositeOffHeapOffHeapBuf(Fixture fixture) {
try (var a = BufferAllocator.direct(); try (var a = BufferAllocator.offHeapUnpooled();
var b = BufferAllocator.direct()) { var b = BufferAllocator.offHeapUnpooled()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send()); return compose(a, bufFirst.send(), bufSecond.send());
} }
}); });
} }
@ -152,15 +145,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOnHeapOnHeapBufCopy(Fixture fixture) { void copyIntoCompositeOnHeapOnHeapBufCopy(Fixture fixture) {
try (var a = BufferAllocator.heap(); try (var a = BufferAllocator.onHeapUnpooled();
var b = BufferAllocator.heap(); var b = BufferAllocator.onHeapUnpooled()) {
var scope = new Scope()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return scope.add(CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send())).writerOffset(size).copy(); return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy();
} }
}); });
} }
@ -169,15 +161,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOnHeapOffHeapBufCopy(Fixture fixture) { void copyIntoCompositeOnHeapOffHeapBufCopy(Fixture fixture) {
try (var a = BufferAllocator.heap(); try (var a = BufferAllocator.onHeapUnpooled();
var b = BufferAllocator.direct(); var b = BufferAllocator.offHeapUnpooled()) {
var scope = new Scope()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return scope.add(CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send())).writerOffset(size).copy(); return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy();
} }
}); });
} }
@ -186,15 +177,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOffHeapOnHeapBufCopy(Fixture fixture) { void copyIntoCompositeOffHeapOnHeapBufCopy(Fixture fixture) {
try (var a = BufferAllocator.direct(); try (var a = BufferAllocator.offHeapUnpooled();
var b = BufferAllocator.heap(); var b = BufferAllocator.onHeapUnpooled()) {
var scope = new Scope()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return scope.add(CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send())).writerOffset(size).copy(); return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy();
} }
}); });
} }
@ -203,15 +193,14 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void copyIntoCompositeOffHeapOffHeapBufCopy(Fixture fixture) { void copyIntoCompositeOffHeapOffHeapBufCopy(Fixture fixture) {
try (var a = BufferAllocator.direct(); try (var a = BufferAllocator.offHeapUnpooled();
var b = BufferAllocator.direct(); var b = BufferAllocator.offHeapUnpooled()) {
var scope = new Scope()) {
testCopyIntoBuf(fixture, size -> { testCopyIntoBuf(fixture, size -> {
int first = size / 2; int first = size / 2;
int second = size - first; int second = size - first;
try (var bufFirst = a.allocate(first); try (var bufFirst = a.allocate(first);
var bufSecond = b.allocate(second)) { var bufSecond = b.allocate(second)) {
return scope.add(CompositeBuffer.compose(a, bufFirst.send(), bufSecond.send())).writerOffset(size).copy(); return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy();
} }
}); });
} }
@ -219,48 +208,22 @@ public class BufferBulkAccessTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void byteIterationOfBigEndianBuffers(Fixture fixture) { void byteIterationOfBuffers(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(0x28)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN); // The byte order should have no impact.
checkByteIteration(buf); checkByteIteration(buf);
buf.reset(); buf.resetOffsets();
checkByteIterationOfRegion(buf); checkByteIterationOfRegion(buf);
} }
} }
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void byteIterationOfLittleEndianBuffers(Fixture fixture) { void reverseByteIterationOfBuffers(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(0x28)) { Buffer buf = allocator.allocate(0x28)) {
buf.order(LITTLE_ENDIAN); // The byte order should have no impact.
checkByteIteration(buf);
buf.reset();
checkByteIterationOfRegion(buf);
}
}
@ParameterizedTest
@MethodSource("allocators")
void reverseByteIterationOfBigEndianBuffers(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(0x28)) {
buf.order(BIG_ENDIAN); // The byte order should have no impact.
checkReverseByteIteration(buf); checkReverseByteIteration(buf);
buf.reset(); buf.resetOffsets();
checkReverseByteIterationOfRegion(buf);
}
}
@ParameterizedTest
@MethodSource("allocators")
void reverseByteIterationOfLittleEndianBuffers(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(0x28)) {
buf.order(LITTLE_ENDIAN); // The byte order should have no impact.
checkReverseByteIteration(buf);
buf.reset();
checkReverseByteIterationOfRegion(buf); checkReverseByteIterationOfRegion(buf);
} }
} }
@ -295,4 +258,17 @@ public class BufferBulkAccessTest extends BufferTestSupport {
assertThat(toByteArray(buffer)).containsExactly(1, 2, 3, 4, 5, 6, 7, 0); assertThat(toByteArray(buffer)).containsExactly(1, 2, 3, 4, 5, 6, 7, 0);
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void writeBytesWithOffsetMustWriteAllBytesFromByteArray(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buffer = allocator.allocate(3)) {
buffer.writeByte((byte) 1);
buffer.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}, 1, 2);
assertThat(buffer.writerOffset()).isEqualTo(3);
assertThat(buffer.readerOffset()).isZero();
assertThat(toByteArray(buffer)).containsExactly(1, 3, 4);
}
}
} }
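The tests above lean on the compose-from-sends pattern throughout. A minimal sketch of that pattern outside the test harness, assuming an unpooled on-heap allocator: the components are handed over via send(), the composite adopts their offsets, and the data is then read back as one big-endian buffer.

    try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
        CompositeBuffer composite;
        try (Buffer first = allocator.allocate(4);
             Buffer second = allocator.allocate(4)) {
            first.writeInt(0x01020304);
            second.writeInt(0x05060708);
            composite = CompositeBuffer.compose(allocator, first.send(), second.send());
        }
        try (composite) {
            long value = composite.readLong();   // 0x0102030405060708L, read across the component seam
        }
    }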

View File

@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferByteOffsettedAccessorsTest extends BufferTestSupport { public class BufferByteOffsettedAccessorsTest extends BufferTestSupport {
@ -59,7 +58,6 @@ public class BufferByteOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
byte value = 0x01; byte value = 0x01;
buf.writeByte(value); buf.writeByte(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@ -159,7 +157,6 @@ public class BufferByteOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01; int value = 0x01;
buf.writeUnsignedByte(value); buf.writeUnsignedByte(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@ -278,7 +275,6 @@ public class BufferByteOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
byte value = 0x01; byte value = 0x01;
buf.setByte(0, value); buf.setByte(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@ -326,7 +322,6 @@ public class BufferByteOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01; int value = 0x01;
buf.setUnsignedByte(0, value); buf.setUnsignedByte(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);

View File

@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferCharOffsettedAccessorsTest extends BufferTestSupport { public class BufferCharOffsettedAccessorsTest extends BufferTestSupport {
@ -58,7 +57,6 @@ public class BufferCharOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
char value = 0x0102; char value = 0x0102;
buf.writeChar(value); buf.writeChar(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@ -175,7 +173,6 @@ public class BufferCharOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
char value = 0x0102; char value = 0x0102;
buf.setChar(0, value); buf.setChar(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);

View File

@ -15,39 +15,35 @@
*/ */
package io.netty.buffer.api.tests; package io.netty.buffer.api.tests;
import io.netty.buffer.api.MemoryManagers; import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.Statics; import io.netty.buffer.api.internal.Statics;
import org.junit.jupiter.api.condition.DisabledForJreRange;
import org.junit.jupiter.api.condition.JRE;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import java.util.List; import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.Stream; import java.util.stream.Stream;
import static io.netty.buffer.api.MemoryManagers.using; import static io.netty.buffer.api.MemoryManager.using;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
public class BufferCleanerTest extends BufferTestSupport { public class BufferCleanerTest extends BufferTestSupport {
@SuppressWarnings("OptionalGetWithoutIsPresent") static Fixture[] unsafeAllocators() {
static Fixture[] memorySegmentAllocators() { Optional<MemoryManager> maybeManager = MemoryManager.lookupImplementation("Unsafe");
MemoryManagers managers = MemoryManagers.getAllManagers() assumeTrue(maybeManager.isPresent());
.map(p -> p.get()) MemoryManager manager = maybeManager.get();
.filter(mm -> "MS".equals(mm.toString()))
.findFirst().get();
List<Fixture> initFixtures = initialAllocators().stream().flatMap(f -> { List<Fixture> initFixtures = initialAllocators().stream().flatMap(f -> {
Stream.Builder<Fixture> builder = Stream.builder(); Stream.Builder<Fixture> builder = Stream.builder();
builder.add(new Fixture(f + "/" + managers, () -> using(managers, f), f.getProperties())); builder.add(new Fixture(f + "/" + manager, () -> using(manager, f), f.getProperties()));
return builder.build(); return builder.build();
}).collect(Collectors.toList()); }).collect(Collectors.toList());
return fixtureCombinations(initFixtures).filter(f -> f.isDirect()).toArray(Fixture[]::new); return fixtureCombinations(initFixtures).filter(f -> f.isDirect()).toArray(Fixture[]::new);
} }
// Only run this one on JDK 17.
@DisabledForJreRange(min = JRE.JAVA_11, max = JRE.JAVA_16)
@ParameterizedTest @ParameterizedTest
@MethodSource("memorySegmentAllocators") @MethodSource("unsafeAllocators")
public void bufferMustBeClosedByCleaner(Fixture fixture) throws InterruptedException { public void bufferMustBeClosedByCleaner(Fixture fixture) throws InterruptedException {
var initial = Statics.MEM_USAGE_NATIVE.sum(); var initial = Statics.MEM_USAGE_NATIVE.sum();
int allocationSize = 1024; int allocationSize = 1024;

View File

@ -22,7 +22,6 @@ import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static io.netty.buffer.api.internal.Statics.acquire; import static io.netty.buffer.api.internal.Statics.acquire;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferCompactTest extends BufferTestSupport { public class BufferCompactTest extends BufferTestSupport {
@ -31,7 +30,7 @@ public class BufferCompactTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void compactMustDiscardReadBytes(Fixture fixture) { public void compactMustDiscardReadBytes(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(16, BIG_ENDIAN)) { Buffer buf = allocator.allocate(16)) {
buf.writeLong(0x0102030405060708L).writeInt(0x090A0B0C); buf.writeLong(0x0102030405060708L).writeInt(0x090A0B0C);
assertEquals(0x01020304, buf.readInt()); assertEquals(0x01020304, buf.readInt());
assertEquals(12, buf.writerOffset()); assertEquals(12, buf.writerOffset());
@ -53,7 +52,7 @@ public class BufferCompactTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void compactMustThrowForUnownedBuffer(Fixture fixture) { public void compactMustThrowForUnownedBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8, BIG_ENDIAN)) { Buffer buf = allocator.allocate(8)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
assertEquals((byte) 0x01, buf.readByte()); assertEquals((byte) 0x01, buf.readByte());
try (Buffer ignore = acquire((ResourceSupport<?, ?>) buf)) { try (Buffer ignore = acquire((ResourceSupport<?, ?>) buf)) {

View File

@ -31,8 +31,6 @@ import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static java.nio.ByteOrder.LITTLE_ENDIAN;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
@ -70,7 +68,7 @@ public class BufferComponentIterationTest extends BufferTestSupport {
@Test @Test
public void compositeBufferComponentCountMustBeTransitiveSum() { public void compositeBufferComponentCountMustBeTransitiveSum() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Buffer buf; Buffer buf;
try (Buffer a = allocator.allocate(8); try (Buffer a = allocator.allocate(8);
Buffer b = allocator.allocate(8); Buffer b = allocator.allocate(8);
@ -107,20 +105,16 @@ public class BufferComponentIterationTest extends BufferTestSupport {
public void forEachReadableMustVisitBuffer(Fixture fixture) { public void forEachReadableMustVisitBuffer(Fixture fixture) {
long value = 0x0102030405060708L; long value = 0x0102030405060708L;
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer bufBERW = allocator.allocate(8).order(BIG_ENDIAN).writeLong(value); Buffer bufBERW = allocator.allocate(8).writeLong(value);
Buffer bufLERW = allocator.allocate(8).order(LITTLE_ENDIAN).writeLong(value); Buffer bufBERO = allocator.allocate(8).writeLong(value).makeReadOnly()) {
Buffer bufBERO = allocator.allocate(8).order(BIG_ENDIAN).writeLong(value).makeReadOnly();
Buffer bufLERO = allocator.allocate(8).order(LITTLE_ENDIAN).writeLong(value).makeReadOnly()) {
verifyForEachReadableSingleComponent(fixture, bufBERW); verifyForEachReadableSingleComponent(fixture, bufBERW);
verifyForEachReadableSingleComponent(fixture, bufLERW);
verifyForEachReadableSingleComponent(fixture, bufBERO); verifyForEachReadableSingleComponent(fixture, bufBERO);
verifyForEachReadableSingleComponent(fixture, bufLERO);
} }
} }
@Test @Test
public void forEachReadableMustVisitAllReadableConstituentBuffersInOrder() { public void forEachReadableMustVisitAllReadableConstituentBuffersInOrder() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Buffer composite; Buffer composite;
try (Buffer a = allocator.allocate(4); try (Buffer a = allocator.allocate(4);
Buffer b = allocator.allocate(4); Buffer b = allocator.allocate(4);
@ -157,7 +151,7 @@ public class BufferComponentIterationTest extends BufferTestSupport {
@Test @Test
public void forEachReadableMustStopIterationWhenProcessorReturnsFalse() { public void forEachReadableMustStopIterationWhenProcessorReturnsFalse() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Buffer composite; Buffer composite;
try (Buffer a = allocator.allocate(4); try (Buffer a = allocator.allocate(4);
Buffer b = allocator.allocate(4); Buffer b = allocator.allocate(4);
@ -228,21 +222,18 @@ public class BufferComponentIterationTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void forEachReadableMustExposeByteCursors(Fixture fixture) { public void forEachReadableMustExposeByteCursors(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(32).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(32)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
buf.writeLong(0x1112131415161718L); buf.writeLong(0x1112131415161718L);
assertEquals(0x01020304, buf.readInt()); assertEquals(0x01020304, buf.readInt());
try (Buffer actualData = allocator.allocate(buf.readableBytes()).order(BIG_ENDIAN); try (Buffer actualData = allocator.allocate(buf.readableBytes());
Buffer expectedData = allocator.allocate(12).order(BIG_ENDIAN)) { Buffer expectedData = allocator.allocate(12)) {
expectedData.writeInt(0x05060708); expectedData.writeInt(0x05060708);
expectedData.writeInt(0x11121314); expectedData.writeInt(0x11121314);
expectedData.writeInt(0x15161718); expectedData.writeInt(0x15161718);
buf.forEachReadable(0, (i, component) -> { buf.forEachReadable(0, (i, component) -> {
ByteCursor forward = component.openCursor(); ByteCursor forward = component.openCursor();
while (forward.readLong()) {
actualData.writeLong(forward.getLong());
}
while (forward.readByte()) { while (forward.readByte()) {
actualData.writeByte(forward.getByte()); actualData.writeByte(forward.getByte());
} }
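For reference, a minimal sketch of the byte-wise cursor protocol the updated test uses above (the long-wise readLong()/getLong() loop was dropped there); buf is assumed to be a readable Buffer:

    buf.forEachReadable(0, (index, component) -> {
        ByteCursor cursor = component.openCursor();
        while (cursor.readByte()) {          // advance one byte at a time
            byte b = cursor.getByte();       // the byte at the cursor's current position
            // process b ...
        }
        return true;                         // keep visiting any remaining components
    });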
@ -261,23 +252,20 @@ public class BufferComponentIterationTest extends BufferTestSupport {
@MethodSource("nonCompositeAllocators") @MethodSource("nonCompositeAllocators")
public void forEachWritableMustVisitBuffer(Fixture fixture) { public void forEachWritableMustVisitBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer bufBERW = allocator.allocate(8).order(BIG_ENDIAN); Buffer bufBERW = allocator.allocate(8)) {
Buffer bufLERW = allocator.allocate(8).order(LITTLE_ENDIAN)) {
verifyForEachWritableSingleComponent(fixture, bufBERW); verifyForEachWritableSingleComponent(fixture, bufBERW);
verifyForEachWritableSingleComponent(fixture, bufLERW);
} }
} }
@Test @Test
public void forEachWritableMustVisitAllWritableConstituentBuffersInOrder() { public void forEachWritableMustVisitAllWritableConstituentBuffersInOrder() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Buffer buf; Buffer buf;
try (Buffer a = allocator.allocate(8); try (Buffer a = allocator.allocate(8);
Buffer b = allocator.allocate(8); Buffer b = allocator.allocate(8);
Buffer c = allocator.allocate(8)) { Buffer c = allocator.allocate(8)) {
buf = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send()); buf = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send());
} }
buf.order(BIG_ENDIAN);
buf.forEachWritable(0, (index, component) -> { buf.forEachWritable(0, (index, component) -> {
component.writableBuffer().putLong(0x0102030405060708L + 0x1010101010101010L * index); component.writableBuffer().putLong(0x0102030405060708L + 0x1010101010101010L * index);
return true; return true;
@ -317,7 +305,7 @@ public class BufferComponentIterationTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void forEachWritableChangesMadeToByteBufferComponentMustBeReflectedInBuffer(Fixture fixture) { public void forEachWritableChangesMadeToByteBufferComponentMustBeReflectedInBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(9).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(9)) {
buf.writeByte((byte) 0xFF); buf.writeByte((byte) 0xFF);
AtomicInteger writtenCounter = new AtomicInteger(); AtomicInteger writtenCounter = new AtomicInteger();
buf.forEachWritable(0, (index, component) -> { buf.forEachWritable(0, (index, component) -> {

View File

@ -26,12 +26,8 @@ import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import java.nio.ByteOrder;
import static io.netty.buffer.api.internal.Statics.acquire; import static io.netty.buffer.api.internal.Statics.acquire;
import static io.netty.buffer.api.internal.Statics.isOwned; import static io.netty.buffer.api.internal.Statics.isOwned;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static java.nio.ByteOrder.LITTLE_ENDIAN;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
@ -39,7 +35,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
public class BufferCompositionTest extends BufferTestSupport { public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void compositeBuffersCannotHaveDuplicateComponents() { public void compositeBuffersCannotHaveDuplicateComponents() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Send<Buffer> a = allocator.allocate(4).send(); Send<Buffer> a = allocator.allocate(4).send();
var e = assertThrows(IllegalStateException.class, () -> CompositeBuffer.compose(allocator, a, a)); var e = assertThrows(IllegalStateException.class, () -> CompositeBuffer.compose(allocator, a, a));
assertThat(e).hasMessageContaining("already been received"); assertThat(e).hasMessageContaining("already been received");
@ -54,7 +50,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void compositeBufferFromSends() { public void compositeBufferFromSends() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer composite = CompositeBuffer.compose(allocator, Buffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send(), allocator.allocate(8).send(),
@ -66,13 +62,13 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void compositeBufferMustNotBeAllowedToContainThemselves() { public void compositeBufferMustNotBeAllowedToContainThemselves() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer bufA = CompositeBuffer.compose(allocator, allocator.allocate(4).send()); CompositeBuffer bufA = CompositeBuffer.compose(allocator, allocator.allocate(4).send());
Send<Buffer> sendA = bufA.send(); Send<Buffer> sendA = bufA.send();
try { try {
assertThrows(BufferClosedException.class, () -> bufA.extendWith(sendA)); assertThrows(BufferClosedException.class, () -> bufA.extendWith(sendA));
} finally { } finally {
sendA.discard(); sendA.close();
} }
CompositeBuffer bufB = CompositeBuffer.compose(allocator, allocator.allocate(4).send()); CompositeBuffer bufB = CompositeBuffer.compose(allocator, allocator.allocate(4).send());
@ -80,7 +76,7 @@ public class BufferCompositionTest extends BufferTestSupport {
try (CompositeBuffer compositeBuffer = CompositeBuffer.compose(allocator, sendB)) { try (CompositeBuffer compositeBuffer = CompositeBuffer.compose(allocator, sendB)) {
assertThrows(IllegalStateException.class, () -> compositeBuffer.extendWith(sendB)); assertThrows(IllegalStateException.class, () -> compositeBuffer.extendWith(sendB));
} finally { } finally {
sendB.discard(); sendB.close();
} }
} }
} }
@ -90,7 +86,7 @@ public class BufferCompositionTest extends BufferTestSupport {
public void ensureWritableOnCompositeBuffersMustRespectExistingBigEndianByteOrder(Fixture fixture) { public void ensureWritableOnCompositeBuffersMustRespectExistingBigEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) { try (BufferAllocator allocator = fixture.createAllocator()) {
Buffer composite; Buffer composite;
try (Buffer a = allocator.allocate(4, BIG_ENDIAN)) { try (Buffer a = allocator.allocate(4)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
} }
try (composite) { try (composite) {
@ -102,29 +98,9 @@ public class BufferCompositionTest extends BufferTestSupport {
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void ensureWritableOnCompositeBuffersMustRespectExistingLittleEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer composite = CompositeBuffer.compose(allocator, allocator.allocate(4, LITTLE_ENDIAN).send())) {
composite.writeInt(0x05060708);
composite.ensureWritable(4);
composite.writeInt(0x01020304);
assertEquals(0x0102030405060708L, composite.readLong());
}
}
@Test
public void emptyCompositeBufferMustUseNativeByteOrder() {
try (BufferAllocator allocator = BufferAllocator.heap();
Buffer composite = CompositeBuffer.compose(allocator)) {
assertThat(composite.order()).isEqualTo(ByteOrder.nativeOrder());
}
}
@Test @Test
public void extendOnNonCompositeBufferMustThrow() { public void extendOnNonCompositeBufferMustThrow() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer a = allocator.allocate(8); Buffer a = allocator.allocate(8);
Buffer b = allocator.allocate(8)) { Buffer b = allocator.allocate(8)) {
assertThrows(ClassCastException.class, () -> ((CompositeBuffer) a).extendWith(b.send())); assertThrows(ClassCastException.class, () -> ((CompositeBuffer) a).extendWith(b.send()));
@ -133,7 +109,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void extendingNonOwnedCompositeBufferMustThrow() { public void extendingNonOwnedCompositeBufferMustThrow() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer a = allocator.allocate(8); Buffer a = allocator.allocate(8);
Buffer b = allocator.allocate(8); Buffer b = allocator.allocate(8);
CompositeBuffer composed = CompositeBuffer.compose(allocator, a.send())) { CompositeBuffer composed = CompositeBuffer.compose(allocator, a.send())) {
@ -146,7 +122,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void extendingCompositeBufferWithItselfMustThrow() { public void extendingCompositeBufferWithItselfMustThrow() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8)) { try (Buffer a = allocator.allocate(8)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@ -159,13 +135,13 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void extendingWithZeroCapacityBufferHasNoEffect() { public void extendingWithZeroCapacityBufferHasNoEffect() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator)) { CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
composite.extendWith(CompositeBuffer.compose(allocator).send()); composite.extendWith(CompositeBuffer.compose(allocator).send());
assertThat(composite.capacity()).isZero(); assertThat(composite.capacity()).isZero();
assertThat(composite.countComponents()).isZero(); assertThat(composite.countComponents()).isZero();
} }
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
Buffer a = allocator.allocate(1); Buffer a = allocator.allocate(1);
CompositeBuffer composite = CompositeBuffer.compose(allocator, a.send()); CompositeBuffer composite = CompositeBuffer.compose(allocator, a.send());
assertTrue(isOwned(composite)); assertTrue(isOwned(composite));
@ -182,18 +158,18 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void extendingCompositeBufferWithNullMustThrow() { public void extendingCompositeBufferWithNullMustThrow() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator)) { CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
assertThrows(NullPointerException.class, () -> composite.extendWith(null)); assertThrows(NullPointerException.class, () -> composite.extendWith(null));
} }
} }
@Test @Test
public void extendingCompositeBufferMustIncreaseCapacityByGivenBigEndianBuffer() { public void extendingCompositeBufferMustIncreaseCapacityByGivenBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator)) { CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
assertThat(composite.capacity()).isZero(); assertThat(composite.capacity()).isZero();
try (Buffer buf = allocator.allocate(8, BIG_ENDIAN)) { try (Buffer buf = allocator.allocate(8)) {
composite.extendWith(buf.send()); composite.extendWith(buf.send());
} }
assertThat(composite.capacity()).isEqualTo(8); assertThat(composite.capacity()).isEqualTo(8);
@ -203,72 +179,12 @@ public class BufferCompositionTest extends BufferTestSupport {
} }
@Test @Test
public void extendingCompositeBufferMustIncreaseCapacityByGivenLittleEndianBuffer() { public void emptyCompositeBufferMustAllowExtendingWithBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite = CompositeBuffer.compose(allocator)) { try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
assertThat(composite.capacity()).isZero(); try (Buffer b = allocator.allocate(8)) {
try (Buffer buf = allocator.allocate(8, LITTLE_ENDIAN)) { composite.extendWith(b.send());
composite.extendWith(buf.send());
}
assertThat(composite.capacity()).isEqualTo(8); assertThat(composite.capacity()).isEqualTo(8);
composite.writeLong(0x0102030405060708L);
assertThat(composite.readLong()).isEqualTo(0x0102030405060708L);
}
}
@Test
public void extendingBigEndianCompositeBufferMustThrowIfExtensionIsLittleEndian() {
try (BufferAllocator allocator = BufferAllocator.heap()) {
CompositeBuffer composite;
try (Buffer a = allocator.allocate(8, BIG_ENDIAN)) {
composite = CompositeBuffer.compose(allocator, a.send());
}
try (composite) {
try (Buffer b = allocator.allocate(8, LITTLE_ENDIAN)) {
var exc = assertThrows(IllegalArgumentException.class,
() -> composite.extendWith(b.send()));
assertThat(exc).hasMessageContaining("byte order");
}
}
}
}
@Test
public void extendingLittleEndianCompositeBufferMustThrowIfExtensionIsBigEndian() {
try (BufferAllocator allocator = BufferAllocator.heap()) {
CompositeBuffer composite;
try (Buffer a = allocator.allocate(8, LITTLE_ENDIAN)) {
composite = CompositeBuffer.compose(allocator, a.send());
}
try (composite) {
try (Buffer b = allocator.allocate(8, BIG_ENDIAN)) {
var exc = assertThrows(IllegalArgumentException.class,
() -> composite.extendWith(b.send()));
assertThat(exc).hasMessageContaining("byte order");
}
}
}
}
@Test
public void emptyCompositeBufferMustAllowExtendingWithBufferWithBigEndianByteOrder() {
try (BufferAllocator allocator = BufferAllocator.heap()) {
try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
try (Buffer b = allocator.allocate(8, BIG_ENDIAN)) {
composite.extendWith(b.send());
assertThat(composite.order()).isEqualTo(BIG_ENDIAN);
}
}
}
}
@Test
public void emptyCompositeBufferMustAllowExtendingWithBufferWithLittleEndianByteOrder() {
try (BufferAllocator allocator = BufferAllocator.heap()) {
try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
try (Buffer b = allocator.allocate(8, LITTLE_ENDIAN)) {
composite.extendWith(b.send());
assertThat(composite.order()).isEqualTo(LITTLE_ENDIAN);
} }
} }
} }
@@ -276,7 +192,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void emptyCompositeBufferMustAllowExtendingWithReadOnlyBuffer() { public void emptyCompositeBufferMustAllowExtendingWithReadOnlyBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) { try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
try (Buffer b = allocator.allocate(8).makeReadOnly()) { try (Buffer b = allocator.allocate(8).makeReadOnly()) {
composite.extendWith(b.send()); composite.extendWith(b.send());
@@ -288,7 +204,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void whenExtendingCompositeBufferWithWriteOffsetAtCapacityExtensionWriteOffsetCanBeNonZero() { public void whenExtendingCompositeBufferWithWriteOffsetAtCapacityExtensionWriteOffsetCanBeNonZero() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8)) { try (Buffer a = allocator.allocate(8)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@@ -307,7 +223,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void whenExtendingCompositeBufferWithWriteOffsetLessThanCapacityExtensionWriteOffsetMustZero() { public void whenExtendingCompositeBufferWithWriteOffsetLessThanCapacityExtensionWriteOffsetMustZero() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8)) { try (Buffer a = allocator.allocate(8)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@@ -332,7 +248,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void whenExtendingCompositeBufferWithReadOffsetAtCapacityExtensionReadOffsetCanBeNonZero() { public void whenExtendingCompositeBufferWithReadOffsetAtCapacityExtensionReadOffsetCanBeNonZero() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8)) { try (Buffer a = allocator.allocate(8)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@@ -353,7 +269,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void whenExtendingCompositeBufferWithReadOffsetLessThanCapacityExtensionReadOffsetMustZero() { public void whenExtendingCompositeBufferWithReadOffsetLessThanCapacityExtensionReadOffsetMustZero() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, allocator.allocate(8).send())) { CompositeBuffer composite = CompositeBuffer.compose(allocator, allocator.allocate(8).send())) {
composite.writeLong(0); composite.writeLong(0);
composite.readInt(); composite.readInt();
@@ -375,18 +291,9 @@ public class BufferCompositionTest extends BufferTestSupport {
} }
} }
@Test
public void composeMustThrowWhenBuffersHaveMismatchedByteOrder() {
try (BufferAllocator allocator = BufferAllocator.heap();
Buffer a = allocator.allocate(4, BIG_ENDIAN);
Buffer b = allocator.allocate(4, LITTLE_ENDIAN)) {
assertThrows(IllegalArgumentException.class, () -> CompositeBuffer.compose(allocator, a.send(), b.send()));
}
}
@Test @Test
public void composingReadOnlyBuffersMustCreateReadOnlyCompositeBuffer() { public void composingReadOnlyBuffersMustCreateReadOnlyCompositeBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer a = allocator.allocate(4).makeReadOnly(); Buffer a = allocator.allocate(4).makeReadOnly();
Buffer b = allocator.allocate(4).makeReadOnly(); Buffer b = allocator.allocate(4).makeReadOnly();
Buffer composite = CompositeBuffer.compose(allocator, a.send(), b.send())) { Buffer composite = CompositeBuffer.compose(allocator, a.send(), b.send())) {
@@ -397,14 +304,16 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void composingReadOnlyAndWritableBuffersMustThrow() { public void composingReadOnlyAndWritableBuffersMustThrow() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
try (Buffer a = allocator.allocate(8).makeReadOnly(); try (Buffer a = allocator.allocate(8).makeReadOnly();
Buffer b = allocator.allocate(8)) { Buffer b = allocator.allocate(8)) {
assertThrows(IllegalArgumentException.class, () -> CompositeBuffer.compose(allocator, a.send(), b.send())); assertThrows(IllegalArgumentException.class,
() -> CompositeBuffer.compose(allocator, a.send(), b.send()));
} }
try (Buffer a = allocator.allocate(8).makeReadOnly(); try (Buffer a = allocator.allocate(8).makeReadOnly();
Buffer b = allocator.allocate(8)) { Buffer b = allocator.allocate(8)) {
assertThrows(IllegalArgumentException.class, () -> CompositeBuffer.compose(allocator, b.send(), a.send())); assertThrows(IllegalArgumentException.class,
() -> CompositeBuffer.compose(allocator, b.send(), a.send()));
} }
try (Buffer a = allocator.allocate(8).makeReadOnly(); try (Buffer a = allocator.allocate(8).makeReadOnly();
Buffer b = allocator.allocate(8); Buffer b = allocator.allocate(8);
@@ -423,7 +332,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void compositeWritableBufferCannotBeExtendedWithReadOnlyBuffer() { public void compositeWritableBufferCannotBeExtendedWithReadOnlyBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8)) { try (Buffer a = allocator.allocate(8)) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@@ -436,7 +345,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void compositeReadOnlyBufferCannotBeExtendedWithWritableBuffer() { public void compositeReadOnlyBufferCannotBeExtendedWithWritableBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap()) { try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
CompositeBuffer composite; CompositeBuffer composite;
try (Buffer a = allocator.allocate(8).makeReadOnly()) { try (Buffer a = allocator.allocate(8).makeReadOnly()) {
composite = CompositeBuffer.compose(allocator, a.send()); composite = CompositeBuffer.compose(allocator, a.send());
@@ -449,7 +358,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsFloorMustThrowOnOutOfBounds() { public void splitComponentsFloorMustThrowOnOutOfBounds() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -464,7 +373,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustThrowOnOutOfBounds() { public void splitComponentsCeilMustThrowOnOutOfBounds() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -479,7 +388,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsFloorMustGiveEmptyBufferForOffsetInFirstComponent() { public void splitComponentsFloorMustGiveEmptyBufferForOffsetInFirstComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -497,7 +406,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsFloorMustGiveEmptyBufferForOffsetLastByteInFirstComponent() { public void splitComponentsFloorMustGiveEmptyBufferForOffsetLastByteInFirstComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -515,7 +424,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetInSecondComponent() { public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetInSecondComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -533,7 +442,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetOnFirstByteInSecondComponent() { public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetOnFirstByteInSecondComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -551,7 +460,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustGiveBufferWithFirstComponentForOffsetInFirstComponent() { public void splitComponentsCeilMustGiveBufferWithFirstComponentForOffsetInFirstComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -569,7 +478,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustGiveBufferWithFirstComponentFofOffsetOnLastByteInFirstComponent() { public void splitComponentsCeilMustGiveBufferWithFirstComponentFofOffsetOnLastByteInFirstComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -587,7 +496,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustGiveBufferWithFirstAndSecondComponentForfOffsetInSecondComponent() { public void splitComponentsCeilMustGiveBufferWithFirstAndSecondComponentForfOffsetInSecondComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -602,7 +511,7 @@ public class BufferCompositionTest extends BufferTestSupport {
} }
} }
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send(), allocator.allocate(8).send(),
@@ -621,7 +530,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustGiveBufferWithFirstComponentForfOffsetOnFirstByteInSecondComponent() { public void splitComponentsCeilMustGiveBufferWithFirstComponentForfOffsetOnFirstByteInSecondComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
@@ -639,7 +548,7 @@ public class BufferCompositionTest extends BufferTestSupport {
@Test @Test
public void splitComponentsCeilMustGiveEmptyBufferForOffsetOnFirstByteInFirstComponent() { public void splitComponentsCeilMustGiveEmptyBufferForOffsetOnFirstByteInFirstComponent() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
CompositeBuffer composite = CompositeBuffer.compose(allocator, CompositeBuffer composite = CompositeBuffer.compose(allocator,
allocator.allocate(8).send(), allocator.allocate(8).send(),
allocator.allocate(8).send())) { allocator.allocate(8).send())) {
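For readers following these composition tests, here is a minimal, self-contained sketch of the CompositeBuffer usage they exercise, built only from calls visible in the diffs above (onHeapUnpooled(), compose(), extendWith(), send()); it is illustrative only, not part of the commit, and the io.netty.buffer.api package names are taken from the imports shown in these diffs:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.CompositeBuffer;

public final class ComposeSketch {
    public static void main(String[] args) {
        try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
             CompositeBuffer composite = CompositeBuffer.compose(allocator)) {
            // An empty composite buffer starts with zero capacity.
            assert composite.capacity() == 0;
            // Extending requires transferring ownership of the extension via send().
            try (Buffer buf = allocator.allocate(8)) {
                composite.extendWith(buf.send());
            }
            assert composite.capacity() == 8;
            composite.writeLong(0x0102030405060708L);
            assert composite.readLong() == 0x0102030405060708L;
        }
    }
}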


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferDoubleOffsettedAccessorsTest extends BufferTestSupport { public class BufferDoubleOffsettedAccessorsTest extends BufferTestSupport {
@@ -58,7 +57,6 @@ public class BufferDoubleOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
double value = Double.longBitsToDouble(0x0102030405060708L); double value = Double.longBitsToDouble(0x0102030405060708L);
buf.writeDouble(value); buf.writeDouble(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -157,7 +155,6 @@ public class BufferDoubleOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
double value = Double.longBitsToDouble(0x0102030405060708L); double value = Double.longBitsToDouble(0x0102030405060708L);
buf.setDouble(0, value); buf.setDouble(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
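The recurring change in these accessor tests is the removal of the per-buffer buf.order(BIG_ENDIAN) calls: byte order is no longer configurable, and the accessors use the default order that the remaining "DefaultEndianByteOrder" assertions rely on. A small sketch of what that means in practice, assuming the big-endian default implied by the byte-by-byte checks elsewhere in this commit (writeLong(0x0102030405060708L) followed by readByte() == 0x01); illustrative only:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

public final class DefaultOrderSketch {
    public static void main(String[] args) {
        try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
             Buffer buf = allocator.allocate(8)) {
            buf.writeLong(0x0102030405060708L);
            // Under the default (big-endian) layout the most significant byte comes first.
            assert buf.readByte() == (byte) 0x01;
        }
    }
}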


@@ -77,7 +77,7 @@ public class BufferEnsureWritableTest extends BufferTestSupport {
@Test @Test
public void ensureWritableMustExpandCapacityOfEmptyCompositeBuffer() { public void ensureWritableMustExpandCapacityOfEmptyCompositeBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer buf = CompositeBuffer.compose(allocator)) { Buffer buf = CompositeBuffer.compose(allocator)) {
assertThat(buf.writableBytes()).isEqualTo(0); assertThat(buf.writableBytes()).isEqualTo(0);
buf.ensureWritable(8); buf.ensureWritable(8);
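A brief sketch of the behaviour this test relies on: an empty composite buffer has no writable bytes until ensureWritable() provisions backing storage on demand. The >= 8 outcome below is the expectation implied by the test name ensureWritableMustExpandCapacityOfEmptyCompositeBuffer; the sketch is illustrative only:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.CompositeBuffer;

public final class EnsureWritableSketch {
    public static void main(String[] args) {
        try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
             Buffer buf = CompositeBuffer.compose(allocator)) {
            assert buf.writableBytes() == 0;   // empty composite: nothing to write into yet
            buf.ensureWritable(8);             // expands capacity on demand
            assert buf.writableBytes() >= 8;
        }
    }
}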


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferFloatOffsettedAccessorsTest extends BufferTestSupport { public class BufferFloatOffsettedAccessorsTest extends BufferTestSupport {
@@ -59,7 +58,6 @@ public class BufferFloatOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
float value = Float.intBitsToFloat(0x01020304); float value = Float.intBitsToFloat(0x01020304);
buf.writeFloat(value); buf.writeFloat(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -176,7 +174,6 @@ public class BufferFloatOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
float value = Float.intBitsToFloat(0x01020304); float value = Float.intBitsToFloat(0x01020304);
buf.setFloat(0, value); buf.setFloat(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferIntOffsettedAccessorsTest extends BufferTestSupport { public class BufferIntOffsettedAccessorsTest extends BufferTestSupport {
@@ -58,7 +57,6 @@ public class BufferIntOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01020304; int value = 0x01020304;
buf.writeInt(value); buf.writeInt(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -158,7 +156,6 @@ public class BufferIntOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x01020304; long value = 0x01020304;
buf.writeUnsignedInt(value); buf.writeUnsignedInt(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -277,7 +274,6 @@ public class BufferIntOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01020304; int value = 0x01020304;
buf.setInt(0, value); buf.setInt(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -325,7 +321,6 @@ public class BufferIntOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x01020304; long value = 0x01020304;
buf.setUnsignedInt(0, value); buf.setUnsignedInt(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);


@@ -20,23 +20,23 @@ import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferClosedException; import io.netty.buffer.api.BufferClosedException;
import io.netty.buffer.api.CompositeBuffer; import io.netty.buffer.api.CompositeBuffer;
import io.netty.buffer.api.internal.ResourceSupport; import io.netty.buffer.api.internal.ResourceSupport;
import io.netty.util.internal.EmptyArrays;
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Supplier;
import static io.netty.buffer.api.internal.Statics.acquire; import static io.netty.buffer.api.internal.Statics.acquire;
import static io.netty.buffer.api.internal.Statics.isOwned; import static io.netty.buffer.api.internal.Statics.isOwned;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static java.nio.ByteOrder.LITTLE_ENDIAN;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
public class BufferReferenceCountingTest extends BufferTestSupport { public class BufferLifeCycleTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void allocateAndAccessingBuffer(Fixture fixture) { void allocateAndAccessingBuffer(Fixture fixture) {
@@ -72,6 +72,25 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
} }
@ParameterizedTest
@MethodSource("initialCombinations")
public void allocatingZeroSizedBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) {
Supplier<Buffer> supplier = allocator.constBufferSupplier(EmptyArrays.EMPTY_BYTES);
try (Buffer empty = supplier.get()) {
assertThat(empty.capacity()).isZero();
assertTrue(empty.readOnly());
}
try (Buffer empty = allocator.allocate(0)) {
assertThat(empty.capacity()).isZero();
empty.ensureWritable(8);
assertThat(empty.capacity()).isGreaterThanOrEqualTo(8);
}
}
}
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
void acquireOnClosedBufferMustThrow(Fixture fixture) { void acquireOnClosedBufferMustThrow(Fixture fixture) {
@@ -171,10 +190,10 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
try (Buffer copy = buf.copy()) { try (Buffer copy = buf.copy()) {
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
assertTrue(isOwned((ResourceSupport<?, ?>) copy)); assertTrue(isOwned((ResourceSupport<?, ?>) copy));
copy.send().discard(); copy.send().close();
} }
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
buf.send().discard(); buf.send().close();
} }
} }
@@ -186,10 +205,10 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
try (Buffer copy = buf.copy(0, 8)) { try (Buffer copy = buf.copy(0, 8)) {
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
assertTrue(isOwned((ResourceSupport<?, ?>) copy)); assertTrue(isOwned((ResourceSupport<?, ?>) copy));
copy.send().discard(); copy.send().close();
} }
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
buf.send().discard(); buf.send().close();
} }
} }
@@ -198,15 +217,10 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
void copyWithoutOffsetAndSizeHasSameEndianAsParent(Fixture fixture) { void copyWithoutOffsetAndSizeHasSameEndianAsParent(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
try (Buffer copy = buf.copy()) { try (Buffer copy = buf.copy()) {
assertEquals(0x0102030405060708L, copy.readLong()); assertEquals(0x0102030405060708L, copy.readLong());
} }
buf.order(LITTLE_ENDIAN);
try (Buffer copy = buf.copy()) {
assertEquals(0x0807060504030201L, copy.readLong());
}
} }
} }
@@ -215,15 +229,10 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
void copyWithOffsetAndSizeHasSameEndianAsParent(Fixture fixture) { void copyWithOffsetAndSizeHasSameEndianAsParent(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
try (Buffer copy = buf.copy(0, 8)) { try (Buffer copy = buf.copy(0, 8)) {
assertEquals(0x0102030405060708L, copy.readLong()); assertEquals(0x0102030405060708L, copy.readLong());
} }
buf.order(LITTLE_ENDIAN);
try (Buffer copy = buf.copy(0, 8)) {
assertEquals(0x0807060504030201L, copy.readLong());
}
} }
} }
@@ -234,7 +243,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
try (Buffer copy = buf.copy()) { try (Buffer copy = buf.copy()) {
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
copy.send().discard(); copy.send().close();
} }
// Verify that the copy is closed properly afterwards. // Verify that the copy is closed properly afterwards.
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
@@ -249,7 +258,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
try (Buffer copy = buf.copy(0, 8)) { try (Buffer copy = buf.copy(0, 8)) {
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
copy.send().discard(); copy.send().close();
} }
// Verify that the copy is closed properly afterwards. // Verify that the copy is closed properly afterwards.
assertTrue(isOwned((ResourceSupport<?, ?>) buf)); assertTrue(isOwned((ResourceSupport<?, ?>) buf));
@@ -305,7 +314,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
public void copyMustBeOwned(Fixture fixture) { void copyMustBeOwned(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) { try (BufferAllocator allocator = fixture.createAllocator()) {
Buffer buf = allocator.allocate(8); Buffer buf = allocator.allocate(8);
buf.writeInt(42); buf.writeInt(42);
@@ -323,6 +332,17 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void copyOfLastByte(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8).writeLong(0x0102030405060708L);
Buffer copy = buf.copy(7, 1)) {
assertThat(copy.capacity()).isOne();
assertEquals((byte) 0x08, copy.readByte());
}
}
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
public void pooledBuffersMustResetStateBeforeReuse(Fixture fixture) { public void pooledBuffersMustResetStateBeforeReuse(Fixture fixture) {
@@ -335,13 +355,11 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
assertEquals(expected.readerOffset(), buf.readerOffset()); assertEquals(expected.readerOffset(), buf.readerOffset());
assertEquals(expected.writableBytes(), buf.writableBytes()); assertEquals(expected.writableBytes(), buf.writableBytes());
assertEquals(expected.writerOffset(), buf.writerOffset()); assertEquals(expected.writerOffset(), buf.writerOffset());
assertThat(buf.order()).isEqualTo(expected.order());
byte[] bytes = new byte[8]; byte[] bytes = new byte[8];
buf.copyInto(0, bytes, 0, 8); buf.copyInto(0, bytes, 0, 8);
assertThat(bytes).containsExactly(0, 0, 0, 0, 0, 0, 0, 0); assertThat(bytes).containsExactly(0, 0, 0, 0, 0, 0, 0, 0);
var tlr = ThreadLocalRandom.current(); var tlr = ThreadLocalRandom.current();
buf.order(tlr.nextBoolean()? LITTLE_ENDIAN : BIG_ENDIAN);
for (int j = 0; j < tlr.nextInt(0, 8); j++) { for (int j = 0; j < tlr.nextInt(0, 8); j++) {
buf.writeByte((byte) 1); buf.writeByte((byte) 1);
} }
@@ -439,7 +457,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void splitPartMustContainFirstHalfOfBuffer(Fixture fixture) { public void splitPartMustContainFirstHalfOfBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(16).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(16)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
assertThat(buf.readByte()).isEqualTo((byte) 0x01); assertThat(buf.readByte()).isEqualTo((byte) 0x01);
try (Buffer split = buf.split()) { try (Buffer split = buf.split()) {
@@ -475,7 +493,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void splitPartsMustBeIndividuallySendable(Fixture fixture) { public void splitPartsMustBeIndividuallySendable(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(16).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(16)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
assertThat(buf.readByte()).isEqualTo((byte) 0x01); assertThat(buf.readByte()).isEqualTo((byte) 0x01);
try (Buffer sentSplit = buf.split().send().receive()) { try (Buffer sentSplit = buf.split().send().receive()) {
@@ -504,7 +522,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void mustBePossibleToSplitMoreThanOnce(Fixture fixture) { public void mustBePossibleToSplitMoreThanOnce(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(16).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(16)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
try (Buffer a = buf.split()) { try (Buffer a = buf.split()) {
a.writerOffset(4); a.writerOffset(4);
@@ -532,14 +550,14 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void mustBePossibleToSplitCopies(Fixture fixture) { public void mustBePossibleToSplitCopies(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) { try (BufferAllocator allocator = fixture.createAllocator()) {
Buffer buf = allocator.allocate(16).order(BIG_ENDIAN); Buffer buf = allocator.allocate(16);
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
try (Buffer copy = buf.copy()) { try (Buffer copy = buf.copy()) {
buf.close(); buf.close();
assertTrue(isOwned((ResourceSupport<?, ?>) copy)); assertTrue(isOwned((ResourceSupport<?, ?>) copy));
try (Buffer split = copy.split(4)) { try (Buffer split = copy.split(4)) {
split.reset().ensureWritable(Long.BYTES); split.resetOffsets().ensureWritable(Long.BYTES);
copy.reset().ensureWritable(Long.BYTES); copy.resetOffsets().ensureWritable(Long.BYTES);
assertThat(split.capacity()).isEqualTo(Long.BYTES); assertThat(split.capacity()).isEqualTo(Long.BYTES);
assertThat(copy.capacity()).isEqualTo(Long.BYTES); assertThat(copy.capacity()).isEqualTo(Long.BYTES);
assertThat(split.getLong(0)).isEqualTo(0x01020304_00000000L); assertThat(split.getLong(0)).isEqualTo(0x01020304_00000000L);
@@ -549,41 +567,6 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void splitBufferMustHaveSameByteOrderAsParent(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8).order(BIG_ENDIAN)) {
buf.writeLong(0x0102030405060708L);
try (Buffer a = buf.split()) {
assertThat(a.order()).isEqualTo(BIG_ENDIAN);
a.order(LITTLE_ENDIAN);
a.writerOffset(4);
try (Buffer b = a.split()) {
assertThat(b.order()).isEqualTo(LITTLE_ENDIAN);
assertThat(buf.order()).isEqualTo(BIG_ENDIAN);
}
}
}
}
@ParameterizedTest
@MethodSource("allocators")
public void splitMustPreserveByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) {
try (Buffer a = allocator.allocate(8).order(BIG_ENDIAN);
Buffer b = a.split(4)) {
assertThat(a.order()).isEqualTo(BIG_ENDIAN);
assertThat(b.order()).isEqualTo(BIG_ENDIAN);
}
try (Buffer a = allocator.allocate(8).order(LITTLE_ENDIAN);
Buffer b = a.split(4)) {
assertThat(a.order()).isEqualTo(LITTLE_ENDIAN);
assertThat(b.order()).isEqualTo(LITTLE_ENDIAN);
}
}
}
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
public void ensureWritableOnSplitBuffers(Fixture fixture) { public void ensureWritableOnSplitBuffers(Fixture fixture) {
@@ -607,7 +590,7 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
@MethodSource("allocators") @MethodSource("allocators")
public void ensureWritableOnSplitBuffersWithOddOffsets(Fixture fixture) { public void ensureWritableOnSplitBuffersWithOddOffsets(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(10).order(BIG_ENDIAN)) { Buffer buf = allocator.allocate(10)) {
buf.writeLong(0x0102030405060708L); buf.writeLong(0x0102030405060708L);
buf.writeByte((byte) 0x09); buf.writeByte((byte) 0x09);
buf.readByte(); buf.readByte();
@@ -625,17 +608,9 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
@Test @Test
public void splitOnEmptyBigEndianCompositeBuffer() { public void splitOnEmptyCompositeBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap(); try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
Buffer buf = CompositeBuffer.compose(allocator).order(BIG_ENDIAN)) { Buffer buf = CompositeBuffer.compose(allocator)) {
verifySplitEmptyCompositeBuffer(buf);
}
}
@Test
public void splitOnEmptyLittleEndianCompositeBuffer() {
try (BufferAllocator allocator = BufferAllocator.heap();
Buffer buf = CompositeBuffer.compose(allocator).order(LITTLE_ENDIAN)) {
verifySplitEmptyCompositeBuffer(buf); verifySplitEmptyCompositeBuffer(buf);
} }
} }
@@ -672,19 +647,6 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void copyOfReadOnlyBufferMustBeReadOnly(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) {
buf.writeLong(0x0102030405060708L);
buf.makeReadOnly();
try (Buffer copy = buf.copy()) {
assertTrue(copy.readOnly());
}
}
}
@ParameterizedTest @ParameterizedTest
@MethodSource("allocators") @MethodSource("allocators")
public void splitOfReadOnlyBufferMustBeReadOnly(Fixture fixture) { public void splitOfReadOnlyBufferMustBeReadOnly(Fixture fixture) {
@@ -698,4 +660,17 @@ public class BufferReferenceCountingTest extends BufferTestSupport {
} }
} }
} }
@ParameterizedTest
@MethodSource("allocators")
public void allocatingOnClosedAllocatorMustThrow(Fixture fixture) {
BufferAllocator allocator = fixture.createAllocator();
Supplier<Buffer> supplier = allocator.constBufferSupplier(new byte[8]);
allocator.close();
assertThrows(IllegalStateException.class, () -> allocator.allocate(8));
assertThrows(IllegalStateException.class, () -> allocator.constBufferSupplier(EmptyArrays.EMPTY_BYTES));
assertThrows(IllegalStateException.class, () -> allocator.constBufferSupplier(new byte[8]));
// Existing const suppliers continue to work because they hold on to static memory allocation.
supplier.get().close();
}
} }
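Several small renames run through this file: the test class itself (BufferReferenceCountingTest to BufferLifeCycleTest), send().discard() to send().close(), and reset() to resetOffsets(). A compact sketch of the renamed lifecycle calls, mirroring only patterns that appear in the tests above (package names as imported in these diffs); illustrative only:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

public final class LifeCycleSketch {
    public static void main(String[] args) {
        try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) {
            Buffer buf = allocator.allocate(8);
            buf.writeLong(0x0102030405060708L);
            try (Buffer copy = buf.copy()) {
                copy.send().close();       // previously send().discard()
            }
            buf.resetOffsets();            // previously reset(); clears reader/writer offsets
            buf.send().close();            // release the original once we are done with it
        }
    }
}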


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferLongOffsettedAccessorsTest extends BufferTestSupport { public class BufferLongOffsettedAccessorsTest extends BufferTestSupport {
@@ -58,7 +57,6 @@ public class BufferLongOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x0102030405060708L; long value = 0x0102030405060708L;
buf.writeLong(value); buf.writeLong(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -157,7 +155,6 @@ public class BufferLongOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x0102030405060708L; long value = 0x0102030405060708L;
buf.setLong(0, value); buf.setLong(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport { public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport {
@@ -58,7 +57,6 @@ public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.writeMedium(value); buf.writeMedium(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -178,7 +176,6 @@ public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport {
void offsettedGetOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { void offsettedGetOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.writeUnsignedMedium(value); buf.writeUnsignedMedium(value);
buf.setByte(0, (byte) 0x10); buf.setByte(0, (byte) 0x10);
@@ -297,7 +294,6 @@ public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.setMedium(0, value); buf.setMedium(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -345,7 +341,6 @@ public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport {
void offsettedSetOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { void offsettedSetOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.setUnsignedMedium(0, value); buf.setUnsignedMedium(0, value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);


@@ -26,15 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferOffsetsTest extends BufferTestSupport { public class BufferOffsetsTest extends BufferTestSupport {
@ParameterizedTest @ParameterizedTest
@MethodSource("initialAllocators") @MethodSource("initialCombinations")
void mustThrowWhenAllocatingZeroSizedBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) {
assertThrows(IllegalArgumentException.class, () -> allocator.allocate(0));
}
}
@ParameterizedTest
@MethodSource("allocators")
void mustThrowWhenAllocatingNegativeSizedBuffer(Fixture fixture) { void mustThrowWhenAllocatingNegativeSizedBuffer(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator()) { try (BufferAllocator allocator = fixture.createAllocator()) {
assertThrows(IllegalArgumentException.class, () -> allocator.allocate(-1)); assertThrows(IllegalArgumentException.class, () -> allocator.allocate(-1));
@@ -169,7 +161,7 @@ public class BufferOffsetsTest extends BufferTestSupport {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.writeInt(0).readShort(); buf.writeInt(0).readShort();
buf.reset(); buf.resetOffsets();
assertEquals(0, buf.readerOffset()); assertEquals(0, buf.readerOffset());
assertEquals(0, buf.writerOffset()); assertEquals(0, buf.writerOffset());
} }
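The offset bookkeeping exercised above, as a stand-alone sketch (reset() is now resetOffsets(), and zero-sized allocation no longer throws, per the removed test and the new allocatingZeroSizedBuffer test); illustrative only:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

public final class OffsetsSketch {
    public static void main(String[] args) {
        try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled();
             Buffer buf = allocator.allocate(8)) {
            buf.writeInt(0).readShort();   // writerOffset == 4, readerOffset == 2
            buf.resetOffsets();            // both offsets back to 0
            assert buf.readerOffset() == 0 && buf.writerOffset() == 0;
        }
    }
}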


@@ -20,7 +20,6 @@ import io.netty.buffer.api.BufferAllocator;
import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.MethodSource;
import static java.nio.ByteOrder.BIG_ENDIAN;
import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertThrows;
public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport { public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
@@ -46,7 +45,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
byte value = 0x01; byte value = 0x01;
@@ -97,7 +95,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
int value = 0x01; int value = 0x01;
@@ -164,7 +161,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
byte value = 0x01; byte value = 0x01;
buf.writeByte(value); buf.writeByte(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -199,7 +195,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01; int value = 0x01;
buf.writeUnsignedByte(value); buf.writeUnsignedByte(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -235,7 +230,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
char value = 0x0102; char value = 0x0102;
@@ -302,7 +296,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
char value = 0x0102; char value = 0x0102;
buf.writeChar(value); buf.writeChar(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -338,7 +331,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfShortMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
short value = 0x0102; short value = 0x0102;
@@ -406,7 +398,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfUnsignedShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfUnsignedShortMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
int value = 0x0102; int value = 0x0102;
@@ -473,7 +464,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfShortMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfShortMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
short value = 0x0102; short value = 0x0102;
buf.writeShort(value); buf.writeShort(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -508,7 +498,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfUnsignedShortMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfUnsignedShortMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x0102; int value = 0x0102;
buf.writeUnsignedShort(value); buf.writeUnsignedShort(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -544,7 +533,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
int value = 0x010203; int value = 0x010203;
@@ -612,7 +600,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
int value = 0x010203; int value = 0x010203;
@@ -679,7 +666,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.writeMedium(value); buf.writeMedium(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -714,7 +700,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x010203; int value = 0x010203;
buf.writeUnsignedMedium(value); buf.writeUnsignedMedium(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@@ -750,7 +735,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
int value = 0x01020304; int value = 0x01020304;
@@ -818,7 +802,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
long value = 0x01020304; long value = 0x01020304;
@ -885,7 +868,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
int value = 0x01020304; int value = 0x01020304;
buf.writeInt(value); buf.writeInt(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@ -920,7 +902,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x01020304; long value = 0x01020304;
buf.writeUnsignedInt(value); buf.writeUnsignedInt(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@ -956,7 +937,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
float value = Float.intBitsToFloat(0x01020304); float value = Float.intBitsToFloat(0x01020304);
@ -1023,7 +1003,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
float value = Float.intBitsToFloat(0x01020304); float value = Float.intBitsToFloat(0x01020304);
buf.writeFloat(value); buf.writeFloat(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@ -1059,7 +1038,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
long value = 0x0102030405060708L; long value = 0x0102030405060708L;
@ -1126,7 +1104,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
long value = 0x0102030405060708L; long value = 0x0102030405060708L;
buf.writeLong(value); buf.writeLong(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
@ -1162,7 +1139,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeReadOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) { void relativeReadOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
assertEquals(0, buf.readableBytes()); assertEquals(0, buf.readableBytes());
assertEquals(Long.BYTES, buf.writableBytes()); assertEquals(Long.BYTES, buf.writableBytes());
double value = Double.longBitsToDouble(0x0102030405060708L); double value = Double.longBitsToDouble(0x0102030405060708L);
@ -1229,7 +1205,6 @@ public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport {
void relativeWriteOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) { void relativeWriteOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) {
try (BufferAllocator allocator = fixture.createAllocator(); try (BufferAllocator allocator = fixture.createAllocator();
Buffer buf = allocator.allocate(8)) { Buffer buf = allocator.allocate(8)) {
buf.order(BIG_ENDIAN);
double value = Double.longBitsToDouble(0x0102030405060708L); double value = Double.longBitsToDouble(0x0102030405060708L);
buf.writeDouble(value); buf.writeDouble(value);
buf.writerOffset(Long.BYTES); buf.writerOffset(Long.BYTES);
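Every hunk above makes the same change: the buf.order(BIG_ENDIAN) setup call is removed, since the updated Buffer API no longer exposes a configurable byte order and its relative accessors always operate big-endian. A minimal sketch of what such a round-trip looks like after this commit (it reuses the fixture, allocator, and assertEquals names from the surrounding tests; the read-back assertion is an illustrative assumption, not copied from the full file):

    // Sketch only: relative write/read of an int with no byte-order setup.
    try (BufferAllocator allocator = fixture.createAllocator();
         Buffer buf = allocator.allocate(8)) {
        int value = 0x01020304;
        buf.writeInt(value);                 // most-significant byte is written first
        assertEquals(value, buf.readInt());  // read back with the same default big-endian order
    }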

Some files were not shown because too many files have changed in this diff.