Merge pull request #67 from netty/pooling-allocator

Port over the pooling buffer allocator from Netty
Chris Vest 2021-05-18 22:38:32 +02:00 committed by GitHub
commit 99cddf7749
40 changed files with 4156 additions and 332 deletions


@ -22,7 +22,7 @@ package io.netty.buffer.api;
*/
public interface AllocatorControl {
/**
* Allocate a buffer that is not tethered to any particular {@link Drop} implementation,
* Allocate a buffer that is not tethered to any particular {@link Buffer} object,
* and return the recoverable memory object from it.
* <p>
* This allows a buffer to implement {@link Buffer#ensureWritable(int)} by having new memory allocated to it,
@ -30,9 +30,9 @@ public interface AllocatorControl {
*
* @param originator The buffer that originated the request for an untethered memory allocation.
* @param size The size of the requested memory allocation, in bytes.
* @return A "recoverable memory" object that is the requested allocation.
* @return A {@link UntetheredMemory} object that is the requested allocation.
*/
Object allocateUntethered(Buffer originator, int size);
UntetheredMemory allocateUntethered(Buffer originator, int size);
/**
* Return memory to the allocator, after it has been untethered from its lifetime.
@ -42,4 +42,21 @@ public interface AllocatorControl {
* @param memory The untethered memory to return to the allocator.
*/
void recoverMemory(Object memory);
/**
* Memory that isn't attached to any particular buffer.
*/
interface UntetheredMemory {
/**
* Produce the recoverable memory object associated with this piece of untethered memory.
* @implNote This method should only be called once, since it might be expensive.
*/
<Memory> Memory memory();
/**
* Produce the drop instance associated with this piece of untethered memory.
* @implNote This method should only be called once, since it might be expensive, or interact with Cleaners.
*/
<BufferType extends Buffer> Drop<BufferType> drop();
}
}
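
The memory()/drop() accessors form a two-step handshake: a growing buffer first takes the raw memory, then the drop that will eventually release it, each exactly once per the @implNotes. A minimal sketch of a consumer, assuming the NIO memory manager where the recoverable memory object is a ByteBuffer (the GrowSketch class is hypothetical):

import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import java.nio.ByteBuffer;

final class GrowSketch {
    static void grow(AllocatorControl control, Buffer originator, int newSize) {
        UntetheredMemory untethered = control.allocateUntethered(originator, newSize);
        ByteBuffer memory = untethered.memory(); // call once; may be expensive
        Drop<Buffer> drop = untethered.drop();   // call once; may interact with Cleaners
        // ... copy the old contents into `memory`, release the old memory,
        // then install `memory` and `drop` on the growing buffer.
    }
}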


@ -15,6 +15,8 @@
*/
package io.netty.buffer.api;
import io.netty.buffer.api.pool.PooledBufferAllocator;
import java.nio.ByteOrder;
import java.util.function.Supplier;
@ -123,10 +125,10 @@ public interface BufferAllocator extends AutoCloseable {
}
static BufferAllocator pooledHeap() {
return new SizeClassedMemoryPool(MemoryManagers.getManagers().getHeapMemoryManager());
return new PooledBufferAllocator(MemoryManagers.getManagers().getHeapMemoryManager());
}
static BufferAllocator pooledDirect() {
return new SizeClassedMemoryPool(MemoryManagers.getManagers().getNativeMemoryManager());
return new PooledBufferAllocator(MemoryManagers.getManagers().getNativeMemoryManager());
}
}
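
The factory methods keep their signatures, so the pooling swap is transparent to callers. A small usage sketch (assuming Buffer's writeInt/readInt accessors):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

public final class PooledAllocationExample {
    public static void main(String[] args) throws Exception {
        try (BufferAllocator allocator = BufferAllocator.pooledDirect();
             Buffer buffer = allocator.allocate(256)) {
            buffer.writeInt(42);                  // memory comes from, and returns to, the pool
            System.out.println(buffer.readInt()); // 42
        }
    }
}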


@ -1,115 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import java.lang.invoke.VarHandle;
import java.lang.ref.Cleaner.Cleanable;
import java.lang.ref.WeakReference;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.netty.buffer.api.internal.Statics.CLEANER;
import static io.netty.buffer.api.internal.Statics.findVarHandle;
import static java.lang.invoke.MethodHandles.lookup;
class CleanerPooledDrop implements Drop<Buffer> {
private static final VarHandle CLEANABLE =
findVarHandle(lookup(), CleanerPooledDrop.class, "cleanable", GatedCleanable.class);
private final SizeClassedMemoryPool pool;
private final MemoryManager manager;
private final Drop<Buffer> delegate;
@SuppressWarnings("unused")
private volatile GatedCleanable cleanable;
CleanerPooledDrop(SizeClassedMemoryPool pool, MemoryManager manager,
Drop<Buffer> delegate) {
this.pool = pool;
this.manager = manager;
this.delegate = delegate;
}
@Override
public void drop(Buffer buf) {
GatedCleanable c = (GatedCleanable) CLEANABLE.getAndSet(this, null);
if (c != null) {
c.clean();
delegate.drop(buf);
}
}
@Override
public void attach(Buffer buf) {
// Unregister old cleanable, if any, to avoid uncontrolled build-up.
GatedCleanable c = (GatedCleanable) CLEANABLE.getAndSet(this, null);
if (c != null) {
c.disable();
c.clean();
}
var mem = manager.unwrapRecoverableMemory(buf);
WeakReference<CleanerPooledDrop> ref = new WeakReference<>(this);
AtomicBoolean gate = new AtomicBoolean(true);
cleanable = new GatedCleanable(gate, CLEANER.register(this, new CleanAction(pool, mem, ref, gate)));
}
@Override
public String toString() {
return "CleanerPooledDrop(" + delegate + ')';
}
private static final class CleanAction implements Runnable {
private final SizeClassedMemoryPool pool;
private final Object mem;
private final WeakReference<CleanerPooledDrop> ref;
private final AtomicBoolean gate;
private CleanAction(SizeClassedMemoryPool pool, Object mem, WeakReference<CleanerPooledDrop> ref,
AtomicBoolean gate) {
this.pool = pool;
this.mem = mem;
this.ref = ref;
this.gate = gate;
}
@Override
public void run() {
if (gate.getAndSet(false)) {
var monitored = ref.get();
if (monitored == null) {
pool.recoverMemory(mem);
}
}
}
}
private static final class GatedCleanable implements Cleanable {
private final AtomicBoolean gate;
private final Cleanable cleanable;
GatedCleanable(AtomicBoolean gate, Cleanable cleanable) {
this.gate = gate;
this.cleanable = cleanable;
}
public void disable() {
gate.set(false);
}
@Override
public void clean() {
cleanable.clean();
}
}
}


@ -41,11 +41,22 @@ class ManagedBufferAllocator implements BufferAllocator, AllocatorControl {
return () -> manager.allocateConstChild(constantBuffer);
}
@SuppressWarnings("unchecked")
@Override
public Object allocateUntethered(Buffer originator, int size) {
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
BufferAllocator.checkSize(size);
var buf = manager.allocateShared(this, size, NO_OP_DROP, Statics.CLEANER);
return manager.unwrapRecoverableMemory(buf);
return new UntetheredMemory() {
@Override
public <Memory> Memory memory() {
return (Memory) manager.unwrapRecoverableMemory(buf);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
return (Drop<BufferType>) manager.drop();
}
};
}
@Override


@ -18,11 +18,14 @@ package io.netty.buffer.api;
import java.lang.ref.Cleaner;
public interface MemoryManager {
boolean isNative();
Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner);
Buffer allocateConstChild(Buffer readOnlyConstParent);
Drop<Buffer> drop();
Object unwrapRecoverableMemory(Buffer buf);
int capacityOfRecoverableMemory(Object memory);
void discardRecoverableMemory(Object recoverableMemory);
// todo should recoverMemory re-attach a cleaner?
Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop);
Object sliceMemory(Object memory, int offset, int length);
}


@ -1,152 +0,0 @@
/*
* Copyright 2020 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api;
import io.netty.buffer.api.internal.Statics;
import java.lang.invoke.VarHandle;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Supplier;
import static io.netty.buffer.api.internal.Statics.NO_OP_DROP;
import static java.lang.invoke.MethodHandles.lookup;
class SizeClassedMemoryPool implements BufferAllocator, AllocatorControl, Drop<Buffer> {
private static final VarHandle CLOSE = Statics.findVarHandle(
lookup(), SizeClassedMemoryPool.class, "closed", boolean.class);
private final MemoryManager manager;
private final ConcurrentHashMap<Integer, ConcurrentLinkedQueue<Object>> pool;
@SuppressWarnings("unused")
private volatile boolean closed;
protected SizeClassedMemoryPool(MemoryManager manager) {
this.manager = manager;
pool = new ConcurrentHashMap<>();
}
@Override
public Buffer allocate(int size) {
BufferAllocator.checkSize(size);
var sizeClassPool = getSizeClassPool(size);
Object memory = sizeClassPool.poll();
if (memory != null) {
return recoverMemoryIntoBuffer(memory)
.fill((byte) 0)
.order(ByteOrder.nativeOrder());
}
return createBuf(size, getDrop());
}
@Override
public Supplier<Buffer> constBufferSupplier(byte[] bytes) {
Buffer constantBuffer = manager.allocateShared(this, bytes.length, manager.drop(), Statics.CLEANER);
constantBuffer.writeBytes(bytes).makeReadOnly();
return () -> manager.allocateConstChild(constantBuffer);
}
protected MemoryManager getMemoryManager() {
return manager;
}
protected Buffer createBuf(int size, Drop<Buffer> drop) {
var buf = manager.allocateShared(this, size, drop, null);
drop.attach(buf);
return buf;
}
protected Drop<Buffer> getDrop() {
return new CleanerPooledDrop(this, getMemoryManager(), this);
}
@Override
public void close() {
if (CLOSE.compareAndSet(this, false, true)) {
var capturedExceptions = new ArrayList<Exception>(4);
pool.forEach((k, v) -> {
Object memory;
while ((memory = v.poll()) != null) {
try {
dispose(recoverMemoryIntoBuffer(memory));
} catch (Exception e) {
capturedExceptions.add(e);
}
}
});
if (!capturedExceptions.isEmpty()) {
var exception = new ResourceDisposeFailedException();
capturedExceptions.forEach(exception::addSuppressed);
throw exception;
}
}
}
@Override
public void drop(Buffer buf) {
if (closed) {
dispose(buf);
return;
}
Object mem = manager.unwrapRecoverableMemory(buf);
var sizeClassPool = getSizeClassPool(manager.capacityOfRecoverableMemory(mem));
sizeClassPool.offer(mem);
if (closed) {
Object memory;
while ((memory = sizeClassPool.poll()) != null) {
dispose(recoverMemoryIntoBuffer(memory));
}
}
}
@Override
public String toString() {
return "SizeClassedMemoryPool";
}
@Override
public Object allocateUntethered(Buffer originator, int size) {
var sizeClassPool = getSizeClassPool(size);
Object memory = sizeClassPool.poll();
if (memory == null) {
Buffer untetheredBuf = createBuf(size, NO_OP_DROP);
memory = manager.unwrapRecoverableMemory(untetheredBuf);
}
return memory;
}
@Override
public void recoverMemory(Object memory) {
Buffer buf = recoverMemoryIntoBuffer(memory);
buf.close();
}
private Buffer recoverMemoryIntoBuffer(Object memory) {
var drop = getDrop();
var buf = manager.recoverMemory(this, memory, drop);
drop.attach(buf);
return buf;
}
private ConcurrentLinkedQueue<Object> getSizeClassPool(int size) {
return pool.computeIfAbsent(size, k -> new ConcurrentLinkedQueue<>());
}
private void dispose(Buffer buf) {
manager.drop().drop(buf);
}
}


@ -34,6 +34,11 @@ public class ByteBufferMemoryManager implements MemoryManager {
this.direct = direct;
}
@Override
public boolean isNative() {
return direct;
}
@Override
public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner) {
int capacity = Math.toIntExact(size);
@ -65,9 +70,20 @@ public class ByteBufferMemoryManager implements MemoryManager {
return ((ByteBuffer) memory).capacity();
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
// ByteBuffers have their memory released by the GC, so there is nothing for us to do.
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
ByteBuffer memory = (ByteBuffer) recoverableMemory;
return new NioBuffer(memory, memory, allocatorControl, convert(drop));
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
var buffer = (ByteBuffer) memory;
return buffer.slice(offset, length);
}
}


@ -408,27 +408,27 @@ class NioBuffer extends RcSupport<Buffer, NioBuffer> implements Buffer, Readable
// Allocate a bigger buffer.
long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth);
BufferAllocator.checkSize(newSize);
ByteBuffer buffer = (ByteBuffer) control.allocateUntethered(this, (int) newSize);
var untethered = control.allocateUntethered(this, (int) newSize);
ByteBuffer buffer = untethered.memory();
buffer.order(order());
// Copy contents.
copyInto(0, buffer, 0, capacity());
// Release the old memory and install the new:
Drop<NioBuffer> drop = disconnectDrop();
Drop<NioBuffer> drop = untethered.drop();
disconnectDrop(drop);
attachNewBuffer(buffer, drop);
}
private Drop<NioBuffer> disconnectDrop() {
private void disconnectDrop(Drop<NioBuffer> newDrop) {
var drop = (Drop<NioBuffer>) unsafeGetDrop();
int roff = this.roff;
int woff = this.woff;
drop.drop(this);
drop = ArcDrop.unwrapAllArcs(drop);
unsafeSetDrop(new ArcDrop<>(drop));
unsafeSetDrop(new ArcDrop<>(newDrop));
this.roff = roff;
this.woff = woff;
return drop;
}
private void attachNewBuffer(ByteBuffer buffer, Drop<NioBuffer> drop) {


@ -46,13 +46,6 @@ public final class ArcDrop<T> implements Drop<T> {
return new ArcDrop<X>(drop);
}
public static <X> Drop<X> unwrapAllArcs(Drop<X> drop) {
while (drop instanceof ArcDrop) {
drop = ((ArcDrop<X>) drop).unwrap();
}
return drop;
}
public static <X> Drop<X> acquire(Drop<X> drop) {
if (drop.getClass() == ArcDrop.class) {
((ArcDrop<X>) drop).increment();
@ -103,7 +96,10 @@ public final class ArcDrop<T> implements Drop<T> {
@Override
public String toString() {
StringBuilder builder = new StringBuilder().append("ArcDrop(").append(count).append(", ");
StringBuilder builder = new StringBuilder()
.append("ArcDrop@")
.append(Integer.toHexString(System.identityHashCode(this)))
.append('(').append(count).append(", ");
Drop<T> drop = this;
while ((drop = ((ArcDrop<T>) drop).unwrap()) instanceof ArcDrop) {
builder.append(((ArcDrop<T>) drop).count).append(", ");


@ -0,0 +1,81 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.internal;
import io.netty.buffer.api.Drop;
import java.io.Serial;
import java.lang.ref.Cleaner;
import java.util.concurrent.atomic.AtomicReference;
/**
* A drop implementation that delegates to another drop instance, either when called directly, or when it becomes
* cleanable. This ensures that objects are dropped even if they leak.
*/
public final class CleanerDrop<T> implements Drop<T> {
private Cleaner.Cleanable cleanable;
private GatedRunner<T> runner;
/**
* Wrap the given drop instance, and produce a new drop instance that will also call the delegate drop instance if
* it becomes cleanable.
*/
public static <T> Drop<T> wrap(Drop<T> drop) {
CleanerDrop<T> cleanerDrop = new CleanerDrop<>();
GatedRunner<T> runner = new GatedRunner<>(drop);
cleanerDrop.cleanable = Statics.CLEANER.register(cleanerDrop, runner);
cleanerDrop.runner = runner;
return cleanerDrop;
}
private CleanerDrop() {
}
@Override
public void attach(T obj) {
runner.set(obj);
runner.drop.attach(obj);
}
@Override
public void drop(T obj) {
attach(obj);
cleanable.clean();
}
@Override
public String toString() {
return "CleanerDrop(" + runner.drop + ')';
}
private static final class GatedRunner<T> extends AtomicReference<T> implements Runnable {
@Serial
private static final long serialVersionUID = 2685535951915798850L;
final Drop<T> drop;
private GatedRunner(Drop<T> drop) {
this.drop = drop;
}
@Override
public void run() {
T obj = getAndSet(null); // Make absolutely sure we only delegate once.
if (obj != null) {
drop.drop(obj);
}
}
}
}
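
CleanerDrop takes over the leak-guard role from the deleted CleanerPooledDrop. A hedged sketch of the wrapping pattern, where returnToPool stands in for the pool's actual drop (PooledDrop, as used by PoolChunk below):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.internal.CleanerDrop;

final class CleanerDropSketch {
    static Drop<Buffer> leakSafe(Drop<Buffer> returnToPool) {
        // The delegate runs at most once: either through an explicit drop(),
        // or through the Cleaner if the buffer leaks. GatedRunner's
        // getAndSet(null) is what guarantees the single delegation.
        return CleanerDrop.wrap(returnToPool);
    }
}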


@ -21,6 +21,7 @@ import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.ArcDrop;
import jdk.incubator.foreign.MemorySegment;
import jdk.incubator.foreign.ResourceScope;
import java.lang.ref.Cleaner;
@ -58,9 +59,24 @@ public abstract class AbstractMemorySegmentManager implements MemoryManager {
return (int) ((MemorySegment) memory).byteSize();
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
var segment = (MemorySegment) recoverableMemory;
ResourceScope scope = segment.scope();
if (!scope.isImplicit()) {
scope.close();
}
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
var segment = (MemorySegment) recoverableMemory;
return new MemSegBuffer(segment, segment, convert(ArcDrop.acquire(drop)), allocatorControl);
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
var segment = (MemorySegment) memory;
return segment.asSlice(offset, length);
}
}


@ -24,4 +24,9 @@ public class HeapMemorySegmentManager extends AbstractMemorySegmentManager {
protected MemorySegment createSegment(long size, Cleaner cleaner) {
return MemorySegment.ofArray(new byte[Math.toIntExact(size)]);
}
@Override
public boolean isNative() {
return false;
}
}


@ -524,32 +524,27 @@ class MemSegBuffer extends RcSupport<Buffer, MemSegBuffer> implements Buffer, Re
// Allocate a bigger buffer.
long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth);
BufferAllocator.checkSize(newSize);
MemorySegment newSegment = (MemorySegment) control.allocateUntethered(this, (int) newSize);
var untethered = control.allocateUntethered(this, (int) newSize);
MemorySegment newSegment = untethered.memory();
// Copy contents.
newSegment.copyFrom(seg);
// Release the old memory segment and install the new one:
Drop<MemSegBuffer> drop = disconnectDrop();
Drop<MemSegBuffer> drop = untethered.drop();
disconnectDrop(drop);
attachNewMemorySegment(newSegment, drop);
}
private Drop<MemSegBuffer> disconnectDrop() {
private void disconnectDrop(Drop<MemSegBuffer> newDrop) {
var drop = unsafeGetDrop();
if (drop instanceof ArcDrop) {
// Disconnect from the current arc drop, since we'll get our own fresh memory segment.
int roff = this.roff;
int woff = this.woff;
drop.drop(this);
drop = ArcDrop.unwrapAllArcs(drop);
unsafeSetDrop(new ArcDrop<>(drop));
this.roff = roff;
this.woff = woff;
} else {
// TODO would we ever get here?
control.recoverMemory(recoverableMemory());
}
return drop;
// Disconnect from the current arc drop, since we'll get our own fresh memory segment.
int roff = this.roff;
int woff = this.woff;
drop.drop(this);
unsafeSetDrop(new ArcDrop<>(newDrop));
this.roff = roff;
this.woff = woff;
}
private void attachNewMemorySegment(MemorySegment newSegment, Drop<MemSegBuffer> drop) {


@ -52,6 +52,11 @@ public class NativeMemorySegmentManager extends AbstractMemorySegmentManager {
}
}
@Override
public boolean isNative() {
return true;
}
@Override
protected MemorySegment createSegment(long size, Cleaner cleaner) {
final ResourceScope scope = cleaner == null ? newSharedScope() : newSharedScope(cleaner);


@ -0,0 +1,25 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.BufferAllocator;
public interface BufferAllocatorMetric {
/**
* Returns the number of bytes of heap memory used by a {@link BufferAllocator} or {@code -1} if unknown.
*/
long usedMemory();
}


@ -0,0 +1,26 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.BufferAllocator;
public interface BufferAllocatorMetricProvider {
/**
* Returns a {@link BufferAllocatorMetric} for a {@link BufferAllocator}.
*/
BufferAllocatorMetric metric();
}
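
A usage sketch for the metrics SPI, assuming PooledBufferAllocator implements BufferAllocatorMetricProvider the way its Netty counterpart does:

import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.pool.BufferAllocatorMetric;
import io.netty.buffer.api.pool.BufferAllocatorMetricProvider;

public final class MetricsSketch {
    public static void main(String[] args) throws Exception {
        try (BufferAllocator allocator = BufferAllocator.pooledHeap()) {
            if (allocator instanceof BufferAllocatorMetricProvider provider) {
                BufferAllocatorMetric metric = provider.metric();
                System.out.println("used bytes: " + metric.usedMemory()); // -1 when unknown
            }
        }
    }
}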


@ -0,0 +1,129 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Internal primitive map implementation that is specifically optimised for the runs availability map use case in
* {@link PoolChunk}.
*/
final class LongLongHashMap {
private static final int MASK_TEMPLATE = ~1;
private int mask;
private long[] array;
private int maxProbe;
private long zeroVal;
private final long emptyVal;
LongLongHashMap(long emptyVal) {
this.emptyVal = emptyVal;
zeroVal = emptyVal;
int initialSize = 32;
array = new long[initialSize];
mask = initialSize - 1;
computeMaskAndProbe();
}
public long put(long key, long value) {
if (key == 0) {
long prev = zeroVal;
zeroVal = value;
return prev;
}
for (;;) {
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key || existing == 0) {
long prev = existing == 0? emptyVal : array[index + 1];
array[index] = key;
array[index + 1] = value;
for (; i < maxProbe; i++) { // Nerf any existing misplaced entries.
index = index + 2 & mask;
if (array[index] == key) {
array[index] = 0;
prev = array[index + 1];
break;
}
}
return prev;
}
index = index + 2 & mask;
}
expand(); // Grow array and re-hash.
}
}
public void remove(long key) {
if (key == 0) {
zeroVal = emptyVal;
return;
}
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key) {
array[index] = 0;
break;
}
index = index + 2 & mask;
}
}
public long get(long key) {
if (key == 0) {
return zeroVal;
}
int index = index(key);
for (int i = 0; i < maxProbe; i++) {
long existing = array[index];
if (existing == key) {
return array[index + 1];
}
index = index + 2 & mask;
}
return emptyVal;
}
private int index(long key) {
// Hash with murmur64, and mask.
key ^= key >>> 33;
key *= 0xff51afd7ed558ccdL;
key ^= key >>> 33;
key *= 0xc4ceb9fe1a85ec53L;
key ^= key >>> 33;
return (int) key & mask;
}
private void expand() {
long[] prev = array;
array = new long[prev.length * 2];
computeMaskAndProbe();
for (int i = 0; i < prev.length; i += 2) {
long key = prev[i];
if (key != 0) {
long val = prev[i + 1];
put(key, val);
}
}
}
private void computeMaskAndProbe() {
int length = array.length;
mask = length - 1 & MASK_TEMPLATE;
maxProbe = (int) Math.log(length);
}
}
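
Keys and values occupy adjacent array slots, key 0 is special-cased through the zeroVal field, and absent keys come back as the emptyVal sentinel rather than null, so no boxing occurs. A usage sketch, which must live in the same package since the class is package-private:

package io.netty.buffer.api.pool;

final class LongLongHashMapSketch {
    public static void main(String[] args) {
        LongLongHashMap runs = new LongLongHashMap(-1);
        System.out.println(runs.put(3, 100)); // -1: no previous mapping
        System.out.println(runs.put(3, 200)); // 100: the replaced value
        System.out.println(runs.get(3));      // 200
        runs.remove(3);
        System.out.println(runs.get(3));      // -1: the emptyVal sentinel
        runs.put(0, 7);                       // key 0 bypasses probing via zeroVal
    }
}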


@ -0,0 +1,107 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import java.util.Arrays;
/**
* Internal primitive priority queue, used by {@link PoolChunk}.
* The implementation is based on the binary heap, as described in Algorithms by Sedgewick and Wayne.
*/
final class LongPriorityQueue {
public static final int NO_VALUE = -1;
private long[] array = new long[9];
private int size;
public void offer(long handle) {
if (handle == NO_VALUE) {
throw new IllegalArgumentException("The NO_VALUE (" + NO_VALUE + ") cannot be added to the queue.");
}
size++;
if (size == array.length) {
// Grow queue capacity.
array = Arrays.copyOf(array, 1 + (array.length - 1) * 2);
}
array[size] = handle;
lift(size);
}
public void remove(long value) {
for (int i = 1; i <= size; i++) {
if (array[i] == value) {
array[i] = array[size--];
lift(i);
sink(i);
return;
}
}
}
public long peek() {
if (size == 0) {
return NO_VALUE;
}
return array[1];
}
public long poll() {
if (size == 0) {
return NO_VALUE;
}
long val = array[1];
array[1] = array[size];
array[size] = 0;
size--;
sink(1);
return val;
}
public boolean isEmpty() {
return size == 0;
}
private void lift(int index) {
int parentIndex;
while (index > 1 && subord(parentIndex = index >> 1, index)) {
swap(index, parentIndex);
index = parentIndex;
}
}
private void sink(int index) {
int child;
while ((child = index << 1) <= size) {
if (child < size && subord(child, child + 1)) {
child++;
}
if (!subord(index, child)) {
break;
}
swap(index, child);
index = child;
}
}
private boolean subord(int a, int b) {
return array[a] > array[b];
}
private void swap(int a, int b) {
long value = array[a];
array[a] = array[b];
array[b] = value;
}
}
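
With subord(a, b) comparing for "greater than", the smallest handle sits at the root, so PoolChunk always polls the run with the lowest offset first. A same-package usage sketch (the class is package-private):

package io.netty.buffer.api.pool;

final class LongPriorityQueueSketch {
    public static void main(String[] args) {
        LongPriorityQueue handles = new LongPriorityQueue();
        handles.offer(42);
        handles.offer(7);
        handles.offer(19);
        System.out.println(handles.peek()); // 7: the smallest value stays on top
        System.out.println(handles.poll()); // 7
        System.out.println(handles.poll()); // 19
        System.out.println(handles.poll()); // 42
        System.out.println(handles.poll()); // -1: NO_VALUE, the queue is empty
    }
}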


@ -0,0 +1,469 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.MemoryManager;
import io.netty.util.internal.StringUtil;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.LongAdder;
import static io.netty.buffer.api.pool.PoolChunk.isSubpage;
import static java.lang.Math.max;
class PoolArena extends SizeClasses implements PoolArenaMetric, AllocatorControl {
enum SizeClass {
Small,
Normal
}
final PooledBufferAllocator parent;
final MemoryManager manager;
final int numSmallSubpagePools;
final int directMemoryCacheAlignment;
private final PoolSubpage[] smallSubpagePools;
private final PoolChunkList q050;
private final PoolChunkList q025;
private final PoolChunkList q000;
private final PoolChunkList qInit;
private final PoolChunkList q075;
private final PoolChunkList q100;
private final List<PoolChunkListMetric> chunkListMetrics;
// Metrics for allocations and deallocations
private long allocationsNormal;
// We need to use the LongAdder here as this is not guarded via synchronized block.
private final LongAdder allocationsSmall = new LongAdder();
private final LongAdder allocationsHuge = new LongAdder();
private final LongAdder activeBytesHuge = new LongAdder();
private long deallocationsSmall;
private long deallocationsNormal;
// We need to use the LongAdder here as this is not guarded via synchronized block.
private final LongAdder deallocationsHuge = new LongAdder();
// Number of thread caches backed by this arena.
final AtomicInteger numThreadCaches = new AtomicInteger();
protected PoolArena(PooledBufferAllocator parent, MemoryManager manager, int pageSize,
int pageShifts, int chunkSize, int cacheAlignment) {
super(pageSize, pageShifts, chunkSize, cacheAlignment);
this.parent = parent;
this.manager = manager;
directMemoryCacheAlignment = cacheAlignment;
numSmallSubpagePools = nSubpages;
smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools);
for (int i = 0; i < smallSubpagePools.length; i ++) {
smallSubpagePools[i] = newSubpagePoolHead();
}
q100 = new PoolChunkList(this, null, 100, Integer.MAX_VALUE, chunkSize);
q075 = new PoolChunkList(this, q100, 75, 100, chunkSize);
q050 = new PoolChunkList(this, q075, 50, 100, chunkSize);
q025 = new PoolChunkList(this, q050, 25, 75, chunkSize);
q000 = new PoolChunkList(this, q025, 1, 50, chunkSize);
qInit = new PoolChunkList(this, q000, Integer.MIN_VALUE, 25, chunkSize);
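// Presumably mirroring Netty's PoolChunkList(arena, nextList, minUsage, maxUsage, chunkSize)
// signature, each list holds chunks within a usage band: qInit Integer.MIN_VALUE..25%,
// q000 1..50%, q025 25..75%, q050 50..100%, q075 75..100%, q100 100%..Integer.MAX_VALUE.
// Chunks migrate to the next list as they fill, and back along the prev chain as they drain.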
q100.prevList(q075);
q075.prevList(q050);
q050.prevList(q025);
q025.prevList(q000);
q000.prevList(null);
qInit.prevList(qInit);
chunkListMetrics = List.of(qInit, q000, q025, q050, q075, q100);
}
private static PoolSubpage newSubpagePoolHead() {
PoolSubpage head = new PoolSubpage();
head.prev = head;
head.next = head;
return head;
}
private static PoolSubpage[] newSubpagePoolArray(int size) {
return new PoolSubpage[size];
}
UntetheredMemory allocate(PooledAllocatorControl control, PoolThreadCache cache, int size) {
final int sizeIdx = size2SizeIdx(size);
if (sizeIdx <= smallMaxSizeIdx) {
return tcacheAllocateSmall(control, cache, size, sizeIdx);
} else if (sizeIdx < nSizes) {
return tcacheAllocateNormal(control, cache, size, sizeIdx);
} else {
int normCapacity = directMemoryCacheAlignment > 0
? normalizeSize(size) : size;
// Huge allocations are never served via the cache so just call allocateHuge
return allocateHuge(normCapacity);
}
}
private UntetheredMemory tcacheAllocateSmall(PooledAllocatorControl control, PoolThreadCache cache, final int size,
final int sizeIdx) {
UntetheredMemory memory = cache.allocateSmall(control, size, sizeIdx);
if (memory != null) {
// was able to allocate out of the cache so move on
return memory;
}
/*
* Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
* {@link PoolChunk#free(long)} may modify the doubly linked list as well.
*/
final PoolSubpage head = smallSubpagePools[sizeIdx];
final boolean needsNormalAllocation;
synchronized (head) {
final PoolSubpage s = head.next;
needsNormalAllocation = s == head;
if (!needsNormalAllocation) {
assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx);
long handle = s.allocate();
assert handle >= 0;
memory = s.chunk.allocateBufferWithSubpage(handle, size, cache, control);
}
}
if (needsNormalAllocation) {
synchronized (this) {
memory = allocateNormal(size, sizeIdx, cache, control);
}
}
incSmallAllocation();
return memory;
}
private UntetheredMemory tcacheAllocateNormal(
PooledAllocatorControl control, PoolThreadCache cache, int size, int sizeIdx) {
UntetheredMemory memory = cache.allocateNormal(this, control, size, sizeIdx);
if (memory != null) {
// was able to allocate out of the cache so move on
return memory;
}
synchronized (this) {
memory = allocateNormal(size, sizeIdx, cache, control);
allocationsNormal++;
}
return memory;
}
// Method must be called inside synchronized(this) { ... } block
private UntetheredMemory allocateNormal(
int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) {
UntetheredMemory memory = q050.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q025.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q000.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = qInit.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
memory = q075.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
return memory;
}
// Add a new chunk.
PoolChunk c = newChunk(pageSize, nPSizes, pageShifts, chunkSize);
memory = c.allocate(size, sizeIdx, threadCache, control);
assert memory != null;
qInit.add(c);
return memory;
}
private void incSmallAllocation() {
allocationsSmall.increment();
}
private UntetheredMemory allocateHuge(int size) {
activeBytesHuge.add(size);
allocationsHuge.increment();
return new UnpooledUnthetheredMemory(parent, manager, size);
}
void free(PoolChunk chunk, long handle, int normCapacity, PoolThreadCache cache) {
SizeClass sizeClass = sizeClass(handle);
if (cache != null && cache.add(this, chunk, handle, normCapacity, sizeClass)) {
// cached, so do not free it.
return;
}
freeChunk(chunk, handle, normCapacity, sizeClass);
}
private static SizeClass sizeClass(long handle) {
return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal;
}
void freeChunk(PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass) {
final boolean destroyChunk;
synchronized (this) {
if (sizeClass == SizeClass.Normal) {
++deallocationsNormal;
} else if (sizeClass == SizeClass.Small) {
++deallocationsSmall;
} else {
throw new AssertionError("Unexpected size class: " + sizeClass);
}
destroyChunk = !chunk.parent.free(chunk, handle, normCapacity);
}
if (destroyChunk) {
// destroyChunk does not need to be called while holding the synchronized lock.
chunk.destroy();
}
}
PoolSubpage findSubpagePoolHead(int sizeIdx) {
return smallSubpagePools[sizeIdx];
}
@Override
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
throw new AssertionError("PoolChunk base buffers should never need to reallocate.");
}
@Override
public void recoverMemory(Object memory) {
// This means we've lost all strong references to a PoolChunk.
// Probably means we don't need it anymore, so just free its memory.
manager.discardRecoverableMemory(memory);
}
@Override
public int numThreadCaches() {
return numThreadCaches.get();
}
@Override
public int numSmallSubpages() {
return smallSubpagePools.length;
}
@Override
public int numChunkLists() {
return chunkListMetrics.size();
}
@Override
public List<PoolSubpageMetric> smallSubpages() {
return subPageMetricList(smallSubpagePools);
}
@Override
public List<PoolChunkListMetric> chunkLists() {
return chunkListMetrics;
}
private static List<PoolSubpageMetric> subPageMetricList(PoolSubpage[] pages) {
List<PoolSubpageMetric> metrics = new ArrayList<>();
for (PoolSubpage head : pages) {
if (head.next == head) {
continue;
}
PoolSubpage s = head.next;
do {
metrics.add(s);
s = s.next;
} while (s != head);
}
return metrics;
}
@Override
public long numAllocations() {
final long allocsNormal;
synchronized (this) {
allocsNormal = allocationsNormal;
}
return allocationsSmall.longValue() + allocsNormal + allocationsHuge.longValue();
}
@Override
public long numTinyAllocations() {
return 0;
}
@Override
public long numSmallAllocations() {
return allocationsSmall.longValue();
}
@Override
public synchronized long numNormalAllocations() {
return allocationsNormal;
}
@Override
public long numDeallocations() {
final long deallocs;
synchronized (this) {
deallocs = deallocationsSmall + deallocationsNormal;
}
return deallocs + deallocationsHuge.longValue();
}
@Override
public long numTinyDeallocations() {
return 0;
}
@Override
public synchronized long numSmallDeallocations() {
return deallocationsSmall;
}
@Override
public synchronized long numNormalDeallocations() {
return deallocationsNormal;
}
@Override
public long numHugeAllocations() {
return allocationsHuge.longValue();
}
@Override
public long numHugeDeallocations() {
return deallocationsHuge.longValue();
}
@Override
public long numActiveAllocations() {
long val = allocationsSmall.longValue() + allocationsHuge.longValue()
- deallocationsHuge.longValue();
synchronized (this) {
val += allocationsNormal - (deallocationsSmall + deallocationsNormal);
}
return max(val, 0);
}
@Override
public long numActiveSmallAllocations() {
return max(numSmallAllocations() - numSmallDeallocations(), 0);
}
@Override
public long numActiveNormalAllocations() {
final long val;
synchronized (this) {
val = allocationsNormal - deallocationsNormal;
}
return max(val, 0);
}
@Override
public long numActiveHugeAllocations() {
return max(numHugeAllocations() - numHugeDeallocations(), 0);
}
@Override
public long numActiveBytes() {
long val = activeBytesHuge.longValue();
synchronized (this) {
for (int i = 0; i < chunkListMetrics.size(); i++) {
for (PoolChunkMetric m: chunkListMetrics.get(i)) {
val += m.chunkSize();
}
}
}
return max(0, val);
}
protected final PoolChunk newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) {
return new PoolChunk(this, pageSize, pageShifts, chunkSize, maxPageIdx);
}
@Override
public synchronized String toString() {
StringBuilder buf = new StringBuilder()
.append("Chunk(s) at 0~25%:")
.append(StringUtil.NEWLINE)
.append(qInit)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 0~50%:")
.append(StringUtil.NEWLINE)
.append(q000)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 25~75%:")
.append(StringUtil.NEWLINE)
.append(q025)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 50~100%:")
.append(StringUtil.NEWLINE)
.append(q050)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 75~100%:")
.append(StringUtil.NEWLINE)
.append(q075)
.append(StringUtil.NEWLINE)
.append("Chunk(s) at 100%:")
.append(StringUtil.NEWLINE)
.append(q100)
.append(StringUtil.NEWLINE)
.append("small subpages:");
appendPoolSubPages(buf, smallSubpagePools);
buf.append(StringUtil.NEWLINE);
return buf.toString();
}
private static void appendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages) {
for (int i = 0; i < subpages.length; i ++) {
PoolSubpage head = subpages[i];
if (head.next == head) {
continue;
}
buf.append(StringUtil.NEWLINE)
.append(i)
.append(": ");
PoolSubpage s = head.next;
do {
buf.append(s);
s = s.next;
} while (s != head);
}
}
public void close() {
for (PoolSubpage page : smallSubpagePools) {
page.destroy();
}
for (PoolChunkList list : new PoolChunkList[] {qInit, q000, q025, q050, q075, q100}) {
list.destroy();
}
}
}


@ -0,0 +1,130 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import java.util.List;
/**
* Expose metrics for an arena.
*/
public interface PoolArenaMetric extends SizeClassesMetric {
/**
* Returns the number of thread caches backed by this arena.
*/
int numThreadCaches();
/**
* Returns the number of small sub-pages for the arena.
*/
int numSmallSubpages();
/**
* Returns the number of chunk lists for the arena.
*/
int numChunkLists();
/**
* Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for small sub-pages.
*/
List<PoolSubpageMetric> smallSubpages();
/**
* Returns an unmodifiable {@link List} which holds {@link PoolChunkListMetric}s.
*/
List<PoolChunkListMetric> chunkLists();
/**
* Return the number of allocations done via the arena. This includes all sizes.
*/
long numAllocations();
/**
* Return the number of tiny allocations done via the arena.
*
* @deprecated Tiny allocations have been merged into small allocations.
*/
@Deprecated
long numTinyAllocations();
/**
* Return the number of small allocations done via the arena.
*/
long numSmallAllocations();
/**
* Return the number of normal allocations done via the arena.
*/
long numNormalAllocations();
/**
* Return the number of huge allocations done via the arena.
*/
long numHugeAllocations();
/**
* Return the number of deallocations done via the arena. This includes all sizes.
*/
long numDeallocations();
/**
* Return the number of tiny deallocations done via the arena.
*
* @deprecated Tiny deallocations have been merged into small deallocations.
*/
@Deprecated
long numTinyDeallocations();
/**
* Return the number of small deallocations done via the arena.
*/
long numSmallDeallocations();
/**
* Return the number of normal deallocations done via the arena.
*/
long numNormalDeallocations();
/**
* Return the number of huge deallocations done via the arena.
*/
long numHugeDeallocations();
/**
* Return the number of currently active allocations.
*/
long numActiveAllocations();
/**
* Return the number of currently active small allocations.
*/
long numActiveSmallAllocations();
/**
* Return the number of currently active normal allocations.
*/
long numActiveNormalAllocations();
/**
* Return the number of currently active huge allocations.
*/
long numActiveHugeAllocations();
/**
* Return the number of active bytes that are currently allocated by the arena.
*/
long numActiveBytes();
}


@ -0,0 +1,662 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.ArcDrop;
import io.netty.buffer.api.internal.CleanerDrop;
import io.netty.buffer.api.internal.Statics;
import java.util.PriorityQueue;
/**
* Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
*
* Notation: The following terms are important to understand the code
* > page - a page is the smallest unit of memory chunk that can be allocated
* > run - a run is a collection of pages
* > chunk - a chunk is a collection of runs
* > in this code chunkSize = maxPages * pageSize
*
* To begin we allocate a byte array of size = chunkSize
* Whenever a ByteBuf of given size needs to be created we search for the first position
* in the byte array that has enough empty space to accommodate the requested size and
* return a (long) handle that encodes this offset information, (this memory segment is then
* marked as reserved, so it is always used by exactly one ByteBuf and no more)
*
* For simplicity all sizes are normalized according to the {@link PoolArena#size2SizeIdx(int)} method.
* This ensures that when we request memory segments of size > pageSize, the normalizedCapacity
* equals the next nearest size in {@link SizeClasses}.
*
*
* A chunk has the following layout:
*
* /-----------------\
* | run             |
* |                 |
* |                 |
* |-----------------|
* | run             |
* |                 |
* |-----------------|
* | unallocated     |
* | (freed)         |
* |                 |
* |-----------------|
* | subpage         |
* |-----------------|
* | unallocated     |
* | (freed)         |
* | ...             |
* | ...             |
* | ...             |
* |                 |
* |                 |
* |                 |
* \-----------------/
*
*
* handle:
* -------
* a handle is a long number, the bit layout of a run looks like:
*
* oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb
*
* o: runOffset (page offset in the chunk), 15bit
* s: size (number of pages) of this run, 15bit
* u: isUsed?, 1bit
* e: isSubpage?, 1bit
* b: bitmapIdx of subpage, zero if it's not subpage, 32bit
*
* runsAvailMap:
* ------
* a map which manages all runs (used and not in use).
* For each run, the first runOffset and last runOffset are stored in runsAvailMap.
* key: runOffset
* value: handle
*
* runsAvail:
* ----------
* an array of {@link PriorityQueue}.
* Each queue manages runs of the same size.
* Runs are sorted by offset, so that we always allocate the run with the smallest offset.
*
*
* Algorithm:
* ----------
*
* As we allocate runs, we update values stored in runsAvailMap and runsAvail so that the property is maintained.
*
* Initialization -
* In the beginning we store the initial run which is the whole chunk.
* The initial run:
* runOffset = 0
* size = chunkSize
* isUsed = no
* isSubpage = no
* bitmapIdx = 0
*
*
* Algorithm: [allocateRun(size)]
* ----------
* 1) find the first avail run in runsAvail according to size
* 2) if the run has more pages than requested, split it, and save the tail run
*    for later use
*
* Algorithm: [allocateSubpage(size)]
* ----------
* 1) find a subpage that is not full, according to size.
*    if one already exists just use it, otherwise allocate a new PoolSubpage and call init();
*    note that this subpage object is added to the subpagesPool in the PoolArena when we init() it
* 2) call subpage.allocate()
*
* Algorithm: [free(handle, length, nioBuffer)]
* ----------
* 1) if it is a subpage, return the slab back into this subpage
* 2) if the subpage is not used, or it is a run, then start freeing this run
* 3) merge continuous avail runs
* 4) save the merged run
*
*/
final class PoolChunk implements PoolChunkMetric {
private static final int SIZE_BIT_LENGTH = 15;
private static final int INUSED_BIT_LENGTH = 1;
private static final int SUBPAGE_BIT_LENGTH = 1;
private static final int BITMAP_IDX_BIT_LENGTH = 32;
static final int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH;
static final int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT;
static final int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT;
static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT;
final PoolArena arena;
final Buffer base; // The buffer that is the source of the memory. Closing it will free the memory.
final Object memory;
final Drop<Buffer> baseDrop; // An ArcDrop that manages references to the base Buffer.
/**
* store the first page and last page of each avail run
*/
private final LongLongHashMap runsAvailMap;
/**
* manage all avail runs
*/
private final LongPriorityQueue[] runsAvail;
/**
* manage all subpages in this chunk
*/
private final PoolSubpage[] subpages;
private final int pageSize;
private final int pageShifts;
private final int chunkSize;
int freeBytes;
PoolChunkList parent;
PoolChunk prev;
PoolChunk next;
PoolChunk(PoolArena arena, int pageSize, int pageShifts, int chunkSize,
int maxPageIdx) {
this.arena = arena;
MemoryManager manager = arena.manager;
base = manager.allocateShared(arena, chunkSize, manager.drop(), Statics.CLEANER);
memory = manager.unwrapRecoverableMemory(base);
baseDrop = ArcDrop.wrap(Buffer::close);
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
freeBytes = chunkSize;
runsAvail = newRunsAvailqueueArray(maxPageIdx);
runsAvailMap = new LongLongHashMap(-1);
subpages = new PoolSubpage[chunkSize >> pageShifts];
//insert initial run, offset = 0, pages = chunkSize / pageSize
int pages = chunkSize >> pageShifts;
long initHandle = (long) pages << SIZE_SHIFT;
insertAvailRun(0, pages, initHandle);
}
private static LongPriorityQueue[] newRunsAvailqueueArray(int size) {
LongPriorityQueue[] queueArray = new LongPriorityQueue[size];
for (int i = 0; i < queueArray.length; i++) {
queueArray[i] = new LongPriorityQueue();
}
return queueArray;
}
private void insertAvailRun(int runOffset, int pages, long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(pages);
LongPriorityQueue queue = runsAvail[pageIdxFloor];
queue.offer(handle);
//insert first page of run
insertAvailRun0(runOffset, handle);
if (pages > 1) {
//insert last page of run
insertAvailRun0(lastPage(runOffset, pages), handle);
}
}
private void insertAvailRun0(int runOffset, long handle) {
long pre = runsAvailMap.put(runOffset, handle);
assert pre == -1;
}
private void removeAvailRun(long handle) {
int pageIdxFloor = arena.pages2pageIdxFloor(runPages(handle));
LongPriorityQueue queue = runsAvail[pageIdxFloor];
removeAvailRun(queue, handle);
}
private void removeAvailRun(LongPriorityQueue queue, long handle) {
queue.remove(handle);
int runOffset = runOffset(handle);
int pages = runPages(handle);
//remove first page of run
runsAvailMap.remove(runOffset);
if (pages > 1) {
//remove last page of run
runsAvailMap.remove(lastPage(runOffset, pages));
}
}
private static int lastPage(int runOffset, int pages) {
return runOffset + pages - 1;
}
private long getAvailRunByOffset(int runOffset) {
return runsAvailMap.get(runOffset);
}
@Override
public int usage() {
final int freeBytes;
synchronized (arena) {
freeBytes = this.freeBytes;
}
return usage(freeBytes);
}
private int usage(int freeBytes) {
if (freeBytes == 0) {
return 100;
}
int freePercentage = (int) (freeBytes * 100L / chunkSize);
if (freePercentage == 0) {
return 99;
}
return 100 - freePercentage;
}
UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache cache, PooledAllocatorControl control) {
final long handle;
if (sizeIdx <= arena.smallMaxSizeIdx) {
// small
handle = allocateSubpage(sizeIdx);
if (handle < 0) {
return null;
}
assert isSubpage(handle);
} else {
// normal
// runSize must be multiple of pageSize
int runSize = arena.sizeIdx2size(sizeIdx);
handle = allocateRun(runSize);
if (handle < 0) {
return null;
}
}
return allocateBuffer(handle, size, cache, control);
}
private long allocateRun(int runSize) {
int pages = runSize >> pageShifts;
int pageIdx = arena.pages2pageIdx(pages);
synchronized (runsAvail) {
//find first queue which has at least one big enough run
int queueIdx = runFirstBestFit(pageIdx);
if (queueIdx == -1) {
return -1;
}
//get run with min offset in this queue
LongPriorityQueue queue = runsAvail[queueIdx];
long handle = queue.poll();
assert handle != LongPriorityQueue.NO_VALUE && !isUsed(handle) : "invalid handle: " + handle;
removeAvailRun(queue, handle);
if (handle != -1) {
handle = splitLargeRun(handle, pages);
}
freeBytes -= runSize(pageShifts, handle);
return handle;
}
}
private int calculateRunSize(int sizeIdx) {
int maxElements = 1 << pageShifts - SizeClasses.LOG2_QUANTUM;
int runSize = 0;
int nElements;
final int elemSize = arena.sizeIdx2size(sizeIdx);
// Find the lowest common multiple of pageSize and elemSize
do {
runSize += pageSize;
nElements = runSize / elemSize;
} while (nElements < maxElements && runSize != nElements * elemSize);
while (nElements > maxElements) {
runSize -= pageSize;
nElements = runSize / elemSize;
}
assert nElements > 0;
assert runSize <= chunkSize;
assert runSize >= elemSize;
return runSize;
}
private int runFirstBestFit(int pageIdx) {
if (freeBytes == chunkSize) {
return arena.nPSizes - 1;
}
for (int i = pageIdx; i < arena.nPSizes; i++) {
LongPriorityQueue queue = runsAvail[i];
if (queue != null && !queue.isEmpty()) {
return i;
}
}
return -1;
}
private long splitLargeRun(long handle, int needPages) {
assert needPages > 0;
int totalPages = runPages(handle);
assert needPages <= totalPages;
int remPages = totalPages - needPages;
if (remPages > 0) {
int runOffset = runOffset(handle);
// keep track of trailing unused pages for later use
int availOffset = runOffset + needPages;
long availRun = toRunHandle(availOffset, remPages, 0);
insertAvailRun(availOffset, remPages, availRun);
// not avail
return toRunHandle(runOffset, needPages, 1);
}
//mark it as used
handle |= 1L << IS_USED_SHIFT;
return handle;
}
/**
* Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to the
* subpage pool in the PoolArena that owns this PoolChunk.
*
* @param sizeIdx sizeIdx of normalized size
*
* @return the handle of the subpage allocation, or -1 if the underlying run could not be allocated
*/
private long allocateSubpage(int sizeIdx) {
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
PoolSubpage head = arena.findSubpagePoolHead(sizeIdx);
synchronized (head) {
//allocate a new run
int runSize = calculateRunSize(sizeIdx);
//runSize must be multiples of pageSize
long runHandle = allocateRun(runSize);
if (runHandle < 0) {
return -1;
}
int runOffset = runOffset(runHandle);
assert subpages[runOffset] == null;
int elemSize = arena.sizeIdx2size(sizeIdx);
PoolSubpage subpage = new PoolSubpage(head, this, pageShifts, runOffset,
runSize(pageShifts, runHandle), elemSize);
subpages[runOffset] = subpage;
return subpage.allocate();
}
}
/**
* Free a subpage, or a run of pages. When a subpage is freed from its PoolSubpage, it might be added back to the
* subpage pool of the owning PoolArena. If the subpage pool in the PoolArena has at least one other PoolSubpage of
* the given elemSize, we can completely free the owning page, so it is available for subsequent allocations.
*
* @param handle handle to free
*/
void free(long handle, int normCapacity) {
baseDrop.drop(base); // Decrement reference count.
if (isSubpage(handle)) {
int sizeIdx = arena.size2SizeIdx(normCapacity);
PoolSubpage head = arena.findSubpagePoolHead(sizeIdx);
int sIdx = runOffset(handle);
PoolSubpage subpage = subpages[sIdx];
assert subpage != null && subpage.doNotDestroy;
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is needed as we may add it back and so alter the linked-list structure.
synchronized (head) {
if (subpage.free(head, bitmapIdx(handle))) {
//the subpage is still used, do not free it
return;
}
assert !subpage.doNotDestroy;
// Null out slot in the array as it was freed, and we should not use it anymore.
subpages[sIdx] = null;
}
}
//start free run
int pages = runPages(handle);
synchronized (runsAvail) {
// collapse continuous runs, successfully collapsed runs
// will be removed from runsAvail and runsAvailMap
long finalRun = collapseRuns(handle);
//set run as not used
finalRun &= ~(1L << IS_USED_SHIFT);
//if it is a subpage, set it to run
finalRun &= ~(1L << IS_SUBPAGE_SHIFT);
insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun);
freeBytes += pages << pageShifts;
}
}
private long collapseRuns(long handle) {
return collapseNext(collapsePast(handle));
}
private long collapsePast(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long pastRun = getAvailRunByOffset(runOffset - 1);
if (pastRun == -1) {
return handle;
}
int pastOffset = runOffset(pastRun);
int pastPages = runPages(pastRun);
//is continuous
if (pastRun != handle && pastOffset + pastPages == runOffset) {
//remove past run
removeAvailRun(pastRun);
handle = toRunHandle(pastOffset, pastPages + runPages, 0);
} else {
return handle;
}
}
}
private long collapseNext(long handle) {
for (;;) {
int runOffset = runOffset(handle);
int runPages = runPages(handle);
long nextRun = getAvailRunByOffset(runOffset + runPages);
if (nextRun == -1) {
return handle;
}
int nextOffset = runOffset(nextRun);
int nextPages = runPages(nextRun);
//is continuous
if (nextRun != handle && runOffset + runPages == nextOffset) {
//remove next run
removeAvailRun(nextRun);
handle = toRunHandle(runOffset, runPages + nextPages, 0);
} else {
return handle;
}
}
}
private static long toRunHandle(int runOffset, int runPages, int inUsed) {
return (long) runOffset << RUN_OFFSET_SHIFT
| (long) runPages << SIZE_SHIFT
| (long) inUsed << IS_USED_SHIFT;
}
UntetheredMemory allocateBuffer(long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
if (isRun(handle)) {
int offset = runOffset(handle) << pageShifts;
int maxLength = runSize(pageShifts, handle);
PoolThreadCache poolThreadCache = arena.parent.threadCache();
initAllocatorControl(control, poolThreadCache, handle, maxLength);
ArcDrop.acquire(baseDrop);
return new UntetheredChunkAllocation(
memory, this, poolThreadCache, handle, maxLength, offset, size);
} else {
return allocateBufferWithSubpage(handle, size, threadCache, control);
}
}
UntetheredMemory allocateBufferWithSubpage(long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
int runOffset = runOffset(handle);
int bitmapIdx = bitmapIdx(handle);
PoolSubpage s = subpages[runOffset];
assert s.doNotDestroy;
assert size <= s.elemSize;
int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize;
initAllocatorControl(control, threadCache, handle, s.elemSize);
ArcDrop.acquire(baseDrop);
return new UntetheredChunkAllocation(memory, this, threadCache, handle, s.elemSize, offset, size);
}
@SuppressWarnings("unchecked")
private static final class UntetheredChunkAllocation implements UntetheredMemory {
private final Object memory;
private final PoolChunk chunk;
private final PoolThreadCache threadCache;
private final long handle;
private final int maxLength;
private final int offset;
private final int size;
private UntetheredChunkAllocation(
Object memory, PoolChunk chunk, PoolThreadCache threadCache,
long handle, int maxLength, int offset, int size) {
this.memory = memory;
this.chunk = chunk;
this.threadCache = threadCache;
this.handle = handle;
this.maxLength = maxLength;
this.offset = offset;
this.size = size;
}
@Override
public <Memory> Memory memory() {
return (Memory) chunk.arena.manager.sliceMemory(memory, offset, size);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
PooledDrop pooledDrop = new PooledDrop(chunk.arena, chunk, threadCache, handle, maxLength);
return (Drop<BufferType>) CleanerDrop.wrap(pooledDrop);
}
}
private void initAllocatorControl(PooledAllocatorControl control, PoolThreadCache threadCache, long handle,
int normSize) {
control.arena = arena;
control.chunk = this;
control.threadCache = threadCache;
control.handle = handle;
control.normSize = normSize;
}
@Override
public int chunkSize() {
return chunkSize;
}
@Override
public int freeBytes() {
synchronized (arena) {
return freeBytes;
}
}
@Override
public String toString() {
final int freeBytes;
synchronized (arena) {
freeBytes = this.freeBytes;
}
return new StringBuilder()
.append("Chunk(")
.append(Integer.toHexString(System.identityHashCode(this)))
.append(": ")
.append(usage(freeBytes))
.append("%, ")
.append(chunkSize - freeBytes)
.append('/')
.append(chunkSize)
.append(')')
.toString();
}
void destroy() {
baseDrop.drop(base); // Decrement reference count from the chunk (allocated buffers may keep the base alive)
}
static int runOffset(long handle) {
return (int) (handle >> RUN_OFFSET_SHIFT);
}
static int runSize(int pageShifts, long handle) {
return runPages(handle) << pageShifts;
}
static int runPages(long handle) {
return (int) (handle >> SIZE_SHIFT & 0x7fff);
}
static boolean isUsed(long handle) {
return (handle >> IS_USED_SHIFT & 1) == 1L;
}
static boolean isRun(long handle) {
return !isSubpage(handle);
}
static boolean isSubpage(long handle) {
return (handle >> IS_SUBPAGE_SHIFT & 1) == 1L;
}
static int bitmapIdx(long handle) {
return (int) handle;
}
}

View File

@ -0,0 +1,250 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.util.internal.StringUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import static java.lang.Math.max;
import static java.lang.Math.min;
final class PoolChunkList implements PoolChunkListMetric {
private static final Iterator<PoolChunkMetric> EMPTY_METRICS = Collections.emptyIterator();
private final PoolArena arena;
private final PoolChunkList nextList;
private final int minUsage;
private final int maxUsage;
private final int maxCapacity;
private PoolChunk head;
private final int freeMinThreshold;
private final int freeMaxThreshold;
// This is only updated once, when the linked list of PoolChunkLists is created in the PoolArena constructor.
private PoolChunkList prevList;
PoolChunkList(PoolArena arena, PoolChunkList nextList, int minUsage, int maxUsage, int chunkSize) {
assert minUsage <= maxUsage;
this.arena = arena;
this.nextList = nextList;
this.minUsage = minUsage;
this.maxUsage = maxUsage;
maxCapacity = calculateMaxCapacity(minUsage, chunkSize);
// the thresholds are aligned with PoolChunk.usage() logic:
// 1) basic logic: usage() = 100 - freeBytes * 100L / chunkSize
// so, for example: (usage() >= maxUsage) condition can be transformed in the following way:
// 100 - freeBytes * 100L / chunkSize >= maxUsage
// freeBytes <= chunkSize * (100 - maxUsage) / 100
// let freeMinThreshold = chunkSize * (100 - maxUsage) / 100, then freeBytes <= freeMinThreshold
//
// 2) usage() returns an int value and rounds down (floor) during the calculation;
// to stay aligned, the absolute thresholds have to be shifted by "the rounding step":
// freeBytes * 100 / chunkSize < 1
// the condition can be converted to: freeBytes < 1 * chunkSize / 100
// this is why we have the + 0.99999999 shift. An example of why a simple +1 shift cannot be used:
// freeBytes = 16777216 == freeMaxThreshold: 16777216, usage = 0 < minUsage: 1, chunkSize: 16777216
// At the same time we want to have zero thresholds in case of (maxUsage == 100) and (minUsage == 100).
//
freeMinThreshold = maxUsage == 100 ? 0 : (int) (chunkSize * (100.0 - maxUsage + 0.99999999) / 100L);
freeMaxThreshold = minUsage == 100 ? 0 : (int) (chunkSize * (100.0 - minUsage + 0.99999999) / 100L);
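// Worked example (illustrative numbers, not defaults): with chunkSize = 16777216 (16 MiB) and maxUsage = 50,
// freeMinThreshold = (int) (16777216 * (100.0 - 50 + 0.99999999) / 100) = 8556380.
// usage(freeBytes = 8556380) = 100 - 8556380 * 100 / 16777216 = 50 == maxUsage, while
// usage(freeBytes = 8556381) = 49, so the threshold agrees exactly with the usage() logic above.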
}
/**
* Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s
* that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings.
*/
private static int calculateMaxCapacity(int minUsage, int chunkSize) {
minUsage = minUsage0(minUsage);
if (minUsage == 100) {
// If the minUsage is 100 we can not allocate anything out of this list.
return 0;
}
// Calculate the maximum amount of bytes that can be allocated from a PoolChunk in this PoolChunkList.
//
// As an example:
// - If a PoolChunkList has minUsage == 25 we are allowed to allocate at most 75% of the chunkSize because
// this is the maximum amount available in any PoolChunk in this PoolChunkList.
return (int) (chunkSize * (100L - minUsage) / 100L);
}
void prevList(PoolChunkList prevList) {
assert this.prevList == null;
this.prevList = prevList;
}
UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) {
int normCapacity = arena.sizeIdx2size(sizeIdx);
if (normCapacity > maxCapacity) {
// Either this PoolChunkList is empty, or the requested capacity is larger than the capacity which can
// be handled by the PoolChunks that are contained in this PoolChunkList.
return null;
}
for (PoolChunk cur = head; cur != null; cur = cur.next) {
UntetheredMemory memory = cur.allocate(size, sizeIdx, threadCache, control);
if (memory != null) {
if (cur.freeBytes <= freeMinThreshold) {
remove(cur);
nextList.add(cur);
}
return memory;
}
}
return null;
}
boolean free(PoolChunk chunk, long handle, int normCapacity) {
chunk.free(handle, normCapacity);
if (chunk.freeBytes > freeMaxThreshold) {
remove(chunk);
// Move the PoolChunk down the PoolChunkList linked-list.
return move0(chunk);
}
return true;
}
private boolean move(PoolChunk chunk) {
if (chunk.freeBytes > freeMaxThreshold) {
// Move the PoolChunk down the PoolChunkList linked-list.
return move0(chunk);
}
// PoolChunk fits into this PoolChunkList, adding it here.
add0(chunk);
return true;
}
/**
* Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list, so it will end up in the right
* {@link PoolChunkList} that has the correct minUsage / maxUsage in respect to {@link PoolChunk#usage()}.
*/
private boolean move0(PoolChunk chunk) {
if (prevList == null) {
// There is no previous PoolChunkList, so return false, which results in the PoolChunk being destroyed
// and all memory associated with it being released.
return false;
}
return prevList.move(chunk);
}
void add(PoolChunk chunk) {
if (chunk.freeBytes <= freeMinThreshold) {
nextList.add(chunk);
return;
}
add0(chunk);
}
/**
* Adds the {@link PoolChunk} to this {@link PoolChunkList}.
*/
void add0(PoolChunk chunk) {
chunk.parent = this;
if (head == null) {
head = chunk;
chunk.prev = null;
chunk.next = null;
} else {
chunk.prev = null;
chunk.next = head;
head.prev = chunk;
head = chunk;
}
}
private void remove(PoolChunk cur) {
if (cur == head) {
head = cur.next;
if (head != null) {
head.prev = null;
}
} else {
PoolChunk next = cur.next;
cur.prev.next = next;
if (next != null) {
next.prev = cur.prev;
}
}
}
@Override
public int minUsage() {
return minUsage0(minUsage);
}
@Override
public int maxUsage() {
return min(maxUsage, 100);
}
private static int minUsage0(int value) {
return max(1, value);
}
@Override
public Iterator<PoolChunkMetric> iterator() {
synchronized (arena) {
if (head == null) {
return EMPTY_METRICS;
}
List<PoolChunkMetric> metrics = new ArrayList<>();
for (PoolChunk cur = head;;) {
metrics.add(cur);
cur = cur.next;
if (cur == null) {
break;
}
}
return metrics.iterator();
}
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
synchronized (arena) {
if (head == null) {
return "none";
}
for (PoolChunk cur = head;;) {
buf.append(cur);
cur = cur.next;
if (cur == null) {
break;
}
buf.append(StringUtil.NEWLINE);
}
}
return buf.toString();
}
void destroy() {
PoolChunk chunk = head;
while (chunk != null) {
chunk.destroy();
chunk = chunk.next;
}
head = null;
}
}

View File

@ -0,0 +1,32 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a list of chunks.
*/
public interface PoolChunkListMetric extends Iterable<PoolChunkMetric> {
/**
* Return the minimum usage of the chunk list before which chunks are promoted to the previous list.
*/
int minUsage();
/**
* Return the maximum usage of the chunk list after which chunks are promoted to the next list.
*/
int maxUsage();
}

View File

@ -0,0 +1,37 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a chunk.
*/
public interface PoolChunkMetric {
/**
* Return the percentage of the current usage of the chunk.
*/
int usage();
/**
* Return the size of the chunk in bytes. This is the maximum number of bytes that can be served out of the chunk.
*/
int chunkSize();
/**
* Return the number of free bytes in the chunk.
*/
int freeBytes();
}

View File

@ -0,0 +1,287 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import static io.netty.buffer.api.pool.PoolChunk.RUN_OFFSET_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.SIZE_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.IS_USED_SHIFT;
import static io.netty.buffer.api.pool.PoolChunk.IS_SUBPAGE_SHIFT;
import static io.netty.buffer.api.pool.SizeClasses.LOG2_QUANTUM;
final class PoolSubpage implements PoolSubpageMetric {
final PoolChunk chunk;
private final int pageShifts;
private final int runOffset;
private final int runSize;
private final long[] bitmap;
PoolSubpage prev;
PoolSubpage next;
boolean doNotDestroy;
int elemSize;
private int maxNumElems;
private int bitmapLength;
private int nextAvail;
private int numAvail;
/** Special constructor that creates a linked list head */
PoolSubpage() {
chunk = null;
pageShifts = -1;
runOffset = -1;
elemSize = -1;
runSize = -1;
bitmap = null;
}
PoolSubpage(PoolSubpage head, PoolChunk chunk, int pageShifts, int runOffset, int runSize, int elemSize) {
this.chunk = chunk;
this.pageShifts = pageShifts;
this.runOffset = runOffset;
this.runSize = runSize;
this.elemSize = elemSize;
bitmap = new long[runSize >>> 6 + LOG2_QUANTUM]; // runSize / 64 / QUANTUM
doNotDestroy = true;
if (elemSize != 0) {
maxNumElems = numAvail = runSize / elemSize;
nextAvail = 0;
bitmapLength = maxNumElems >>> 6;
if ((maxNumElems & 63) != 0) {
bitmapLength ++;
}
for (int i = 0; i < bitmapLength; i ++) {
bitmap[i] = 0;
}
}
addToPool(head);
}
/**
* Returns the bitmap index of the subpage allocation.
*/
long allocate() {
if (numAvail == 0 || !doNotDestroy) {
return -1;
}
final int bitmapIdx = getNextAvail();
int q = bitmapIdx >>> 6;
int r = bitmapIdx & 63;
assert (bitmap[q] >>> r & 1) == 0;
bitmap[q] |= 1L << r;
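// For example, bitmapIdx = 70 gives q = 1 and r = 6, marking bit 6 of bitmap[1] as used.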
if (-- numAvail == 0) {
removeFromPool();
}
return toHandle(bitmapIdx);
}
/**
* @return {@code true} if this subpage is in use.
* {@code false} if this subpage is not used by its chunk and thus it's OK to be released.
*/
boolean free(PoolSubpage head, int bitmapIdx) {
if (elemSize == 0) {
return true;
}
int q = bitmapIdx >>> 6;
int r = bitmapIdx & 63;
assert (bitmap[q] >>> r & 1) != 0;
bitmap[q] ^= 1L << r;
setNextAvail(bitmapIdx);
if (numAvail++ == 0) {
addToPool(head);
// When maxNumElems == 1, the maximum numAvail is also 1.
// Each of these PoolSubpages will pass through here when a free operation is performed.
// If we returned true directly from here, the rest of the code would be unreachable and the
// subpage would never actually be recycled. So return true only when maxNumElems > 1.
if (maxNumElems > 1) {
return true;
}
}
if (numAvail != maxNumElems) {
return true;
} else {
// Subpage not in use (numAvail == maxNumElems)
if (prev == next) {
// Do not remove if this subpage is the only one left in the pool.
return true;
}
// Remove this subpage from the pool if there are other subpages left in the pool.
doNotDestroy = false;
removeFromPool();
return false;
}
}
private void addToPool(PoolSubpage head) {
assert prev == null && next == null;
prev = head;
next = head.next;
next.prev = this;
head.next = this;
}
private void removeFromPool() {
assert prev != null && next != null;
prev.next = next;
next.prev = prev;
next = null;
prev = null;
}
private void setNextAvail(int bitmapIdx) {
nextAvail = bitmapIdx;
}
private int getNextAvail() {
int nextAvail = this.nextAvail;
if (nextAvail >= 0) {
this.nextAvail = -1;
return nextAvail;
}
return findNextAvail();
}
private int findNextAvail() {
final long[] bitmap = this.bitmap;
final int bitmapLength = this.bitmapLength;
for (int i = 0; i < bitmapLength; i ++) {
long bits = bitmap[i];
if (~bits != 0) {
return findNextAvail0(i, bits);
}
}
return -1;
}
private int findNextAvail0(int i, long bits) {
final int maxNumElems = this.maxNumElems;
final int baseVal = i << 6;
for (int j = 0; j < 64; j ++) {
if ((bits & 1) == 0) {
int val = baseVal | j;
if (val < maxNumElems) {
return val;
} else {
break;
}
}
bits >>>= 1;
}
return -1;
}
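// Illustrative example: a subpage whose run spans a single page at runOffset 3, with bitmapIdx 5,
// packs runOffset = 3, pages = 1, isUsed = 1, isSubpage = 1 and bitmapIdx = 5 into one long handle.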
private long toHandle(int bitmapIdx) {
int pages = runSize >> pageShifts;
return (long) runOffset << RUN_OFFSET_SHIFT
| (long) pages << SIZE_SHIFT
| 1L << IS_USED_SHIFT
| 1L << IS_SUBPAGE_SHIFT
| bitmapIdx;
}
@Override
public String toString() {
final boolean doNotDestroy;
final int maxNumElems;
final int numAvail;
final int elemSize;
if (chunk == null) {
// This is the head so there is no need to synchronize at all as these never change.
doNotDestroy = true;
maxNumElems = 0;
numAvail = 0;
elemSize = -1;
} else {
synchronized (chunk.arena) {
if (!this.doNotDestroy) {
doNotDestroy = false;
// Not used for creating the String.
maxNumElems = numAvail = elemSize = -1;
} else {
doNotDestroy = true;
maxNumElems = this.maxNumElems;
numAvail = this.numAvail;
elemSize = this.elemSize;
}
}
}
if (!doNotDestroy) {
return "(" + runOffset + ": not in use)";
}
return "(" + runOffset + ": " + (maxNumElems - numAvail) + '/' + maxNumElems +
", offset: " + runOffset + ", length: " + runSize + ", elemSize: " + elemSize + ')';
}
@Override
public int maxNumElements() {
if (chunk == null) {
// It's the head.
return 0;
}
synchronized (chunk.arena) {
return maxNumElems;
}
}
@Override
public int numAvailable() {
if (chunk == null) {
// It's the head.
return 0;
}
synchronized (chunk.arena) {
return numAvail;
}
}
@Override
public int elementSize() {
if (chunk == null) {
// It's the head.
return -1;
}
synchronized (chunk.arena) {
return elemSize;
}
}
@Override
public int pageSize() {
return 1 << pageShifts;
}
void destroy() {
if (chunk != null) {
chunk.destroy();
}
}
}

View File

@ -0,0 +1,42 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Metrics for a sub-page.
*/
public interface PoolSubpageMetric {
/**
* Return the maximum number of elements that can be allocated out of the sub-page.
*/
int maxNumElements();
/**
* Return the number of available elements to be allocated.
*/
int numAvailable();
/**
* Return the size (in bytes) of the elements that will be allocated.
*/
int elementSize();
/**
* Return the page size (in bytes) of this page.
*/
int pageSize();
}

View File

@ -0,0 +1,393 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.pool.PoolArena.SizeClass;
import io.netty.util.internal.MathUtil;
import io.netty.util.internal.ObjectPool;
import io.netty.util.internal.ObjectPool.Handle;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import static io.netty.buffer.api.pool.PoolArena.SizeClass.Normal;
import static io.netty.buffer.api.pool.PoolArena.SizeClass.Small;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
/**
* Acts as a thread cache for allocations. This implementation is modelled after
* <a href="https://people.freebsd.org/~jasone/jemalloc/bsdcan2006/jemalloc.pdf">jemalloc</a> and the described
* techniques of
* <a href="https://www.facebook.com/notes/facebook-engineering/scalable-memory-allocation-using-jemalloc/480222803919">
* Scalable memory allocation using jemalloc</a>.
*/
final class PoolThreadCache {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(PoolThreadCache.class);
private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1;
final PoolArena arena;
// Hold the caches for the different size classes, which are small and normal.
private final MemoryRegionCache[] smallSubPageCaches;
private final MemoryRegionCache[] normalCaches;
private final int freeSweepAllocationThreshold;
private int allocations;
PoolThreadCache(PoolArena arena,
int smallCacheSize, int normalCacheSize, int maxCachedBufferCapacity,
int freeSweepAllocationThreshold) {
checkPositiveOrZero(maxCachedBufferCapacity, "maxCachedBufferCapacity");
this.freeSweepAllocationThreshold = freeSweepAllocationThreshold;
this.arena = arena;
if (arena != null) {
// Create the caches for the small and normal size classes of this arena.
smallSubPageCaches = createSubPageCaches(
smallCacheSize, arena.numSmallSubpagePools);
normalCaches = createNormalCaches(
normalCacheSize, maxCachedBufferCapacity, arena);
arena.numThreadCaches.getAndIncrement();
} else {
// No arena is configured, so just null out all caches.
smallSubPageCaches = null;
normalCaches = null;
}
// Only check if there are caches in use.
if ((smallSubPageCaches != null || normalCaches != null)
&& freeSweepAllocationThreshold < 1) {
throw new IllegalArgumentException("freeSweepAllocationThreshold: "
+ freeSweepAllocationThreshold + " (expected: > 0)");
}
}
private static MemoryRegionCache[] createSubPageCaches(
int cacheSize, int numCaches) {
if (cacheSize > 0 && numCaches > 0) {
MemoryRegionCache[] cache = new MemoryRegionCache[numCaches];
for (int i = 0; i < cache.length; i++) {
// TODO: maybe use cacheSize / cache.length
cache[i] = new SubPageMemoryRegionCache(cacheSize);
}
return cache;
} else {
return null;
}
}
private static MemoryRegionCache[] createNormalCaches(
int cacheSize, int maxCachedBufferCapacity, PoolArena area) {
if (cacheSize > 0 && maxCachedBufferCapacity > 0) {
int max = Math.min(area.chunkSize, maxCachedBufferCapacity);
// Create as many normal caches as we support based on how many sizeIdx we have and what the upper
// bound is that we want to cache in general.
List<MemoryRegionCache> cache = new ArrayList<>();
for (int idx = area.numSmallSubpagePools; idx < area.nSizes && area.sizeIdx2size(idx) <= max; idx++) {
cache.add(new NormalMemoryRegionCache(cacheSize));
}
return cache.toArray(MemoryRegionCache[]::new);
} else {
return null;
}
}
// val > 0
static int log2(int val) {
return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val);
}
/**
* Try to allocate a small buffer out of the cache. Returns the cached memory if successful, {@code null} otherwise.
*/
UntetheredMemory allocateSmall(PooledAllocatorControl control, int size, int sizeIdx) {
return allocate(cacheForSmall(sizeIdx), control, size);
}
/**
* Try to allocate a normal buffer out of the cache. Returns the cached memory if successful, {@code null} otherwise.
*/
UntetheredMemory allocateNormal(PoolArena area, PooledAllocatorControl control, int size, int sizeIdx) {
return allocate(cacheForNormal(area, sizeIdx), control, size);
}
private UntetheredMemory allocate(MemoryRegionCache cache, PooledAllocatorControl control, int size) {
if (cache == null) {
// No cache found, so just return null here.
return null;
}
UntetheredMemory allocated = cache.allocate(size, this, control);
if (++allocations >= freeSweepAllocationThreshold) {
allocations = 0;
trim();
}
return allocated;
}
/**
* Add the {@link PoolChunk} and {@code handle} to the cache if there is enough room.
* Returns {@code true} if it fit into the cache, {@code false} otherwise.
*/
boolean add(PoolArena area, PoolChunk chunk,
long handle, int normCapacity, SizeClass sizeClass) {
int sizeIdx = area.size2SizeIdx(normCapacity);
MemoryRegionCache cache = cache(area, sizeIdx, sizeClass);
if (cache == null) {
return false;
}
return cache.add(chunk, handle, normCapacity);
}
private MemoryRegionCache cache(PoolArena area, int sizeIdx, SizeClass sizeClass) {
if (sizeClass == Normal) {
return cacheForNormal(area, sizeIdx);
}
if (sizeClass == Small) {
return cacheForSmall(sizeIdx);
}
throw new AssertionError("Unexpected size class: " + sizeClass);
}
/**
* Should be called when the Thread that uses this cache is about to exit, to release resources held by the cache.
*/
void free() {
int numFreed = free(smallSubPageCaches) + free(normalCaches);
if (numFreed > 0 && logger.isDebugEnabled()) {
logger.debug("Freed {} thread-local buffer(s) from thread: {}", numFreed,
Thread.currentThread().getName());
}
if (arena != null) {
arena.numThreadCaches.getAndDecrement();
}
}
private static int free(MemoryRegionCache[] caches) {
if (caches == null) {
return 0;
}
int numFreed = 0;
for (MemoryRegionCache c: caches) {
numFreed += free(c);
}
return numFreed;
}
private static int free(MemoryRegionCache cache) {
if (cache == null) {
return 0;
}
return cache.free();
}
void trim() {
trim(smallSubPageCaches);
trim(normalCaches);
}
private static void trim(MemoryRegionCache[] caches) {
if (caches == null) {
return;
}
for (MemoryRegionCache c: caches) {
trim(c);
}
}
private static void trim(MemoryRegionCache cache) {
if (cache == null) {
return;
}
cache.trim();
}
private MemoryRegionCache cacheForSmall(int sizeIdx) {
return cache(smallSubPageCaches, sizeIdx);
}
private MemoryRegionCache cacheForNormal(PoolArena area, int sizeIdx) {
// We need to subtract area.numSmallSubpagePools as sizeIdx is the overall index for all sizes.
int idx = sizeIdx - area.numSmallSubpagePools;
return cache(normalCaches, idx);
}
private static MemoryRegionCache cache(MemoryRegionCache[] cache, int sizeIdx) {
if (cache == null || sizeIdx > cache.length - 1) {
return null;
}
return cache[sizeIdx];
}
/**
* Cache used for buffers which are backed by SMALL size.
*/
private static final class SubPageMemoryRegionCache extends MemoryRegionCache {
SubPageMemoryRegionCache(int size) {
super(size, Small);
}
@Override
protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
return chunk.allocateBufferWithSubpage(handle, size, threadCache, control);
}
}
/**
* Cache used for buffers which are backed by NORMAL size.
*/
private static final class NormalMemoryRegionCache extends MemoryRegionCache {
NormalMemoryRegionCache(int size) {
super(size, Normal);
}
@Override
protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache,
PooledAllocatorControl control) {
return chunk.allocateBuffer(handle, size, threadCache, control);
}
}
private abstract static class MemoryRegionCache {
private final int size;
private final Queue<Entry> queue;
private final SizeClass sizeClass;
private int allocations;
MemoryRegionCache(int size, SizeClass sizeClass) {
this.size = MathUtil.safeFindNextPositivePowerOfTwo(size);
queue = PlatformDependent.newFixedMpscQueue(this.size);
this.sizeClass = sizeClass;
}
/**
* Allocate a new {@link UntetheredMemory} using the provided chunk and handle with the capacity restrictions.
*/
protected abstract UntetheredMemory allocBuf(
PoolChunk chunk, long handle, int size, PoolThreadCache threadCache, PooledAllocatorControl control);
/**
* Add to cache if not already full.
*/
public final boolean add(PoolChunk chunk, long handle, int normCapacity) {
Entry entry = newEntry(chunk, handle, normCapacity);
boolean queued = queue.offer(entry);
if (!queued) {
// If it was not possible to cache the chunk, immediately recycle the entry
entry.recycle();
}
return queued;
}
/**
* Allocate something out of the cache if possible and remove the entry from the cache.
*/
public final UntetheredMemory allocate(int size, PoolThreadCache threadCache, PooledAllocatorControl control) {
Entry entry = queue.poll();
if (entry == null) {
return null;
}
UntetheredMemory buffer = allocBuf(entry.chunk, entry.handle, size, threadCache, control);
entry.recycle();
// The allocations counter is not thread-safe, which is fine as this is only ever called from the same thread.
allocations++;
return buffer;
}
/**
* Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s.
*/
public final int free() {
return free(Integer.MAX_VALUE);
}
private int free(int max) {
int numFreed = 0;
for (; numFreed < max; numFreed++) {
Entry entry = queue.poll();
if (entry != null) {
freeEntry(entry);
} else {
// all cleared
return numFreed;
}
}
return numFreed;
}
/**
* Free up cached {@link PoolChunk}s if not allocated frequently enough.
*/
public final void trim() {
int free = size - allocations;
allocations = 0;
// Fewer allocations than the cache capacity happened since the last trim,
// so free up the entries that were not used.
if (free > 0) {
free(free);
}
}
private void freeEntry(Entry entry) {
PoolChunk chunk = entry.chunk;
long handle = entry.handle;
entry.recycle();
chunk.arena.freeChunk(chunk, handle, entry.normCapacity, sizeClass);
}
static final class Entry {
final Handle<Entry> recyclerHandle;
PoolChunk chunk;
long handle = -1;
int normCapacity;
Entry(Handle<Entry> recyclerHandle) {
this.recyclerHandle = recyclerHandle;
}
void recycle() {
chunk = null;
handle = -1;
recyclerHandle.recycle(this);
}
}
private static Entry newEntry(PoolChunk chunk, long handle, int normCapacity) {
Entry entry = RECYCLER.get();
entry.chunk = chunk;
entry.handle = handle;
entry.normCapacity = normCapacity;
return entry;
}
private static final ObjectPool<Entry> RECYCLER = ObjectPool.newPool(handle -> new Entry(handle));
}
}

View File

@ -0,0 +1,38 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
class PooledAllocatorControl implements AllocatorControl {
public PooledBufferAllocator parent;
public PoolArena arena;
public PoolChunk chunk;
public PoolThreadCache threadCache;
public long handle;
public int normSize;
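// These fields are populated by PoolChunk.initAllocatorControl for each allocation, so that
// recoverMemory can hand the memory back to the arena, chunk and thread cache it came from.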
@Override
public UntetheredMemory allocateUntethered(Buffer originator, int size) {
return parent.allocate(this, size);
}
@Override
public void recoverMemory(Object memory) {
arena.free(chunk, handle, normSize, threadCache);
}
}

View File

@ -0,0 +1,558 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl.UntetheredMemory;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.MemoryManager;
import io.netty.util.NettyRuntime;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.FastThreadLocal;
import io.netty.util.concurrent.FastThreadLocalThread;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.SystemPropertyUtil;
import io.netty.util.internal.ThreadExecutorMap;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
import static java.util.Objects.requireNonNull;
public class PooledBufferAllocator implements BufferAllocator, BufferAllocatorMetricProvider {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(PooledBufferAllocator.class);
private static final int DEFAULT_NUM_HEAP_ARENA;
private static final int DEFAULT_NUM_DIRECT_ARENA;
private static final int DEFAULT_PAGE_SIZE;
private static final int DEFAULT_MAX_ORDER; // 8192 << 9 = 4 MiB per chunk
private static final int DEFAULT_SMALL_CACHE_SIZE;
private static final int DEFAULT_NORMAL_CACHE_SIZE;
static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
private static final int DEFAULT_CACHE_TRIM_INTERVAL;
private static final long DEFAULT_CACHE_TRIM_INTERVAL_MILLIS;
private static final boolean DEFAULT_USE_CACHE_FOR_ALL_THREADS;
private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT;
static final int DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK;
private static final int MIN_PAGE_SIZE = 4096;
private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE + 1) / 2);
private final Runnable trimTask = this::trimCurrentThreadCache;
static {
int defaultAlignment = SystemPropertyUtil.getInt(
"io.netty.allocator.directMemoryCacheAlignment", 0);
int defaultPageSize = SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192);
Throwable pageSizeFallbackCause = null;
try {
validateAndCalculatePageShifts(defaultPageSize, defaultAlignment);
} catch (Throwable t) {
pageSizeFallbackCause = t;
defaultPageSize = 8192;
defaultAlignment = 0;
}
DEFAULT_PAGE_SIZE = defaultPageSize;
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment;
int defaultMaxOrder = SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 9);
Throwable maxOrderFallbackCause = null;
try {
validateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
} catch (Throwable t) {
maxOrderFallbackCause = t;
defaultMaxOrder = 11;
}
DEFAULT_MAX_ORDER = defaultMaxOrder;
// Determine reasonable default for nHeapArena and nDirectArena.
// Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.
final Runtime runtime = Runtime.getRuntime();
/*
* We use 2 * available processors by default to reduce contention as we use 2 * available processors for the
* number of EventLoops in NIO and EPOLL as well. If we choose a smaller number we will run into hot spots as
* allocation and de-allocation needs to be synchronized on the PoolArena.
*
* See https://github.com/netty/netty/issues/3888.
*/
final int defaultMinNumArena = NettyRuntime.availableProcessors() * 2;
final int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
DEFAULT_NUM_HEAP_ARENA = Math.max(0,
SystemPropertyUtil.getInt(
"io.netty.allocator.numArenas",
(int) Math.min(
defaultMinNumArena,
runtime.maxMemory() / defaultChunkSize / 2 / 3)));
DEFAULT_NUM_DIRECT_ARENA = Math.max(0,
SystemPropertyUtil.getInt(
"io.netty.allocator.numDirectArenas",
(int) Math.min(
defaultMinNumArena,
PlatformDependent.maxDirectMemory() / defaultChunkSize / 2 / 3)));
// cache sizes
DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.smallCacheSize", 256);
DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.normalCacheSize", 64);
// 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in
// 'Scalable memory allocation using jemalloc'
DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.getInt(
"io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);
// the number of threshold of allocations when cached entries will be freed up if not frequently used
DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.getInt(
"io.netty.allocator.cacheTrimInterval", 8192);
DEFAULT_CACHE_TRIM_INTERVAL_MILLIS = SystemPropertyUtil.getLong(
"io.netty.allocator.cacheTrimIntervalMillis", 0);
DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean(
"io.netty.allocator.useCacheForAllThreads", false);
// Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array
// of 1024 elements. Otherwise, we would allocate 2048 and only use 1024 which is wasteful.
DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt(
"io.netty.allocator.maxCachedByteBuffersPerChunk", 1023);
if (logger.isDebugEnabled()) {
logger.debug("-Dio.netty.allocator.numArenas: {}", DEFAULT_NUM_HEAP_ARENA);
logger.debug("-Dio.netty.allocator.numDirectArenas: {}", DEFAULT_NUM_DIRECT_ARENA);
if (pageSizeFallbackCause == null) {
logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
} else {
logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
}
if (maxOrderFallbackCause == null) {
logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
} else {
logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
}
logger.debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
logger.debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
logger.debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
logger.debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
logger.debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
logger.debug("-Dio.netty.allocator.cacheTrimIntervalMillis: {}", DEFAULT_CACHE_TRIM_INTERVAL_MILLIS);
logger.debug("-Dio.netty.allocator.useCacheForAllThreads: {}", DEFAULT_USE_CACHE_FOR_ALL_THREADS);
logger.debug("-Dio.netty.allocator.maxCachedByteBuffersPerChunk: {}",
DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK);
}
}
private final MemoryManager manager;
private final PoolArena[] arenas;
private final int smallCacheSize;
private final int normalCacheSize;
private final List<PoolArenaMetric> arenaMetrics;
private final List<PoolArenaMetric> arenaMetricsView;
private final PoolThreadLocalCache threadCache;
private final int chunkSize;
private final PooledBufferAllocatorMetric metric;
public PooledBufferAllocator(MemoryManager manager) {
this(manager, manager.isNative()? DEFAULT_NUM_DIRECT_ARENA : DEFAULT_NUM_HEAP_ARENA,
DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER, DEFAULT_SMALL_CACHE_SIZE,
DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS,
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder) {
this(manager, numArenas, pageSize, maxOrder, DEFAULT_SMALL_CACHE_SIZE,
DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS,
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder,
int smallCacheSize, int normalCacheSize,
boolean useCacheForAllThreads) {
this(manager, numArenas, pageSize, maxOrder,
smallCacheSize, normalCacheSize,
useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT);
}
public PooledBufferAllocator(MemoryManager manager, int numArenas, int pageSize, int maxOrder,
int smallCacheSize, int normalCacheSize,
boolean useCacheForAllThreads, int directMemoryCacheAlignment) {
this.manager = requireNonNull(manager, "MemoryManager");
threadCache = new PoolThreadLocalCache(useCacheForAllThreads);
this.smallCacheSize = smallCacheSize;
this.normalCacheSize = normalCacheSize;
if (directMemoryCacheAlignment != 0) {
if (!PlatformDependent.hasAlignDirectByteBuffer()) {
throw new UnsupportedOperationException("Buffer alignment is not supported. " +
"Either Unsafe or ByteBuffer.alignSlice() must be available.");
}
// Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple.
pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment);
}
chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder);
checkPositiveOrZero(numArenas, "numArenas");
checkPositiveOrZero(directMemoryCacheAlignment, "directMemoryCacheAlignment");
if (directMemoryCacheAlignment > 0 && !isDirectMemoryCacheAlignmentSupported()) {
throw new IllegalArgumentException("directMemoryCacheAlignment is not supported");
}
if ((directMemoryCacheAlignment & -directMemoryCacheAlignment) != directMemoryCacheAlignment) {
throw new IllegalArgumentException("directMemoryCacheAlignment: "
+ directMemoryCacheAlignment + " (expected: power of two)");
}
int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment);
if (numArenas > 0) {
arenas = newArenaArray(numArenas);
List<PoolArenaMetric> metrics = new ArrayList<>(arenas.length);
for (int i = 0; i < arenas.length; i ++) {
PoolArena arena = new PoolArena(this, manager,
pageSize, pageShifts, chunkSize,
directMemoryCacheAlignment);
arenas[i] = arena;
metrics.add(arena);
}
arenaMetrics = metrics;
arenaMetricsView = Collections.unmodifiableList(metrics);
} else {
arenas = null;
arenaMetrics = new ArrayList<>(1);
arenaMetricsView = Collections.emptyList();
}
metric = new PooledBufferAllocatorMetric(this);
}
private static PoolArena[] newArenaArray(int size) {
return new PoolArena[size];
}
private static int validateAndCalculatePageShifts(int pageSize, int alignment) {
if (pageSize < MIN_PAGE_SIZE) {
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')');
}
if ((pageSize & pageSize - 1) != 0) {
throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)");
}
if (pageSize < alignment) {
throw new IllegalArgumentException("Alignment cannot be greater than page size. " +
"Alignment: " + alignment + ", page size: " + pageSize + '.');
}
// Logarithm base 2. At this point we know that pageSize is a power of two.
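// For example, the default pageSize of 8192 (1 << 13) has 18 leading zeros, so this returns 31 - 18 = 13.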
return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize);
}
private static int validateAndCalculateChunkSize(int pageSize, int maxOrder) {
if (maxOrder > 14) {
throw new IllegalArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)");
}
// Ensure the resulting chunkSize does not overflow.
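// For example, the defaults pageSize = 8192 and maxOrder = 9 give chunkSize = 8192 << 9 = 4 MiB.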
int chunkSize = pageSize;
for (int i = maxOrder; i > 0; i--) {
if (chunkSize > MAX_CHUNK_SIZE / 2) {
throw new IllegalArgumentException(String.format(
"pageSize (%d) << maxOrder (%d) must not exceed %d", pageSize, maxOrder, MAX_CHUNK_SIZE));
}
chunkSize <<= 1;
}
return chunkSize;
}
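// A minimal usage sketch (hypothetical caller code; assumes the MemoryManagers lookup also used by
// BufferAllocator.pooledHeap()):
//
//   BufferAllocator allocator = new PooledBufferAllocator(MemoryManagers.getManagers().getHeapMemoryManager());
//   try (Buffer buffer = allocator.allocate(256)) {
//       buffer.writeInt(42); // the buffer arrives zero-filled and in native byte order
//   }
//   allocator.close();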
@Override
public Buffer allocate(int size) {
if (size < 1) {
throw new IllegalArgumentException("Allocation size must be positive, but was " + size + '.');
}
PooledAllocatorControl control = new PooledAllocatorControl();
control.parent = this;
UntetheredMemory memory = allocate(control, size);
Buffer buffer = manager.recoverMemory(control, memory.memory(), memory.drop());
return buffer.fill((byte) 0).order(ByteOrder.nativeOrder());
}
UntetheredMemory allocate(PooledAllocatorControl control, int size) {
PoolThreadCache cache = threadCache.get();
PoolArena arena = cache.arena;
if (arena != null) {
return arena.allocate(control, cache, size);
}
return allocateUnpooled(size);
}
private UntetheredMemory allocateUnpooled(int size) {
return new UnpooledUnthetheredMemory(this, manager, size);
}
@Override
public void close() {
trimCurrentThreadCache();
threadCache.remove();
if (arenas != null) {
// Guard against the numArenas == 0 configuration, where no arena array is created.
for (int i = 0, arenasLength = arenas.length; i < arenasLength; i++) {
PoolArena arena = arenas[i];
if (arena != null) {
arena.close();
arenas[i] = null;
}
}
}
arenaMetrics.clear();
}
/**
* Default number of heap arenas - System Property: io.netty.allocator.numArenas - default 2 * cores
*/
public static int defaultNumHeapArena() {
return DEFAULT_NUM_HEAP_ARENA;
}
/**
* Default number of direct arenas - System Property: io.netty.allocator.numDirectArenas - default 2 * cores
*/
public static int defaultNumDirectArena() {
return DEFAULT_NUM_DIRECT_ARENA;
}
/**
* Default buffer page size - System Property: io.netty.allocator.pageSize - default 8192
*/
public static int defaultPageSize() {
return DEFAULT_PAGE_SIZE;
}
/**
* Default maximum order - System Property: io.netty.allocator.maxOrder - default 9
*/
public static int defaultMaxOrder() {
return DEFAULT_MAX_ORDER;
}
/**
* Default thread caching behavior - System Property: io.netty.allocator.useCacheForAllThreads - default false
*/
public static boolean defaultUseCacheForAllThreads() {
return DEFAULT_USE_CACHE_FOR_ALL_THREADS;
}
/**
* Default prefer direct - System Property: io.netty.noPreferDirect - default false
*/
public static boolean defaultPreferDirect() {
return PlatformDependent.directBufferPreferred();
}
/**
* Default small cache size - System Property: io.netty.allocator.smallCacheSize - default 256
*/
public static int defaultSmallCacheSize() {
return DEFAULT_SMALL_CACHE_SIZE;
}
/**
* Default normal cache size - System Property: io.netty.allocator.normalCacheSize - default 64
*/
public static int defaultNormalCacheSize() {
return DEFAULT_NORMAL_CACHE_SIZE;
}
/**
* Return {@code true} if direct memory cache alignment is supported, {@code false} otherwise.
*/
public static boolean isDirectMemoryCacheAlignmentSupported() {
return PlatformDependent.hasUnsafe();
}
public boolean isDirectBufferPooled() {
return manager.isNative();
}
public int numArenas() {
return arenas == null ? 0 : arenas.length;
}
final class PoolThreadLocalCache extends FastThreadLocal<PoolThreadCache> {
private final boolean useCacheForAllThreads;
PoolThreadLocalCache(boolean useCacheForAllThreads) {
this.useCacheForAllThreads = useCacheForAllThreads;
}
@Override
protected synchronized PoolThreadCache initialValue() {
final PoolArena arena = leastUsedArena(arenas);
final Thread current = Thread.currentThread();
if (useCacheForAllThreads || current instanceof FastThreadLocalThread) {
final PoolThreadCache cache = new PoolThreadCache(
arena, smallCacheSize, normalCacheSize,
DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL);
if (DEFAULT_CACHE_TRIM_INTERVAL_MILLIS > 0) {
final EventExecutor executor = ThreadExecutorMap.currentExecutor();
if (executor != null) {
executor.scheduleAtFixedRate(trimTask, DEFAULT_CACHE_TRIM_INTERVAL_MILLIS,
DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
}
}
return cache;
}
// No caching so just use 0 as sizes.
return new PoolThreadCache(null, 0, 0, 0, 0);
}
@Override
protected void onRemoval(PoolThreadCache threadCache) {
threadCache.free();
}
private static PoolArena leastUsedArena(PoolArena[] arenas) {
if (arenas == null || arenas.length == 0) {
return null;
}
PoolArena minArena = arenas[0];
for (int i = 1; i < arenas.length; i++) {
PoolArena arena = arenas[i];
if (arena.numThreadCaches.get() < minArena.numThreadCaches.get()) {
minArena = arena;
}
}
return minArena;
}
}
@Override
public PooledBufferAllocatorMetric metric() {
return metric;
}
/**
* Return a {@link List} of all heap {@link PoolArenaMetric}s that are provided by this pool.
*/
List<PoolArenaMetric> arenaMetrics() {
return arenaMetricsView;
}
/**
* Return the number of thread local caches used by this {@link PooledBufferAllocator}.
*/
int numThreadLocalCaches() {
if (arenas == null) {
return 0;
}
int total = 0;
for (PoolArena arena : arenas) {
total += arena.numThreadCaches.get();
}
return total;
}
/**
* Return the size of the small cache.
*/
int smallCacheSize() {
return smallCacheSize;
}
/**
* Return the size of the normal cache.
*/
int normalCacheSize() {
return normalCacheSize;
}
/**
* Return the chunk size for an arena.
*/
final int chunkSize() {
return chunkSize;
}
final long usedMemory() {
return usedMemory(arenas);
}
private static long usedMemory(PoolArena[] arenas) {
if (arenas == null) {
return -1;
}
long used = 0;
for (PoolArena arena : arenas) {
used += arena.numActiveBytes();
if (used < 0) {
return Long.MAX_VALUE;
}
}
return used;
}
final PoolThreadCache threadCache() {
PoolThreadCache cache = threadCache.get();
assert cache != null;
return cache;
}
/**
* Trim thread local cache for the current {@link Thread}, which will give back any cached memory that was not
* allocated frequently since the last trim operation.
*
* Returns {@code true} if a cache for the current {@link Thread} exists and so was trimmed, false otherwise.
*/
public boolean trimCurrentThreadCache() {
PoolThreadCache cache = threadCache.getIfExists();
if (cache != null) {
cache.trim();
return true;
}
return false;
}
/**
* Returns the status of the allocator (which contains all metrics) as a string. Be aware this may be expensive
* and so should not be called too frequently.
*/
public String dumpStats() {
int heapArenasLen = arenas == null ? 0 : arenas.length;
StringBuilder buf = new StringBuilder(512)
.append(heapArenasLen)
.append(" arena(s):")
.append(StringUtil.NEWLINE);
if (heapArenasLen > 0) {
for (PoolArena a: arenas) {
buf.append(a);
}
}
return buf.toString();
}
}

View File

@ -0,0 +1,92 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.util.internal.StringUtil;
import java.util.List;
/**
* Exposed metric for {@link PooledBufferAllocator}.
*/
final class PooledBufferAllocatorMetric implements BufferAllocatorMetric {
private final PooledBufferAllocator allocator;
PooledBufferAllocatorMetric(PooledBufferAllocator allocator) {
this.allocator = allocator;
}
/**
* Return the number of arenas.
*/
public int numArenas() {
return allocator.numArenas();
}
/**
* Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool.
*/
public List<PoolArenaMetric> arenaMetrics() {
return allocator.arenaMetrics();
}
/**
* Return the number of thread local caches used by this {@link PooledBufferAllocator}.
*/
public int numThreadLocalCaches() {
return allocator.numThreadLocalCaches();
}
/**
* Return the size of the small cache.
*/
public int smallCacheSize() {
return allocator.smallCacheSize();
}
/**
* Return the size of the normal cache.
*/
public int normalCacheSize() {
return allocator.normalCacheSize();
}
/**
* Return the chunk size for an arena.
*/
public int chunkSize() {
return allocator.chunkSize();
}
@Override
public long usedMemory() {
return allocator.usedMemory();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder(256);
sb.append(StringUtil.simpleClassName(this))
.append("(usedMemory: ").append(usedMemory())
.append("; numArenas: ").append(numArenas())
.append("; smallCacheSize: ").append(smallCacheSize())
.append("; normalCacheSize: ").append(normalCacheSize())
.append("; numThreadLocalCaches: ").append(numThreadLocalCaches())
.append("; chunkSize: ").append(chunkSize()).append(')');
return sb.toString();
}
}

View File

@ -0,0 +1,40 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
class PooledDrop implements Drop<Buffer> {
private final PoolArena arena;
private final PoolChunk chunk;
private final PoolThreadCache threadCache;
private final long handle;
private final int normSize;
PooledDrop(PoolArena arena, PoolChunk chunk, PoolThreadCache threadCache, long handle, int normSize) {
this.arena = arena;
this.chunk = chunk;
this.threadCache = threadCache;
this.handle = handle;
this.normSize = normSize;
}
@Override
public void drop(Buffer obj) {
arena.free(chunk, handle, normSize, threadCache);
}
}

View File

@ -0,0 +1,407 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import static io.netty.buffer.api.pool.PoolThreadCache.*;
/**
* SizeClasses requires {@code pageShifts} to be defined prior to inclusion,
* and it in turn defines:
* <p>
* LOG2_SIZE_CLASS_GROUP: Log of size class count for each size doubling.
* LOG2_MAX_LOOKUP_SIZE: Log of max size class in the lookup table.
* sizeClasses: Complete table of [index, log2Group, log2Delta, nDelta, isMultiPageSize,
* isSubPage, log2DeltaLookup] tuples.
* index: Size class index.
* log2Group: Log of group base size (no deltas added).
* log2Delta: Log of delta to previous size class.
* nDelta: Delta multiplier.
* isMultiPageSize: 'yes' if a multiple of the page size, 'no' otherwise.
* isSubPage: 'yes' if a subpage size class, 'no' otherwise.
* log2DeltaLookup: Same as log2Delta if a lookup table size class, 'no'
* otherwise.
* <p>
* nSubpages: Number of subpages size classes.
* nSizes: Number of size classes.
* nPSizes: Number of size classes that are multiples of pageSize.
*
* smallMaxSizeIdx: Maximum small size class index.
*
* lookupMaxSize: Maximum size class included in the lookup table.
* log2NormalMinClass: Log of minimum normal size class.
* <p>
* The first size class and spacing are 1 << LOG2_QUANTUM.
* Each group has 1 << LOG2_SIZE_CLASS_GROUP of size classes.
*
* size = 1 << log2Group + nDelta * (1 << log2Delta)
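*
* For example, index 9 in the table below has log2Group = 7, log2Delta = 5 and nDelta = 2,
* so size = (1 << 7) + 2 * (1 << 5) = 192.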
*
* The first size class has an unusual encoding, because the size has to be
* split between group and delta*nDelta.
*
* If pageShift = 13, sizeClasses looks like this:
*
* (index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup)
* <p>
* ( 0, 4, 4, 0, no, yes, 4)
* ( 1, 4, 4, 1, no, yes, 4)
* ( 2, 4, 4, 2, no, yes, 4)
* ( 3, 4, 4, 3, no, yes, 4)
* <p>
* ( 4, 6, 4, 1, no, yes, 4)
* ( 5, 6, 4, 2, no, yes, 4)
* ( 6, 6, 4, 3, no, yes, 4)
* ( 7, 6, 4, 4, no, yes, 4)
* <p>
* ( 8, 7, 5, 1, no, yes, 5)
* ( 9, 7, 5, 2, no, yes, 5)
* ( 10, 7, 5, 3, no, yes, 5)
* ( 11, 7, 5, 4, no, yes, 5)
* ...
* ...
* ( 72, 23, 21, 1, yes, no, no)
* ( 73, 23, 21, 2, yes, no, no)
* ( 74, 23, 21, 3, yes, no, no)
* ( 75, 23, 21, 4, yes, no, no)
* <p>
* ( 76, 24, 22, 1, yes, no, no)
*/
abstract class SizeClasses implements SizeClassesMetric {
static final int LOG2_QUANTUM = 4;
private static final int LOG2_SIZE_CLASS_GROUP = 2;
private static final int LOG2_MAX_LOOKUP_SIZE = 12;
private static final int INDEX_IDX = 0;
private static final int LOG2GROUP_IDX = 1;
private static final int LOG2DELTA_IDX = 2;
private static final int NDELTA_IDX = 3;
private static final int PAGESIZE_IDX = 4;
private static final int SUBPAGE_IDX = 5;
private static final int LOG2_DELTA_LOOKUP_IDX = 6;
private static final byte no = 0, yes = 1;
protected SizeClasses(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) {
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.chunkSize = chunkSize;
this.directMemoryCacheAlignment = directMemoryCacheAlignment;
int group = log2(chunkSize) + 1 - LOG2_QUANTUM;
//generate size classes
//[index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup]
sizeClasses = new short[group << LOG2_SIZE_CLASS_GROUP][7];
nSizes = sizeClasses();
//generate lookup table
sizeIdx2sizeTab = new int[nSizes];
pageIdx2sizeTab = new int[nPSizes];
idx2SizeTab(sizeIdx2sizeTab, pageIdx2sizeTab);
size2idxTab = new int[lookupMaxSize >> LOG2_QUANTUM];
size2idxTab(size2idxTab);
}
protected final int pageSize;
protected final int pageShifts;
protected final int chunkSize;
protected final int directMemoryCacheAlignment;
final int nSizes;
int nSubpages;
int nPSizes;
int smallMaxSizeIdx;
private int lookupMaxSize;
private final short[][] sizeClasses;
private final int[] pageIdx2sizeTab;
// lookup table from size class index to size
private final int[] sizeIdx2sizeTab;
// lookup table used for size <= lookupMaxSize
// spacing is 1 << LOG2_QUANTUM, so the size of the array is lookupMaxSize >> LOG2_QUANTUM
private final int[] size2idxTab;
private int sizeClasses() {
int normalMaxSize = -1;
int index = 0;
int size = 0;
int log2Group = LOG2_QUANTUM;
int log2Delta = LOG2_QUANTUM;
int ndeltaLimit = 1 << LOG2_SIZE_CLASS_GROUP;
//First small group, nDelta start at 0.
//first size class is 1 << LOG2_QUANTUM
int nDelta = 0;
while (nDelta < ndeltaLimit) {
size = sizeClass(index++, log2Group, log2Delta, nDelta++);
}
log2Group += LOG2_SIZE_CLASS_GROUP;
//All remaining groups, nDelta start at 1.
while (size < chunkSize) {
nDelta = 1;
while (nDelta <= ndeltaLimit && size < chunkSize) {
size = sizeClass(index++, log2Group, log2Delta, nDelta++);
normalMaxSize = size;
}
log2Group++;
log2Delta++;
}
//chunkSize must be normalMaxSize
assert chunkSize == normalMaxSize;
//return number of size index
return index;
}
//calculate and record one size class tuple; returns its size in bytes
private int sizeClass(int index, int log2Group, int log2Delta, int nDelta) {
short isMultiPageSize;
if (log2Delta >= pageShifts) {
isMultiPageSize = yes;
} else {
int pageSize = 1 << pageShifts;
int size = (1 << log2Group) + (1 << log2Delta) * nDelta;
isMultiPageSize = size == size / pageSize * pageSize? yes : no;
}
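// The next three statements classify size: remove ends up 'no' exactly when size is
// a power of two (log2Size then equals log2(size)); otherwise log2Size is
// floor(log2(size)). The lookup-table test below admits sizes up to 1 << LOG2_MAX_LOOKUP_SIZE.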
int log2Ndelta = nDelta == 0? 0 : log2(nDelta);
byte remove = 1 << log2Ndelta < nDelta? yes : no;
int log2Size = log2Delta + log2Ndelta == log2Group? log2Group + 1 : log2Group;
if (log2Size == log2Group) {
remove = yes;
}
short isSubpage = log2Size < pageShifts + LOG2_SIZE_CLASS_GROUP? yes : no;
int log2DeltaLookup = log2Size < LOG2_MAX_LOOKUP_SIZE ||
log2Size == LOG2_MAX_LOOKUP_SIZE && remove == no
? log2Delta : no;
short[] sz = {
(short) index, (short) log2Group, (short) log2Delta,
(short) nDelta, isMultiPageSize, isSubpage, (short) log2DeltaLookup
};
sizeClasses[index] = sz;
int size = (1 << log2Group) + (nDelta << log2Delta);
if (sz[PAGESIZE_IDX] == yes) {
nPSizes++;
}
if (sz[SUBPAGE_IDX] == yes) {
nSubpages++;
smallMaxSizeIdx = index;
}
if (sz[LOG2_DELTA_LOOKUP_IDX] != no) {
lookupMaxSize = size;
}
return size;
}
private void idx2SizeTab(int[] sizeIdx2sizeTab, int[] pageIdx2sizeTab) {
int pageIdx = 0;
for (int i = 0; i < nSizes; i++) {
short[] sizeClass = sizeClasses[i];
int log2Group = sizeClass[LOG2GROUP_IDX];
int log2Delta = sizeClass[LOG2DELTA_IDX];
int nDelta = sizeClass[NDELTA_IDX];
int size = (1 << log2Group) + (nDelta << log2Delta);
sizeIdx2sizeTab[i] = size;
if (sizeClass[PAGESIZE_IDX] == yes) {
pageIdx2sizeTab[pageIdx++] = size;
}
}
}
private void size2idxTab(int[] size2idxTab) {
int idx = 0;
int size = 0;
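// Each size class i covers 1 << (log2Delta - LOG2_QUANTUM) consecutive quantum-sized
// slots, so the finished table maps (size - 1) >> LOG2_QUANTUM directly to a size class index.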
for (int i = 0; size <= lookupMaxSize; i++) {
int log2Delta = sizeClasses[i][LOG2DELTA_IDX];
int times = 1 << log2Delta - LOG2_QUANTUM;
while (size <= lookupMaxSize && times-- > 0) {
size2idxTab[idx++] = i;
size = idx + 1 << LOG2_QUANTUM;
}
}
}
@Override
public int sizeIdx2size(int sizeIdx) {
return sizeIdx2sizeTab[sizeIdx];
}
@Override
public int sizeIdx2sizeCompute(int sizeIdx) {
int group = sizeIdx >> LOG2_SIZE_CLASS_GROUP;
int mod = sizeIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1;
int groupSize = group == 0? 0 :
1 << LOG2_QUANTUM + LOG2_SIZE_CLASS_GROUP - 1 << group;
int shift = group == 0? 1 : group;
int lgDelta = shift + LOG2_QUANTUM - 1;
int modSize = mod + 1 << lgDelta;
return groupSize + modSize;
}
@Override
public long pageIdx2size(int pageIdx) {
return pageIdx2sizeTab[pageIdx];
}
@Override
public long pageIdx2sizeCompute(int pageIdx) {
int group = pageIdx >> LOG2_SIZE_CLASS_GROUP;
int mod = pageIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1;
long groupSize = group == 0? 0 :
1L << pageShifts + LOG2_SIZE_CLASS_GROUP - 1 << group;
int shift = group == 0? 1 : group;
int log2Delta = shift + pageShifts - 1;
int modSize = mod + 1 << log2Delta;
return groupSize + modSize;
}
@Override
public int size2SizeIdx(int size) {
if (size == 0) {
return 0;
}
if (size > chunkSize) {
return nSizes;
}
if (directMemoryCacheAlignment > 0) {
size = alignSize(size);
}
if (size <= lookupMaxSize) {
//(size - 1) >> LOG2_QUANTUM indexes the quantum-spaced lookup table
return size2idxTab[size - 1 >> LOG2_QUANTUM];
}
int x = log2((size << 1) - 1);
int shift = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? 0 : x - (LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM);
int group = shift << LOG2_SIZE_CLASS_GROUP;
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1;
int deltaInverseMask = -1 << log2Delta;
int mod = (size - 1 & deltaInverseMask) >> log2Delta &
(1 << LOG2_SIZE_CLASS_GROUP) - 1;
return group + mod;
}
@Override
public int pages2pageIdx(int pages) {
return pages2pageIdxCompute(pages, false);
}
@Override
public int pages2pageIdxFloor(int pages) {
return pages2pageIdxCompute(pages, true);
}
private int pages2pageIdxCompute(int pages, boolean floor) {
int pageSize = pages << pageShifts;
if (pageSize > chunkSize) {
return nPSizes;
}
int x = log2((pageSize << 1) - 1);
int shift = x < LOG2_SIZE_CLASS_GROUP + pageShifts
? 0 : x - (LOG2_SIZE_CLASS_GROUP + pageShifts);
int group = shift << LOG2_SIZE_CLASS_GROUP;
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + pageShifts + 1?
pageShifts : x - LOG2_SIZE_CLASS_GROUP - 1;
int deltaInverseMask = -1 << log2Delta;
int mod = (pageSize - 1 & deltaInverseMask) >> log2Delta &
(1 << LOG2_SIZE_CLASS_GROUP) - 1;
int pageIdx = group + mod;
if (floor && pageIdx2sizeTab[pageIdx] > pages << pageShifts) {
pageIdx--;
}
return pageIdx;
}
// Round size up to the nearest multiple of alignment.
private int alignSize(int size) {
int delta = size & directMemoryCacheAlignment - 1;
return delta == 0? size : size + directMemoryCacheAlignment - delta;
}
@Override
public int normalizeSize(int size) {
if (size == 0) {
return sizeIdx2sizeTab[0];
}
if (directMemoryCacheAlignment > 0) {
size = alignSize(size);
}
if (size <= lookupMaxSize) {
int ret = sizeIdx2sizeTab[size2idxTab[size - 1 >> LOG2_QUANTUM]];
assert ret == normalizeSizeCompute(size);
return ret;
}
return normalizeSizeCompute(size);
}
private static int normalizeSizeCompute(int size) {
int x = log2((size << 1) - 1);
int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1
? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1;
int delta = 1 << log2Delta;
int delta_mask = delta - 1;
return size + delta_mask & ~delta_mask;
}
}
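To make the javadoc table above concrete, here is a minimal, self-contained sketch (not part of the commit) that reproduces the first three groups of the pageShifts = 13 table from the documented formula:

// Illustrative only: recompute early size classes from
// size = (1 << log2Group) + nDelta * (1 << log2Delta),
// with LOG2_QUANTUM = 4 and LOG2_SIZE_CLASS_GROUP = 2 as in SizeClasses.
class SizeClassTableDemo {
    public static void main(String[] args) {
        int log2Group = 4, log2Delta = 4, groupSize = 1 << 2;
        // The first group is special: nDelta runs from 0, yielding 16, 32, 48, 64.
        for (int nDelta = 0; nDelta < groupSize; nDelta++) {
            System.out.println((1 << log2Group) + nDelta * (1 << log2Delta));
        }
        log2Group += 2; // skip ahead by LOG2_SIZE_CLASS_GROUP
        // Subsequent groups: nDelta runs from 1.
        // Prints 80, 96, 112, 128, then 160, 192, 224, 256.
        for (int group = 0; group < 2; group++) {
            for (int nDelta = 1; nDelta <= groupSize; nDelta++) {
                System.out.println((1 << log2Group) + nDelta * (1 << log2Delta));
            }
            log2Group++;
            log2Delta++;
        }
    }
}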

View File

@ -0,0 +1,87 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
/**
* Exposes metrics for a {@link SizeClasses}.
*/
public interface SizeClassesMetric {
/**
* Computes size from lookup table according to sizeIdx.
*
* @return size
*/
int sizeIdx2size(int sizeIdx);
/**
* Computes size according to sizeIdx.
*
* @return size
*/
int sizeIdx2sizeCompute(int sizeIdx);
/**
* Computes size from lookup table according to pageIdx.
*
* @return size, which is a multiple of pageSize.
*/
long pageIdx2size(int pageIdx);
/**
* Computes size according to pageIdx.
*
* @return size, which is a multiple of pageSize
*/
long pageIdx2sizeCompute(int pageIdx);
/**
* Normalizes request size up to the nearest size class.
*
* @param size request size
*
* @return sizeIdx of the size class
*/
int size2SizeIdx(int size);
/**
* Normalizes a request given in pages up to the nearest pageSize class.
*
* @param pages size in multiples of pageSize
*
* @return pageIdx of the pageSize class
*/
int pages2pageIdx(int pages);
/**
* Normalizes a request given in pages down to the nearest pageSize class.
*
* @param pages size in multiples of pageSize
*
* @return pageIdx of the pageSize class
*/
int pages2pageIdxFloor(int pages);
/**
* Normalizes the request size to the usable size that an allocation of the
* specified size and alignment would actually receive.
*
* @param size request size
*
* @return normalized size
*/
int normalizeSize(int size);
}
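Taken together, these methods form a round-trip contract. A hedged sketch (the metric instance and the demo class are assumed, not part of the commit):

// Hedged sketch: the normalization round trip a SizeClassesMetric is expected to satisfy.
final class SizeClassesMetricContractDemo {
    static void checkNormalization(SizeClassesMetric metric) {
        int requested = 1000;
        int sizeIdx = metric.size2SizeIdx(requested);  // index of the smallest class >= 1000
        int normalized = metric.sizeIdx2size(sizeIdx); // resolve the class back to bytes
        assert normalized >= requested;                // normalization never rounds down
        assert normalized == metric.sizeIdx2sizeCompute(sizeIdx); // lookup agrees with computation
        assert normalized == metric.normalizeSize(requested);
    }
}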

View File

@ -0,0 +1,45 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.buffer.api.pool;
import io.netty.buffer.api.AllocatorControl;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Drop;
import io.netty.buffer.api.MemoryManager;
import io.netty.buffer.api.internal.Statics;
@SuppressWarnings("unchecked")
class UnpooledUnthetheredMemory implements AllocatorControl.UntetheredMemory {
private final MemoryManager manager;
private final Buffer buffer;
UnpooledUnthetheredMemory(PooledBufferAllocator allocator, MemoryManager manager, int size) {
this.manager = manager;
PooledAllocatorControl allocatorControl = new PooledAllocatorControl();
allocatorControl.parent = allocator;
buffer = manager.allocateShared(allocatorControl, size, manager.drop(), Statics.CLEANER);
}
@Override
public <Memory> Memory memory() {
return (Memory) manager.unwrapRecoverableMemory(buffer);
}
@Override
public <BufferType extends Buffer> Drop<BufferType> drop() {
return (Drop<BufferType>) manager.drop();
}
}
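For reference, the consuming pattern, visible in the UnsafeBuffer change further down: a growing buffer asks its AllocatorControl for untethered memory, then claims the memory and the drop exactly once each, as the @implNote on UntetheredMemory requires. A hedged sketch (the method and its parameters are assumed from context, not part of the commit):

// Hedged sketch of the consumer side of UntetheredMemory.
static <T extends Buffer> void grow(AllocatorControl control, T buffer, int newSize) {
    AllocatorControl.UntetheredMemory untethered = control.allocateUntethered(buffer, newSize);
    Object memory = untethered.memory(); // claim the memory exactly once (may be expensive)
    Drop<T> drop = untethered.drop();    // claim the drop exactly once (may touch Cleaners)
    // ... copy contents, install `memory` in the buffer, and attach `drop` to its lifetime.
}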

View File

@ -0,0 +1,19 @@
/*
* Copyright 2021 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* A pooling {@link io.netty.buffer.api.BufferAllocator} implementation based on jemalloc.
*/
package io.netty.buffer.api.pool;

View File

@ -447,7 +447,8 @@ class UnsafeBuffer extends RcSupport<Buffer, UnsafeBuffer> implements Buffer, Re
// Allocate a bigger buffer.
long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth);
BufferAllocator.checkSize(newSize);
UnsafeMemory memory = (UnsafeMemory) control.allocateUntethered(this, (int) newSize);
var untethered = control.allocateUntethered(this, (int) newSize);
UnsafeMemory memory = untethered.memory();
// Copy contents.
try {
@ -458,17 +459,17 @@ class UnsafeBuffer extends RcSupport<Buffer, UnsafeBuffer> implements Buffer, Re
}
// Release the old memory, and install the new memory:
Drop<UnsafeBuffer> drop = disconnectDrop();
Drop<UnsafeBuffer> drop = untethered.drop();
disconnectDrop(drop);
attachNewMemory(memory, drop);
}
private Drop<UnsafeBuffer> disconnectDrop() {
private Drop<UnsafeBuffer> disconnectDrop(Drop<UnsafeBuffer> newDrop) {
var drop = (Drop<UnsafeBuffer>) unsafeGetDrop();
int roff = this.roff;
int woff = this.woff;
drop.drop(this);
drop = ArcDrop.unwrapAllArcs(drop);
unsafeSetDrop(new ArcDrop<>(drop));
unsafeSetDrop(new ArcDrop<>(newDrop));
this.roff = roff;
this.woff = woff;
return drop;

View File

@ -25,4 +25,8 @@ class UnsafeMemory {
this.address = address;
this.size = size;
}
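// The slice shares the same base object, and thus the same underlying allocation;
// it only narrows the visible address window.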
public UnsafeMemory slice(int offset, int length) {
return new UnsafeMemory(base, address + offset, length);
}
}

View File

@ -33,6 +33,11 @@ public class UnsafeMemoryManager implements MemoryManager {
this.offheap = offheap;
}
@Override
public boolean isNative() {
return offheap;
}
@Override
public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop<Buffer> drop, Cleaner cleaner) {
final Object base;
@ -79,9 +84,19 @@ public class UnsafeMemoryManager implements MemoryManager {
return ((UnsafeMemory) memory).size;
}
@Override
public void discardRecoverableMemory(Object recoverableMemory) {
// We cannot reliably drop unsafe memory. We have to rely on the cleaner to do that.
}
@Override
public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop<Buffer> drop) {
UnsafeMemory memory = (UnsafeMemory) recoverableMemory;
return new UnsafeBuffer(memory, 0, memory.size, allocatorControl, convert(drop));
}
@Override
public Object sliceMemory(Object memory, int offset, int length) {
return ((UnsafeMemory) memory).slice(offset, length);
}
}
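Together with sliceMemory, recoverMemory lets an allocator re-wrap a region of previously allocated memory in a fresh buffer. A hedged sketch of that round trip (the helper and its parameters are assumed, not part of the commit):

// Hedged sketch: recover a Buffer over a sub-range of existing unsafe memory.
static Buffer recoverSlice(UnsafeMemoryManager manager, AllocatorControl control,
                           Buffer buffer, Drop<Buffer> drop) {
    Object memory = manager.unwrapRecoverableMemory(buffer); // an UnsafeMemory underneath
    Object slice = manager.sliceMemory(memory, 0, 128);      // same base, narrowed window
    return manager.recoverMemory(control, slice, drop);     // new Buffer over the slice
}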

View File

@ -19,11 +19,29 @@ import io.netty.buffer.api.memseg.NativeMemorySegmentManager;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import java.util.List;
import java.util.stream.Stream;
import static io.netty.buffer.api.MemoryManagers.using;
import static org.assertj.core.api.Assertions.assertThat;
public class BufferCleanerTest extends BufferTestSupport {
@SuppressWarnings("OptionalGetWithoutIsPresent")
static Fixture[] memorySegmentAllocators() {
MemoryManagers managers = MemoryManagers.getAllManagers()
.map(p -> p.get())
.filter(mm -> "MS".equals(mm.toString()))
.findFirst().get();
List<Fixture> initFixtures = initialAllocators().stream().flatMap(f -> {
Stream.Builder<Fixture> builder = Stream.builder();
builder.add(new Fixture(f + "/" + managers, () -> using(managers, f), f.getProperties()));
return builder.build();
}).toList();
return fixtureCombinations(initFixtures).filter(f -> f.isDirect()).toArray(Fixture[]::new);
}
@ParameterizedTest
@MethodSource("directAllocators")
@MethodSource("memorySegmentAllocators")
public void bufferMustBeClosedByCleaner(Fixture fixture) throws InterruptedException {
var initial = NativeMemorySegmentManager.MEM_USAGE_NATIVE.sum();
int allocationSize = 1024;
@ -34,6 +52,7 @@ public class BufferCleanerTest extends BufferTestSupport {
System.runFinalization();
sum = NativeMemorySegmentManager.MEM_USAGE_NATIVE.sum() - initial;
if (sum < allocationSize) {
// The memory must have been cleaned.
return;
}
}
@ -42,7 +61,7 @@ public class BufferCleanerTest extends BufferTestSupport {
private static void allocateAndForget(Fixture fixture, int size) {
var allocator = fixture.createAllocator();
allocator.close();
allocator.allocate(size);
allocator.close();
}
}

View File

@ -59,7 +59,7 @@ public abstract class BufferTestSupport {
private static final Memoize<Fixture[]> INITIAL_NO_CONST = new Memoize<>(
() -> initialFixturesForEachImplementation().stream().filter(f -> !f.isConst()).toArray(Fixture[]::new));
private static final Memoize<Fixture[]> ALL_COMBINATIONS = new Memoize<>(
() -> fixtureCombinations().toArray(Fixture[]::new));
() -> fixtureCombinations(initialFixturesForEachImplementation()).toArray(Fixture[]::new));
private static final Memoize<Fixture[]> ALL_ALLOCATORS = new Memoize<>(
() -> Arrays.stream(ALL_COMBINATIONS.get())
.filter(sample())
@ -160,21 +160,21 @@ public abstract class BufferTestSupport {
return initFixtures;
}
private static Stream<Fixture> fixtureCombinations() {
List<Fixture> initFixtures = initialFixturesForEachImplementation();
static Stream<Fixture> fixtureCombinations(List<Fixture> initFixtures) {
Builder<Fixture> builder = Stream.builder();
initFixtures.forEach(builder);
// Add 2-way composite buffers of all combinations.
for (Fixture first : initFixtures) {
for (Fixture second : initFixtures) {
var a = first.get();
var b = second.get();
builder.add(new Fixture("compose(" + first + ", " + second + ')', () -> {
return new BufferAllocator() {
BufferAllocator a;
BufferAllocator b;
@Override
public Buffer allocate(int size) {
a = first.get();
b = second.get();
int half = size / 2;
try (Buffer firstHalf = a.allocate(half);
Buffer secondHalf = b.allocate(size - half)) {
@ -184,8 +184,14 @@ public abstract class BufferTestSupport {
@Override
public void close() {
a.close();
b.close();
if (a != null) {
a.close();
a = null;
}
if (b != null) {
b.close();
b = null;
}
}
};
}, COMPOSITE));
@ -195,9 +201,10 @@ public abstract class BufferTestSupport {
// Also add a 3-way composite buffer.
builder.add(new Fixture("compose(heap,heap,heap)", () -> {
return new BufferAllocator() {
final BufferAllocator alloc = BufferAllocator.heap();
BufferAllocator alloc;
@Override
public Buffer allocate(int size) {
alloc = BufferAllocator.heap();
int part = size / 3;
try (Buffer a = alloc.allocate(part);
Buffer b = alloc.allocate(part);
@ -208,17 +215,21 @@ public abstract class BufferTestSupport {
@Override
public void close() {
alloc.close();
if (alloc != null) {
alloc.close();
alloc = null;
}
}
};
}, COMPOSITE));
for (Fixture fixture : initFixtures) {
builder.add(new Fixture(fixture + ".ensureWritable", () -> {
var allocator = fixture.createAllocator();
return new BufferAllocator() {
BufferAllocator allocator;
@Override
public Buffer allocate(int size) {
allocator = fixture.createAllocator();
if (size < 2) {
return allocator.allocate(size);
}
@ -229,15 +240,19 @@ public abstract class BufferTestSupport {
@Override
public void close() {
allocator.close();
if (allocator != null) {
allocator.close();
allocator = null;
}
}
};
}, fixture.getProperties()));
builder.add(new Fixture(fixture + ".compose.ensureWritable", () -> {
var allocator = fixture.createAllocator();
return new BufferAllocator() {
BufferAllocator allocator;
@Override
public Buffer allocate(int size) {
allocator = fixture.createAllocator();
if (size < 2) {
return allocator.allocate(size);
}
@ -248,7 +263,10 @@ public abstract class BufferTestSupport {
@Override
public void close() {
allocator.close();
if (allocator != null) {
allocator.close();
allocator = null;
}
}
};
}, COMPOSITE));
@ -263,10 +281,11 @@ public abstract class BufferTestSupport {
Builder<Fixture> builder = Stream.builder();
builder.add(f);
builder.add(new Fixture(f + ".split", () -> {
var allocatorBase = f.get();
return new BufferAllocator() {
BufferAllocator allocatorBase;
@Override
public Buffer allocate(int size) {
allocatorBase = f.get();
try (Buffer buf = allocatorBase.allocate(size + 1)) {
buf.writerOffset(size);
return buf.split().writerOffset(0);
@ -275,7 +294,10 @@ public abstract class BufferTestSupport {
@Override
public void close() {
allocatorBase.close();
if (allocatorBase != null) {
allocatorBase.close();
allocatorBase = null;
}
}
};
}, f.getProperties()));
@ -287,10 +309,11 @@ public abstract class BufferTestSupport {
builder.add(f);
var props = concat(f.getProperties(), Properties.SLICE);
builder.add(new Fixture(f + ".slice(0, capacity())", () -> {
var allocatorBase = f.get();
return new BufferAllocator() {
BufferAllocator allocatorBase;
@Override
public Buffer allocate(int size) {
allocatorBase = f.get();
try (Buffer base = allocatorBase.allocate(size)) {
return base.slice(0, base.capacity()).writerOffset(0);
}
@ -298,15 +321,19 @@ public abstract class BufferTestSupport {
@Override
public void close() {
allocatorBase.close();
if (allocatorBase != null) {
allocatorBase.close();
allocatorBase = null;
}
}
};
}, props));
builder.add(new Fixture(f + ".slice(1, capacity() - 2)", () -> {
var allocatorBase = f.get();
return new BufferAllocator() {
BufferAllocator allocatorBase;
@Override
public Buffer allocate(int size) {
allocatorBase = f.get();
try (Buffer base = allocatorBase.allocate(size + 2)) {
return base.slice(1, size).writerOffset(0);
}
@ -314,7 +341,10 @@ public abstract class BufferTestSupport {
@Override
public void close() {
allocatorBase.close();
if (allocatorBase != null) {
allocatorBase.close();
allocatorBase = null;
}
}
};
}, props));