[#4198] Fix race condition when allocating from multiple threads.

Motivation:

Fix a race condition, introduced by f18990a8a5, that could lead to an NPE when allocating from the PooledByteBufAllocator concurrently from many threads.

Modifications:

Correctly synchronize on the PoolSubpage head.

Result:

No more race.
Norman Maurer 2015-10-22 22:09:41 +02:00
parent 85236d5446
commit d93f906255
4 changed files with 212 additions and 84 deletions
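
To make the failure mode concrete, here is a minimal, self-contained sketch of the hazard (the Node type and pool methods are illustrative stand-ins, not Netty's actual classes): two threads insert, read, and unlink nodes of a doubly linked list with no common lock, mirroring the pool's head/next structure, and one of them can observe a node whose links were already torn down and hit exactly this kind of NPE.

// Illustrative stand-ins, not Netty's classes: an unguarded doubly linked
// list mutated from two threads, mirroring the pool's head/next structure.
public final class LinkedListRaceSketch {
    static final class Node {
        Node prev, next;
    }

    static final Node head = new Node();

    static void addToPool(Node n) {
        // Insert n right after head.
        n.prev = head;
        n.next = head.next;
        if (head.next != null) {
            head.next.prev = n;
        }
        head.next = n;
    }

    static void removeFromPool(Node n) {
        // Unlink n. Without a lock, another thread can see n half-unlinked.
        n.prev.next = n.next;            // may NPE if n.prev was already nulled
        if (n.next != null) {
            n.next.prev = n.prev;
        }
        n.prev = null;
        n.next = null;
    }

    public static void main(String[] args) throws InterruptedException {
        Runnable work = () -> {
            for (int i = 0; i < 1_000_000; i++) {
                Node n = new Node();
                addToPool(n);
                Node s = head.next;      // like reading the pool head's next
                if (s != null) {
                    removeFromPool(s);   // races with the other thread's mutations
                }
            }
        };
        Thread t1 = new Thread(work);
        Thread t2 = new Thread(work);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}

Whether the NPE fires on a given run depends on scheduling, which is why the regression test added at the end of this commit hammers a single arena from many threads for a while rather than asserting a deterministic interleaving.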

buffer/src/main/java/io/netty/buffer/PoolArena.java

@@ -183,8 +183,8 @@ abstract class PoolArena<T> implements PoolArenaMetric {
         final PoolSubpage<T> head = table[tableIdx];
 
         /**
-         * Synchronize on the head. This is needed as {@link PoolSubpage#allocate()} and
-         * {@link PoolSubpage#free(int)} may modify the doubly linked list as well.
+         * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
+         * {@link PoolChunk#free(long)} may modify the doubly linked list as well.
          */
         synchronized (head) {
             final PoolSubpage<T> s = head.next;
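
The convention this hunk establishes, and which the PoolChunk and PoolSubpage changes below follow, is that every thread touching a size class's subpage list first takes the monitor of that list's head node. Applied to the stand-in list from the sketch above, the guarded version would look roughly like this (again illustrative, not Netty's code):

// Illustrative stand-in, not Netty's code: the same doubly linked list as in
// the sketch above, with every touch point guarded by the head's monitor.
final class GuardedList {
    static final class Node {
        Node prev, next;
    }

    private final Node head = new Node();

    void addToPool(Node n) {
        synchronized (head) {        // one monitor per list
            n.prev = head;
            n.next = head.next;
            if (head.next != null) {
                head.next.prev = n;
            }
            head.next = n;
        }
    }

    void removeFromPool(Node n) {
        synchronized (head) {        // writers serialize with each other...
            n.prev.next = n.next;
            if (n.next != null) {
                n.next.prev = n.prev;
            }
            n.prev = null;
            n.next = null;
        }
    }

    Node first() {
        synchronized (head) {        // ...and with readers, so no torn links
            return head.next;
        }
    }
}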

buffer/src/main/java/io/netty/buffer/PoolChunk.java

@@ -306,26 +306,31 @@ final class PoolChunk<T> implements PoolChunkMetric {
      * @return index in memoryMap
      */
     private long allocateSubpage(int normCapacity) {
-        int d = maxOrder; // subpages are only allocated from pages, i.e. leaves
-        int id = allocateNode(d);
-        if (id < 0) {
-            return id;
-        }
+        // Obtain the head of the PoolSubpage pool that is owned by the PoolArena and synchronize on it.
+        // This is needed as we may add it back and so alter the linked-list structure.
+        PoolSubpage<T> head = arena.findSubpagePoolHead(normCapacity);
+        synchronized (head) {
+            int d = maxOrder; // subpages are only allocated from pages, i.e. leaves
+            int id = allocateNode(d);
+            if (id < 0) {
+                return id;
+            }
 
-        final PoolSubpage<T>[] subpages = this.subpages;
-        final int pageSize = this.pageSize;
+            final PoolSubpage<T>[] subpages = this.subpages;
+            final int pageSize = this.pageSize;
 
-        freeBytes -= pageSize;
+            freeBytes -= pageSize;
 
-        int subpageIdx = subpageIdx(id);
-        PoolSubpage<T> subpage = subpages[subpageIdx];
-        if (subpage == null) {
-            subpage = new PoolSubpage<T>(this, id, runOffset(id), pageSize, normCapacity);
-            subpages[subpageIdx] = subpage;
-        } else {
-            subpage.init(normCapacity);
-        }
-        return subpage.allocate();
+            int subpageIdx = subpageIdx(id);
+            PoolSubpage<T> subpage = subpages[subpageIdx];
+            if (subpage == null) {
+                subpage = new PoolSubpage<T>(head, this, id, runOffset(id), pageSize, normCapacity);
+                subpages[subpageIdx] = subpage;
+            } else {
+                subpage.init(head, normCapacity);
+            }
+            return subpage.allocate();
+        }
     }
 
     /**
@@ -343,8 +348,14 @@ final class PoolChunk<T> implements PoolChunkMetric {
         if (bitmapIdx != 0) { // free a subpage
             PoolSubpage<T> subpage = subpages[subpageIdx(memoryMapIdx)];
             assert subpage != null && subpage.doNotDestroy;
-            if (subpage.free(bitmapIdx & 0x3FFFFFFF)) {
-                return;
+
+            // Obtain the head of the PoolSubpage pool that is owned by the PoolArena and synchronize on it.
+            // This is needed as we may add it back and so alter the linked-list structure.
+            PoolSubpage<T> head = arena.findSubpagePoolHead(subpage.elemSize);
+            synchronized (head) {
+                if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) {
+                    return;
+                }
             }
         }
         freeBytes += runLength(memoryMapIdx);

buffer/src/main/java/io/netty/buffer/PoolSubpage.java

@@ -47,16 +47,16 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
         bitmap = null;
     }
 
-    PoolSubpage(PoolChunk<T> chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize) {
+    PoolSubpage(PoolSubpage<T> head, PoolChunk<T> chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize) {
         this.chunk = chunk;
         this.memoryMapIdx = memoryMapIdx;
         this.runOffset = runOffset;
         this.pageSize = pageSize;
         bitmap = new long[pageSize >>> 10]; // pageSize / 16 / 64
-        init(elemSize);
+        init(head, elemSize);
     }
 
-    void init(int elemSize) {
+    void init(PoolSubpage<T> head, int elemSize) {
         doNotDestroy = true;
         this.elemSize = elemSize;
         if (elemSize != 0) {
@@ -71,11 +71,7 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
                 bitmap[i] = 0;
             }
         }
-
-        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
-        synchronized (head) {
-            addToPool(head);
-        }
+        addToPool(head);
     }
 
     /**
@@ -86,75 +82,56 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
             return toHandle(0);
         }
 
-        /**
-         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we synchronize
-         * on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to allocate out of the
-         * {@link PoolSubpage} pool for a given size.
-         */
-        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
-        synchronized (head) {
-            if (numAvail == 0 || !doNotDestroy) {
-                return -1;
-            }
+        if (numAvail == 0 || !doNotDestroy) {
+            return -1;
+        }
 
-            final int bitmapIdx = getNextAvail();
-            int q = bitmapIdx >>> 6;
-            int r = bitmapIdx & 63;
-            assert (bitmap[q] >>> r & 1) == 0;
-            bitmap[q] |= 1L << r;
+        final int bitmapIdx = getNextAvail();
+        int q = bitmapIdx >>> 6;
+        int r = bitmapIdx & 63;
+        assert (bitmap[q] >>> r & 1) == 0;
+        bitmap[q] |= 1L << r;
 
-            if (-- numAvail == 0) {
-                removeFromPool();
-            }
+        if (-- numAvail == 0) {
+            removeFromPool();
+        }
 
-            return toHandle(bitmapIdx);
-        }
+        return toHandle(bitmapIdx);
     }
 
     /**
      * @return {@code true} if this subpage is in use.
      *         {@code false} if this subpage is not used by its chunk and thus it's OK to be released.
      */
-    boolean free(int bitmapIdx) {
+    boolean free(PoolSubpage<T> head, int bitmapIdx) {
         if (elemSize == 0) {
             return true;
         }
-
-        /**
-         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we synchronize
-         * on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to allocate out of the
-         * {@link PoolSubpage} pool for a given size.
-         */
-        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
-        synchronized (head) {
-            int q = bitmapIdx >>> 6;
-            int r = bitmapIdx & 63;
-            assert (bitmap[q] >>> r & 1) != 0;
-            bitmap[q] ^= 1L << r;
+        int q = bitmapIdx >>> 6;
+        int r = bitmapIdx & 63;
+        assert (bitmap[q] >>> r & 1) != 0;
+        bitmap[q] ^= 1L << r;
 
-            setNextAvail(bitmapIdx);
+        setNextAvail(bitmapIdx);
 
-            if (numAvail ++ == 0) {
-                addToPool(head);
-                return true;
-            }
+        if (numAvail ++ == 0) {
+            addToPool(head);
+            return true;
+        }
 
-            if (numAvail != maxNumElems) {
-                return true;
-            } else {
-                // Subpage not in use (numAvail == maxNumElems)
-                if (prev == next) {
-                    // Do not remove if this subpage is the only one left in the pool.
-                    return true;
-                }
+        if (numAvail != maxNumElems) {
+            return true;
+        } else {
+            // Subpage not in use (numAvail == maxNumElems)
+            if (prev == next) {
+                // Do not remove if this subpage is the only one left in the pool.
+                return true;
+            }
 
-                // Remove this subpage from the pool if there are other subpages left in the pool.
-                doNotDestroy = false;
-                removeFromPool();
-                return false;
-            }
-        }
+            // Remove this subpage from the pool if there are other subpages left in the pool.
+            doNotDestroy = false;
+            removeFromPool();
+            return false;
+        }
     }
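
Note the shape of these PoolSubpage changes: allocate() and free() no longer resolve and lock the head themselves; the caller (PoolArena or PoolChunk) looks it up once, takes the monitor, and passes the head down. A minimal sketch of that hand-off convention, with hypothetical names:

// Sketch of the lock hand-off these signature changes introduce; the names
// are illustrative, not Netty's API.
final class LockHandoffSketch {
    private static final Object head = new Object();

    // Callee assumes the caller already holds the head's monitor, so it never
    // re-resolves or re-acquires the lock itself.
    static boolean free(Object head, int bitmapIdx) {
        assert Thread.holdsLock(head);
        // ... mutate the list guarded by head ...
        return true;
    }

    public static void main(String[] args) {
        synchronized (head) {   // one acquisition on the caller's side
            free(head, 42);
        }
    }
}

This both avoids re-acquiring the same monitor on the inner calls and, more importantly, makes the whole allocate/free path atomic under one lock, where the old code only guarded the innermost list updates.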

buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java

@@ -0,0 +1,140 @@
/*
 * Copyright 2015 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.buffer;

import io.netty.util.internal.SystemPropertyUtil;
import org.junit.Test;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class PooledByteBufAllocatorTest {

    private static final int[] ALLOCATION_SIZES = new int[16 * 1024];
    static {
        for (int i = 0; i < ALLOCATION_SIZES.length; i++) {
            ALLOCATION_SIZES[i] = i;
        }
    }

    @Test
    public void testConcurrentUsage() throws Throwable {
        long runningTime = TimeUnit.MILLISECONDS.toNanos(SystemPropertyUtil.getLong(
                "io.netty.buffer.PooledByteBufAllocatorTest.testConcurrentUsageTime", 15000));

        // We use no caches and only one arena to maximize the chance of hitting the race-condition we
        // had before.
        ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0);
        List<AllocationThread> threads = new ArrayList<AllocationThread>();
        try {
            for (int i = 0; i < 512; i++) {
                AllocationThread thread = new AllocationThread(allocator);
                thread.start();
                threads.add(thread);
            }

            long start = System.nanoTime();
            while (!isExpired(start, runningTime)) {
                checkForErrors(threads);
                Thread.sleep(100);
            }
        } finally {
            for (AllocationThread t : threads) {
                t.finish();
            }
        }
    }

    private static boolean isExpired(long start, long expireTime) {
        return System.nanoTime() - start > expireTime;
    }

    private static void checkForErrors(List<AllocationThread> threads) throws Throwable {
        for (AllocationThread t : threads) {
            if (t.isFinished()) {
                t.checkForError();
            }
        }
    }

    private static final class AllocationThread extends Thread {
        private final CountDownLatch latch = new CountDownLatch(1);
        private final Queue<ByteBuf> buffers = new ArrayDeque<ByteBuf>(10);
        private final ByteBufAllocator allocator;
        private volatile boolean finished;
        private volatile Throwable error;

        public AllocationThread(ByteBufAllocator allocator) {
            this.allocator = allocator;
        }

        @Override
        public void run() {
            try {
                int idx = 0;
                while (!finished) {
                    for (int i = 0; i < 10; i++) {
                        buffers.add(allocator.directBuffer(
                                ALLOCATION_SIZES[Math.abs(idx++ % ALLOCATION_SIZES.length)],
                                Integer.MAX_VALUE));
                    }
                    releaseBuffers();
                }
            } catch (Throwable cause) {
                error = cause;
                finished = true;
            } finally {
                releaseBuffers();
            }
            latch.countDown();
        }

        private void releaseBuffers() {
            for (;;) {
                ByteBuf buf = buffers.poll();
                if (buf == null) {
                    break;
                }
                buf.release();
            }
        }

        public boolean isFinished() {
            return finished;
        }

        public void finish() throws Throwable {
            try {
                finished = true;
                latch.await();
                checkForError();
            } finally {
                releaseBuffers();
            }
        }

        public void checkForError() throws Throwable {
            if (error != null) {
                throw error;
            }
        }
    }
}
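
By default the test runs for 15 seconds. The duration can be shortened by setting the io.netty.buffer.PooledByteBufAllocatorTest.testConcurrentUsageTime system property (a value in milliseconds, read via SystemPropertyUtil.getLong above) on the JVM that executes the test, for example through the test runner's JVM arguments.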