[#3654] Synchronize on PoolSubpage head when allocating / freeing PoolSubpages
Motivation:

Currently we hold a lock on the whole PoolArena when we allocate / free PoolSubpages, which is wasteful as this also blocks "normal" allocations; the same is true vice versa.

Modifications:

Ensure we synchronize on the head of the PoolSubpage pool. This is done per size, so it is possible to concurrently allocate / deallocate PoolSubpages of different sizes, as well as perform normal allocations concurrently.

Result:

Less contention, and so faster allocation / deallocation.

Before this commit:

xxx:~/wrk $ ./wrk -H 'Connection: keep-alive' -d 120 -c 256 -t 16 -s scripts/pipeline-many.lua http://xxx:8080/plaintext
Running 2m test @ http://xxx:8080/plaintext
  16 threads and 256 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    17.61ms   29.52ms 689.73ms   97.27%
    Req/Sec   278.93k    41.97k  351.04k    84.83%
  530527460 requests in 2.00m, 71.64GB read
Requests/sec: 4422226.13
Transfer/sec:    611.52MB

After this commit:

xxx:~/wrk $ ./wrk -H 'Connection: keep-alive' -d 120 -c 256 -t 16 -s scripts/pipeline-many.lua http://xxx:8080/plaintext
Running 2m test @ http://xxx:8080/plaintext
  16 threads and 256 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    15.85ms   24.50ms 681.61ms   97.42%
    Req/Sec   287.14k    38.39k  360.33k    85.88%
  547902773 requests in 2.00m, 73.99GB read
Requests/sec: 4567066.11
Transfer/sec:    631.55MB

This is reproducible every time.
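The core idea is a form of lock striping: one monitor per size class instead of one arena-wide monitor. The following standalone sketch (hypothetical names, not Netty's API) shows the pattern. Each size class owns a sentinel head node of a circular doubly linked list, and allocation and release for that size class synchronize only on its head:

    // Minimal lock-striping sketch (hypothetical names, not Netty's API).
    // Each size class owns a sentinel head; threads working on different
    // size classes never contend on the same monitor.
    final class StripedFreeLists {
        static final class Node {
            Node prev, next;
            Node() { prev = next = this; } // sentinel starts as an empty circular list
        }

        private final Node[] heads;

        StripedFreeLists(int sizeClasses) {
            heads = new Node[sizeClasses];
            for (int i = 0; i < sizeClasses; i++) {
                heads[i] = new Node();
            }
        }

        /** Unlink and return the first node of the size class, or null if empty. */
        Node poll(int sizeClass) {
            Node head = heads[sizeClass];
            synchronized (head) { // per-size-class lock, not a global one
                Node first = head.next;
                if (first == head) {
                    return null; // only the sentinel left: list is empty
                }
                head.next = first.next;
                first.next.prev = head;
                first.prev = first.next = null;
                return first;
            }
        }

        /** Insert a node right after the sentinel of its size class. */
        void offer(int sizeClass, Node n) {
            Node head = heads[sizeClass];
            synchronized (head) {
                n.prev = head;
                n.next = head.next;
                head.next.prev = n;
                head.next = n;
            }
        }
    }

Because both poll and offer for a given size class lock the same head, the list's invariants are protected, while traffic on other size classes proceeds in parallel. This mirrors what the diff below does with PoolSubpage heads.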
parent 7e80e1bf97
commit f18990a8a5
buffer/src/main/java/io/netty/buffer/PoolArena.java
@@ -180,7 +180,12 @@ abstract class PoolArena<T> implements PoolArenaMetric {
             }
             final PoolSubpage<T> head = table[tableIdx];
 
-            synchronized (this) {
+            /**
+             * Synchronize on the head. This is needed as {@link PoolSubpage#allocate()} and
+             * {@link PoolSubpage#free(int)} may modify the doubly linked list as well.
+             */
+            synchronized (head) {
                 final PoolSubpage<T> s = head.next;
                 if (s != head) {
                     assert s.doNotDestroy && s.elemSize == normCapacity;
@@ -195,27 +200,24 @@ abstract class PoolArena<T> implements PoolArenaMetric {
                     }
                     return;
                 }
-                allocateNormal(buf, reqCapacity, normCapacity);
-                return;
-            }
+            }
+            allocateNormal(buf, reqCapacity, normCapacity);
+            return;
         }
         if (normCapacity <= chunkSize) {
             if (cache.allocateNormal(this, buf, reqCapacity, normCapacity)) {
                 // was able to allocate out of the cache so move on
                 return;
             }
-            synchronized (this) {
-                allocateNormal(buf, reqCapacity, normCapacity);
-            }
+            allocateNormal(buf, reqCapacity, normCapacity);
         } else {
             // Huge allocations are never served via the cache so just call allocateHuge
             allocateHuge(buf, reqCapacity);
         }
     }
 
-    private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
+    private synchronized void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
         ++allocationsNormal;
 
         if (q050.allocate(buf, reqCapacity, normCapacity) || q025.allocate(buf, reqCapacity, normCapacity) ||
             q000.allocate(buf, reqCapacity, normCapacity) || qInit.allocate(buf, reqCapacity, normCapacity) ||
             q075.allocate(buf, reqCapacity, normCapacity) || q100.allocate(buf, reqCapacity, normCapacity)) {
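A note on the last change in this hunk: marking allocateNormal as synchronized moves the arena-wide lock from every call site into the callee, so the subpage fast path above runs without ever touching the arena monitor. A generic Java illustration of the equivalence (not Netty code, names are made up):

    // Generic illustration: a synchronized instance method acquires the
    // same monitor as a synchronized (this) block in the caller.
    class Arena {
        // Before: every call site had to take the arena-wide lock itself.
        void allocateLockedAtCallSite() {
            synchronized (this) {
                doAllocate();
            }
        }

        // After: the callee takes the monitor, and call sites stay
        // lock-free until normal allocation is actually needed.
        synchronized void allocateLockedInCallee() {
            doAllocate();
        }

        private void doAllocate() {
            // chunk-list allocation would happen here
        }
    }

Pushing the lock into the callee also guarantees that no future call site can forget to take it.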
buffer/src/main/java/io/netty/buffer/PoolSubpage.java
@@ -72,7 +72,10 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
             }
         }
 
-        addToPool();
+        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
+        synchronized (head) {
+            addToPool(head);
+        }
     }
 
     /**
@@ -83,6 +86,13 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
             return toHandle(0);
         }
 
+        /**
+         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we
+         * synchronize on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to
+         * allocate out of the {@link PoolSubpage} pool for a given size.
+         */
+        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
+        synchronized (head) {
         if (numAvail == 0 || !doNotDestroy) {
             return -1;
         }
@@ -99,6 +109,7 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
 
         return toHandle(bitmapIdx);
+        }
     }
 
     /**
      * @return {@code true} if this subpage is in use.
@@ -110,6 +121,14 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
             return true;
         }
 
+        /**
+         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we
+         * synchronize on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to
+         * allocate out of the {@link PoolSubpage} pool for a given size.
+         */
+        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
+
+        synchronized (head) {
         int q = bitmapIdx >>> 6;
         int r = bitmapIdx & 63;
         assert (bitmap[q] >>> r & 1) != 0;
@@ -118,7 +137,7 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
         setNextAvail(bitmapIdx);
 
         if (numAvail ++ == 0) {
-            addToPool();
+            addToPool(head);
             return true;
         }
 
@@ -137,9 +156,9 @@ final class PoolSubpage<T> implements PoolSubpageMetric {
             return false;
         }
+        }
     }
 
-    private void addToPool() {
-        PoolSubpage<T> head = chunk.arena.findSubpagePoolHead(elemSize);
+    private void addToPool(PoolSubpage<T> head) {
         assert prev == null && next == null;
         prev = head;
         next = head.next;
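For completeness, addToPool after this change reads roughly as below. The head now arrives as a parameter from a caller that already holds its monitor, so the method no longer looks it up itself. The first three statements appear in the hunk above; the final two fall outside the diff context and are assumed from the usual insertion after a sentinel head:

    // Sketch of the full method after this commit; the last two statements
    // are assumed (standard doubly linked list insertion), since the hunk
    // context ends before them.
    private void addToPool(PoolSubpage<T> head) {
        assert prev == null && next == null;
        prev = head;        // this subpage goes right after the sentinel head
        next = head.next;
        next.prev = this;   // assumed: complete the back-link
        head.next = this;   // assumed: make the subpage reachable from the head
    }

Since every caller now passes in the head it is already synchronized on, the list mutation is always guarded by the same per-size monitor used by PoolArena's allocation fast path.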