Fix hash collision handling in DefaultHeaders iterator remove (#11028)
Motivation:

If two different headers end up in the same hash bucket, and you are iterating the header that is not the first in the bucket, and you use the iterator to remove the first element returned from the iterator, then you would get a NullPointerException.

Modification:

Change the DefaultHeaders iterator remove method to re-iterate the hash bucket and unlink the entry once found, if we don't have any existing iteration starting point.

Also made DefaultHeaders.remove0 package private to avoid a synthetic method indirection.

Result:

Removing entries via iterators from DefaultHeaders is now robust against hash collisions.
commit 28d4154fff
parent bd8a337e99
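For context, here is a minimal sketch of the failing scenario before this change. The CollidingHeaders helper is hypothetical; it mirrors the TestDefaultHeaders subclass added in the test changes below, and relies only on the add and valueIterator methods and the (HashingStrategy, ValueConverter) constructor that appear in this diff.

import io.netty.handler.codec.CharSequenceValueConverter;
import io.netty.handler.codec.DefaultHeaders;
import io.netty.util.HashingStrategy;

import java.util.Iterator;

public final class CollisionRepro {

    // Hypothetical helper for illustration; mirrors TestDefaultHeaders below.
    static final class CollidingHeaders
            extends DefaultHeaders<CharSequence, CharSequence, CollidingHeaders> {
        CollidingHeaders(HashingStrategy<CharSequence> nameHashingStrategy) {
            super(nameHashingStrategy, CharSequenceValueConverter.INSTANCE);
        }
    }

    public static void main(String[] args) {
        // Degenerate strategy: every name hashes to 0, so all names share one bucket.
        CollidingHeaders headers = new CollidingHeaders(new HashingStrategy<CharSequence>() {
            @Override
            public int hashCode(CharSequence obj) {
                return 0;
            }

            @Override
            public boolean equals(CharSequence a, CharSequence b) {
                return a.equals(b);
            }
        });

        headers.add("Cookie", "a=b");       // one entry in the bucket
        headers.add("other", "text/plain"); // a colliding entry in the same bucket

        // Remove the first element the iterator returns. Before this fix the
        // iterator had no 'previous' entry to relink, and remove0 threw a
        // NullPointerException; after the fix the bucket is re-scanned instead.
        Iterator<CharSequence> itr = headers.valueIterator("Cookie");
        itr.next();
        itr.remove();
    }
}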
DefaultHeaders.java
@@ -1012,12 +1012,22 @@ public class DefaultHeaders<K, V, T extends Headers<K, V, T>> implements Headers
         return value;
     }
 
-    private HeaderEntry<K, V> remove0(HeaderEntry<K, V> entry, HeaderEntry<K, V> previous) {
+    HeaderEntry<K, V> remove0(HeaderEntry<K, V> entry, HeaderEntry<K, V> previous) {
         int i = index(entry.hash);
-        HeaderEntry<K, V> e = entries[i];
-        if (e == entry) {
+        HeaderEntry<K, V> firstEntry = entries[i];
+        if (firstEntry == entry) {
             entries[i] = entry.next;
             previous = entries[i];
+        } else if (previous == null) {
+            // If we don't have any existing starting point, then start from the beginning.
+            previous = firstEntry;
+            HeaderEntry<K, V> next = firstEntry.next;
+            while (next != null && next != entry) {
+                previous = next;
+                next = next.next;
+            }
+            assert next != null: "Entry not found in its hash bucket: " + entry;
+            previous.next = entry.next;
         } else {
             previous.next = entry.next;
         }
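The new else-if branch is the standard unknown-predecessor unlink on a singly linked bucket chain. As a standalone illustration, using a hypothetical Node type rather than Netty's HeaderEntry, the same walk looks like this:

final class BucketUnlink {

    // Hypothetical minimal bucket node, for illustration only.
    static final class Node {
        Node next;
    }

    /**
     * Unlinks 'entry' from the bucket whose head is 'head' when the
     * predecessor is unknown; returns the (possibly new) bucket head.
     */
    static Node unlink(Node head, Node entry) {
        if (head == entry) {
            return entry.next; // entry was the head: the bucket now starts at its successor
        }
        // Walk the chain until 'previous' sits directly before 'entry'.
        Node previous = head;
        Node next = head.next;
        while (next != null && next != entry) {
            previous = next;
            next = next.next;
        }
        assert next != null : "Entry not found in its bucket";
        previous.next = entry.next;
        return head;
    }
}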
DefaultHeadersTest.java
@@ -15,6 +15,7 @@
 package io.netty.handler.codec;
 
 import io.netty.util.AsciiString;
+import io.netty.util.HashingStrategy;
 import org.junit.Test;
 
 import java.util.ArrayList;
@@ -26,6 +27,9 @@ import java.util.NoSuchElementException;
 
 import static io.netty.util.AsciiString.of;
 import static java.util.Arrays.asList;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.hasSize;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -48,6 +52,10 @@ public class DefaultHeadersTest {
         TestDefaultHeaders(ValueConverter<CharSequence> converter) {
             super(converter);
         }
+
+        TestDefaultHeaders(HashingStrategy<CharSequence> nameHashingStrategy) {
+            super(nameHashingStrategy, CharSequenceValueConverter.INSTANCE);
+        }
     }
 
     private static TestDefaultHeaders newInstance() {
@@ -759,4 +767,50 @@ public class DefaultHeadersTest {
         assertTrue(headers.getBoolean("name2", false));
         assertTrue(headers.getBoolean("name3", false));
     }
+
+    @Test
+    public void handlingOfHeaderNameHashCollisions() {
+        TestDefaultHeaders headers = new TestDefaultHeaders(new HashingStrategy<CharSequence>() {
+            @Override
+            public int hashCode(CharSequence obj) {
+                return 0; // Degenerate hashing strategy to enforce collisions.
+            }
+
+            @Override
+            public boolean equals(CharSequence a, CharSequence b) {
+                return a.equals(b);
+            }
+        });
+
+        headers.add("Cookie", "a=b; c=d; e=f");
+        headers.add("other", "text/plain"); // Add another header which will be saved in the same entries[index]
+
+        simulateCookieSplitting(headers);
+        List<CharSequence> cookies = headers.getAll("Cookie");
+
+        assertThat(cookies, hasSize(3));
+        assertThat(cookies, containsInAnyOrder((CharSequence) "a=b", "c=d", "e=f"));
+    }
+
+    /**
+     * Split up cookies into individual cookie crumb headers.
+     */
+    static void simulateCookieSplitting(TestDefaultHeaders headers) {
+        Iterator<CharSequence> cookieItr = headers.valueIterator("Cookie");
+        if (!cookieItr.hasNext()) {
+            return;
+        }
+        // We want to avoid "concurrent modifications" of the headers while we are iterating. So we insert crumbs
+        // into an intermediate collection and insert them after the split process concludes.
+        List<CharSequence> cookiesToAdd = new ArrayList<CharSequence>();
+        while (cookieItr.hasNext()) {
+            //noinspection DynamicRegexReplaceableByCompiledPattern
+            String[] cookies = cookieItr.next().toString().split("; ");
+            cookiesToAdd.addAll(asList(cookies));
+            cookieItr.remove();
+        }
+        for (CharSequence crumb : cookiesToAdd) {
+            headers.add("Cookie", crumb);
+        }
+    }
 }