fix the typos

Nikolay Fedorovskikh 2017-04-20 01:37:03 +05:00 committed by Norman Maurer
parent 38483e8790
commit 0692bf1b6a
142 changed files with 312 additions and 316 deletions


@ -219,7 +219,7 @@ public abstract class AbstractByteBufAllocator implements ByteBufAllocator {
private static void validate(int initialCapacity, int maxCapacity) {
if (initialCapacity < 0) {
throw new IllegalArgumentException("initialCapacity: " + initialCapacity + " (expectd: 0+)");
throw new IllegalArgumentException("initialCapacity: " + initialCapacity + " (expected: 0+)");
}
if (initialCapacity > maxCapacity) {
throw new IllegalArgumentException(String.format(
@ -246,7 +246,7 @@ public abstract class AbstractByteBufAllocator implements ByteBufAllocator {
@Override
public int calculateNewCapacity(int minNewCapacity, int maxCapacity) {
if (minNewCapacity < 0) {
throw new IllegalArgumentException("minNewCapacity: " + minNewCapacity + " (expectd: 0+)");
throw new IllegalArgumentException("minNewCapacity: " + minNewCapacity + " (expected: 0+)");
}
if (minNewCapacity > maxCapacity) {
throw new IllegalArgumentException(String.format(


@ -123,7 +123,7 @@ public class DefaultByteBufHolder implements ByteBufHolder {
}
/**
* Return {@link ByteBuf#toString()} without checking the reference count first. This is useful to implemement
* Return {@link ByteBuf#toString()} without checking the reference count first. This is useful to implement
* {@link #toString()}.
*/
protected final String contentToString() {


@ -232,7 +232,7 @@ abstract class PoolArena<T> implements PoolArenaMetric {
}
}
// Method must be called insided synchronized(this) { ... } block
// Method must be called inside synchronized(this) { ... } block
private void allocateNormal(PooledByteBuf<T> buf, int reqCapacity, int normCapacity) {
if (q050.allocate(buf, reqCapacity, normCapacity) || q025.allocate(buf, reqCapacity, normCapacity) ||
q000.allocate(buf, reqCapacity, normCapacity) || qInit.allocate(buf, reqCapacity, normCapacity) ||


@ -21,12 +21,12 @@ package io.netty.buffer;
public interface PoolChunkListMetric extends Iterable<PoolChunkMetric> {
/**
* Return the minum usage of the chunk list before which chunks are promoted to the previous list.
* Return the minimum usage of the chunk list before which chunks are promoted to the previous list.
*/
int minUsage();
/**
* Return the minum usage of the chunk list after which chunks are promoted to the next list.
* Return the maximum usage of the chunk list after which chunks are promoted to the next list.
*/
int maxUsage();
}


@ -328,14 +328,14 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements
}
/**
* Default number of heap areanas - System Property: io.netty.allocator.numHeapArenas - default 2 * cores
* Default number of heap arenas - System Property: io.netty.allocator.numHeapArenas - default 2 * cores
*/
public static int defaultNumHeapArena() {
return DEFAULT_NUM_HEAP_ARENA;
}
/**
* Default numer of direct arenas - System Property: io.netty.allocator.numDirectArenas - default 2 * cores
* Default number of direct arenas - System Property: io.netty.allocator.numDirectArenas - default 2 * cores
*/
public static int defaultNumDirectArena() {
return DEFAULT_NUM_DIRECT_ARENA;
@ -377,7 +377,7 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements
}
/**
* Return {@code true} if direct memory cache aligment is supported, {@code false} otherwise.
* Return {@code true} if direct memory cache alignment is supported, {@code false} otherwise.
*/
public static boolean isDirectMemoryCacheAlignmentSupported() {
return PlatformDependent.hasUnsafe();


@ -1,7 +1,7 @@
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file tothe License at:
* The Netty Project licenses this file to the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*


@ -38,7 +38,7 @@ import java.util.List;
* {@link ByteBuf} heapBuffer = buffer(128);
* {@link ByteBuf} directBuffer = directBuffer(256);
* {@link ByteBuf} wrappedBuffer = wrappedBuffer(new byte[128], new byte[256]);
* {@link ByteBuf} copiedBuffe r = copiedBuffer({@link ByteBuffer}.allocate(128));
* {@link ByteBuf} copiedBuffer = copiedBuffer({@link ByteBuffer}.allocate(128));
* </pre>
*
* <h3>Allocating a new buffer</h3>


@ -3462,66 +3462,66 @@ public abstract class AbstractByteBufTest {
}
@Test
public void testRetainedSliceUnreleasble1() {
testRetainedSliceUnreleasble(true, true);
public void testRetainedSliceUnreleasable1() {
testRetainedSliceUnreleasable(true, true);
}
@Test
public void testRetainedSliceUnreleasble2() {
testRetainedSliceUnreleasble(true, false);
public void testRetainedSliceUnreleasable2() {
testRetainedSliceUnreleasable(true, false);
}
@Test
public void testRetainedSliceUnreleasble3() {
testRetainedSliceUnreleasble(false, true);
public void testRetainedSliceUnreleasable3() {
testRetainedSliceUnreleasable(false, true);
}
@Test
public void testRetainedSliceUnreleasble4() {
testRetainedSliceUnreleasble(false, false);
public void testRetainedSliceUnreleasable4() {
testRetainedSliceUnreleasable(false, false);
}
@Test
public void testReadRetainedSliceUnreleasble1() {
testReadRetainedSliceUnreleasble(true, true);
public void testReadRetainedSliceUnreleasable1() {
testReadRetainedSliceUnreleasable(true, true);
}
@Test
public void testReadRetainedSliceUnreleasble2() {
testReadRetainedSliceUnreleasble(true, false);
public void testReadRetainedSliceUnreleasable2() {
testReadRetainedSliceUnreleasable(true, false);
}
@Test
public void testReadRetainedSliceUnreleasble3() {
testReadRetainedSliceUnreleasble(false, true);
public void testReadRetainedSliceUnreleasable3() {
testReadRetainedSliceUnreleasable(false, true);
}
@Test
public void testReadRetainedSliceUnreleasble4() {
testReadRetainedSliceUnreleasble(false, false);
public void testReadRetainedSliceUnreleasable4() {
testReadRetainedSliceUnreleasable(false, false);
}
@Test
public void testRetainedDuplicateUnreleasble1() {
testRetainedDuplicateUnreleasble(true, true);
public void testRetainedDuplicateUnreleasable1() {
testRetainedDuplicateUnreleasable(true, true);
}
@Test
public void testRetainedDuplicateUnreleasble2() {
testRetainedDuplicateUnreleasble(true, false);
public void testRetainedDuplicateUnreleasable2() {
testRetainedDuplicateUnreleasable(true, false);
}
@Test
public void testRetainedDuplicateUnreleasble3() {
testRetainedDuplicateUnreleasble(false, true);
public void testRetainedDuplicateUnreleasable3() {
testRetainedDuplicateUnreleasable(false, true);
}
@Test
public void testRetainedDuplicateUnreleasble4() {
testRetainedDuplicateUnreleasble(false, false);
public void testRetainedDuplicateUnreleasable4() {
testRetainedDuplicateUnreleasable(false, false);
}
private void testRetainedSliceUnreleasble(boolean initRetainedSlice, boolean finalRetainedSlice) {
private void testRetainedSliceUnreleasable(boolean initRetainedSlice, boolean finalRetainedSlice) {
ByteBuf buf = newBuffer(8);
ByteBuf buf1 = initRetainedSlice ? buf.retainedSlice() : buf.slice().retain();
ByteBuf buf2 = unreleasableBuffer(buf1);
@ -3534,7 +3534,7 @@ public abstract class AbstractByteBufTest {
assertEquals(0, buf.refCnt());
}
private void testReadRetainedSliceUnreleasble(boolean initRetainedSlice, boolean finalRetainedSlice) {
private void testReadRetainedSliceUnreleasable(boolean initRetainedSlice, boolean finalRetainedSlice) {
ByteBuf buf = newBuffer(8);
ByteBuf buf1 = initRetainedSlice ? buf.retainedSlice() : buf.slice().retain();
ByteBuf buf2 = unreleasableBuffer(buf1);
@ -3548,7 +3548,7 @@ public abstract class AbstractByteBufTest {
assertEquals(0, buf.refCnt());
}
private void testRetainedDuplicateUnreleasble(boolean initRetainedDuplicate, boolean finalRetainedDuplicate) {
private void testRetainedDuplicateUnreleasable(boolean initRetainedDuplicate, boolean finalRetainedDuplicate) {
ByteBuf buf = newBuffer(8);
ByteBuf buf1 = initRetainedDuplicate ? buf.retainedDuplicate() : buf.duplicate().retain();
ByteBuf buf2 = unreleasableBuffer(buf1);


@ -16,8 +16,6 @@
package io.netty.buffer;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import java.io.IOException;
import java.io.InputStream;
@ -174,12 +172,12 @@ public class ReadOnlyByteBufTest {
}
@Test
public void shouldIndicateNotWriteable() {
public void shouldIndicateNotWritable() {
assertFalse(unmodifiableBuffer(buffer(1)).isWritable());
}
@Test
public void shouldIndicteNotWritableAnyNumber() {
public void shouldIndicateNotWritableAnyNumber() {
assertFalse(unmodifiableBuffer(buffer(1)).isWritable(1));
}


@ -37,7 +37,7 @@ public interface DnsOptEcsRecord extends DnsOptPseudoRecord {
int scopePrefixLength();
/**
* Retuns the bytes of the {@link InetAddress} to use.
* Returns the bytes of the {@link InetAddress} to use.
*/
byte[] address();
}


@ -93,7 +93,7 @@ public class DefaultDnsRecordDecoderTest {
public void testDecodeMessageCompression() throws Exception {
// See https://www.ietf.org/rfc/rfc1035 [4.1.4. Message compression]
DefaultDnsRecordDecoder decoder = new DefaultDnsRecordDecoder();
byte[] rfcExample = new byte[] { 1, 'F', 3, 'I', 'S', 'I', 4, 'A', 'R', 'P', 'A',
byte[] rfcExample = { 1, 'F', 3, 'I', 'S', 'I', 4, 'A', 'R', 'P', 'A',
0, 3, 'F', 'O', 'O',
(byte) 0xC0, 0, // this is 20 in the example
(byte) 0xC0, 6, // this is 26 in the example
@ -103,7 +103,7 @@ public class DefaultDnsRecordDecoderTest {
DefaultDnsRawRecord rawUncompressedIndexedRecord = null;
ByteBuf buffer = Unpooled.wrappedBuffer(rfcExample);
try {
// First lets test that our utility funciton can correctly handle index references and decompression.
// First lets test that our utility function can correctly handle index references and decompression.
String plainName = DefaultDnsRecordDecoder.decodeName(buffer.duplicate());
assertEquals("F.ISI.ARPA.", plainName);
String uncompressedPlainName = DefaultDnsRecordDecoder.decodeName(buffer.duplicate().setIndex(16, 20));


@ -96,7 +96,7 @@ public class DnsResponseTest {
public ExpectedException exception = ExpectedException.none();
@Test
public void readMalormedResponseTest() throws Exception {
public void readMalformedResponseTest() throws Exception {
EmbeddedChannel embedder = new EmbeddedChannel(new DatagramDnsResponseDecoder());
ByteBuf packet = embedder.alloc().buffer(512).writeBytes(malformedLoopPacket);
exception.expect(CorruptedFrameException.class);


@ -204,12 +204,12 @@ public final class HAProxyMessage {
addressLen = 16;
} else {
throw new HAProxyProtocolException(
"unable to parse address information (unkown address family: " + addressFamily + ')');
"unable to parse address information (unknown address family: " + addressFamily + ')');
}
// Per spec, the src address begins at the 17th byte
srcAddress = ipBytestoString(header, addressLen);
dstAddress = ipBytestoString(header, addressLen);
srcAddress = ipBytesToString(header, addressLen);
dstAddress = ipBytesToString(header, addressLen);
srcPort = header.readUnsignedShort();
dstPort = header.readUnsignedShort();
}
@ -274,7 +274,7 @@ public final class HAProxyMessage {
* @param addressLen number of bytes to read (4 bytes for IPv4, 16 bytes for IPv6)
* @return string representation of the ip address
*/
private static String ipBytestoString(ByteBuf header, int addressLen) {
private static String ipBytesToString(ByteBuf header, int addressLen) {
StringBuilder sb = new StringBuilder();
if (addressLen == 4) {
sb.append(header.readByte() & 0xff);


@ -45,7 +45,7 @@ public enum HAProxyProtocolVersion {
}
/**
* Returns the {@link HAProxyProtocolVersion} represented by the higest 4 bits of the specified byte.
* Returns the {@link HAProxyProtocolVersion} represented by the highest 4 bits of the specified byte.
*
* @param verCmdByte protocol version and command byte
*/


@ -120,7 +120,7 @@ public enum HAProxyProxiedProtocol {
*/
public enum AddressFamily {
/**
* The UNSPECIFIED address family represents a connection which was forwarded for an unkown protocol.
* The UNSPECIFIED address family represents a connection which was forwarded for an unknown protocol.
*/
AF_UNSPEC(AF_UNSPEC_BYTE),
/**
@ -184,7 +184,7 @@ public enum HAProxyProxiedProtocol {
*/
public enum TransportProtocol {
/**
* The UNSPEC transport protocol represents a connection which was forwarded for an unkown protocol.
* The UNSPEC transport protocol represents a connection which was forwarded for an unknown protocol.
*/
UNSPEC(TRANSPORT_UNSPEC_BYTE),
/**


@ -184,7 +184,7 @@ public class HAProxyMessageDecoderTest {
@Test
public void testTransportProtocolAndAddressFamily() {
final byte unkown = HAProxyProxiedProtocol.UNKNOWN.byteValue();
final byte unknown = HAProxyProxiedProtocol.UNKNOWN.byteValue();
final byte tcp4 = HAProxyProxiedProtocol.TCP4.byteValue();
final byte tcp6 = HAProxyProxiedProtocol.TCP6.byteValue();
final byte udp4 = HAProxyProxiedProtocol.UDP4.byteValue();
@ -192,7 +192,7 @@ public class HAProxyMessageDecoderTest {
final byte unix_stream = HAProxyProxiedProtocol.UNIX_STREAM.byteValue();
final byte unix_dgram = HAProxyProxiedProtocol.UNIX_DGRAM.byteValue();
assertEquals(TransportProtocol.UNSPEC, TransportProtocol.valueOf(unkown));
assertEquals(TransportProtocol.UNSPEC, TransportProtocol.valueOf(unknown));
assertEquals(TransportProtocol.STREAM, TransportProtocol.valueOf(tcp4));
assertEquals(TransportProtocol.STREAM, TransportProtocol.valueOf(tcp6));
assertEquals(TransportProtocol.STREAM, TransportProtocol.valueOf(unix_stream));
@ -200,7 +200,7 @@ public class HAProxyMessageDecoderTest {
assertEquals(TransportProtocol.DGRAM, TransportProtocol.valueOf(udp6));
assertEquals(TransportProtocol.DGRAM, TransportProtocol.valueOf(unix_dgram));
assertEquals(AddressFamily.AF_UNSPEC, AddressFamily.valueOf(unkown));
assertEquals(AddressFamily.AF_UNSPEC, AddressFamily.valueOf(unknown));
assertEquals(AddressFamily.AF_IPv4, AddressFamily.valueOf(tcp4));
assertEquals(AddressFamily.AF_IPv4, AddressFamily.valueOf(udp4));
assertEquals(AddressFamily.AF_IPv6, AddressFamily.valueOf(tcp6));


@ -54,7 +54,7 @@ public final class HttpUtil {
}
/**
* Determine if a uri is in asteric-form according to
* Determine if a uri is in asterisk-form according to
* <a href="https://tools.ietf.org/html/rfc7230#section-5.3">rfc7230, 5.3</a>.
*/
public static boolean isAsteriskForm(URI uri) {
@ -217,7 +217,7 @@ public final class HttpUtil {
* specified message is not a web socket message, {@code -1} is returned.
*/
private static int getWebSocketContentLength(HttpMessage message) {
// WebSockset messages have constant content-lengths.
// WebSocket messages have constant content-lengths.
HttpHeaders h = message.headers();
if (message instanceof HttpRequest) {
HttpRequest req = (HttpRequest) message;


@ -148,7 +148,7 @@ public final class CorsConfig {
* xhr.withCredentials = true;
* </pre>
* The default value for 'withCredentials' is false in which case no cookies are sent.
* Settning this to true will included cookies in cross origin requests.
* Setting this to true will included cookies in cross origin requests.
*
* @return {@code true} if cookies are supported.
*/
@ -221,7 +221,7 @@ public final class CorsConfig {
* and this setting will check that the Origin is valid and if it is not valid no
* further processing will take place, and a error will be returned to the calling client.
*
* @return {@code true} if a CORS request should short-curcuit upon receiving an invalid Origin header.
* @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
*/
public boolean isShortCircuit() {
return shortCircuit;


@ -93,7 +93,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
private int bodyListHttpDataRank;
/**
* If multipart, this is the boundary for the flobal multipart
* If multipart, this is the boundary for the global multipart
*/
private String multipartDataBoundary;
@ -983,7 +983,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
*
* @return the String from one line
* @throws NotEnoughDataDecoderException
* Need more chunks and reset the readerInder to the previous
* Need more chunks and reset the {@code readerIndex} to the previous
* value
*/
private String readLineStandard() {
@ -1023,7 +1023,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
*
* @return the String from one line
* @throws NotEnoughDataDecoderException
* Need more chunks and reset the readerInder to the previous
* Need more chunks and reset the {@code readerIndex} to the previous
* value
*/
private String readLine() {
@ -1080,7 +1080,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
* @return the String from one line as the delimiter searched (opening or
* closing)
* @throws NotEnoughDataDecoderException
* Need more chunks and reset the readerInder to the previous
* Need more chunks and reset the {@code readerIndex} to the previous
* value
*/
private String readDelimiterStandard(String delimiter) {
@ -1400,7 +1400,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
* FileUpload. If the delimiter is found, the FileUpload is completed.
*
* @throws NotEnoughDataDecoderException
* Need more chunks but do not reset the readerInder since some
* Need more chunks but do not reset the {@code readerIndex} since some
* values will be already added to the FileOutput
* @throws ErrorDataDecoderException
* write IO error occurs with the FileUpload


@ -506,7 +506,7 @@ public class HttpPostRequestEncoder implements ChunkedInput<HttpContent> {
* add multipart delimiter, multipart body header and Data to multipart list
* reset currentFileUpload, duringMixedMode
* if FileUpload: take care of multiple file for one field => mixed mode
* if (duringMixeMode)
* if (duringMixedMode)
* if (currentFileUpload.name == data.name)
* add mixedmultipart delimiter, mixedmultipart body header and Data to multipart list
* else


@ -384,7 +384,7 @@ public abstract class WebSocketClientHandshaker {
}
/**
* Verfiy the {@link FullHttpResponse} and throws a {@link WebSocketHandshakeException} if something is wrong.
* Verify the {@link FullHttpResponse} and throws a {@link WebSocketHandshakeException} if something is wrong.
*/
protected abstract void verify(FullHttpResponse response);


@ -65,8 +65,8 @@ public class WebSocketClientExtensionHandler extends ChannelDuplexHandler {
HttpRequest request = (HttpRequest) msg;
String headerValue = request.headers().getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS);
for (WebSocketClientExtensionHandshaker extentionHandshaker : extensionHandshakers) {
WebSocketExtensionData extensionData = extentionHandshaker.newRequestData();
for (WebSocketClientExtensionHandshaker extensionHandshaker : extensionHandshakers) {
WebSocketExtensionData extensionData = extensionHandshaker.newRequestData();
headerValue = WebSocketExtensionUtil.appendExtension(headerValue,
extensionData.name(), extensionData.parameters());
}
@ -109,7 +109,7 @@ public class WebSocketClientExtensionHandler extends ChannelDuplexHandler {
validExtensions.add(validExtension);
} else {
throw new CodecException(
"invalid WebSocket Extension handhshake for \"" + extensionsHeader + "\"");
"invalid WebSocket Extension handshake for \"" + extensionsHeader + '"');
}
}


@ -35,7 +35,7 @@ public final class RtspHeaderNames {
*/
public static final AsciiString ACCEPT_ENCODING = HttpHeaderNames.ACCEPT_ENCODING;
/**
* {@code "accept-lanugage"}
* {@code "accept-language"}
*/
public static final AsciiString ACCEPT_LANGUAGE = HttpHeaderNames.ACCEPT_LANGUAGE;
/**


@ -43,7 +43,7 @@ public final class RtspHeaders {
*/
public static final String ACCEPT_ENCODING = HttpHeaders.Names.ACCEPT_ENCODING;
/**
* {@code "Accept-Lanugage"}
* {@code "Accept-Language"}
*/
public static final String ACCEPT_LANGUAGE = HttpHeaders.Names.ACCEPT_LANGUAGE;
/**


@ -27,7 +27,7 @@ import static io.netty.util.AsciiString.CASE_INSENSITIVE_HASHER;
import static io.netty.util.AsciiString.CASE_SENSITIVE_HASHER;
public class DefaultSpdyHeaders extends DefaultHeaders<CharSequence, CharSequence, SpdyHeaders> implements SpdyHeaders {
private static final NameValidator<CharSequence> SpydNameValidator = new NameValidator<CharSequence>() {
private static final NameValidator<CharSequence> SpdyNameValidator = new NameValidator<CharSequence>() {
@Override
public void validateName(CharSequence name) {
SpdyCodecUtil.validateHeaderName(name);
@ -42,7 +42,7 @@ public class DefaultSpdyHeaders extends DefaultHeaders<CharSequence, CharSequenc
public DefaultSpdyHeaders(boolean validate) {
super(CASE_INSENSITIVE_HASHER,
validate ? HeaderValueConverterAndValidator.INSTANCE : CharSequenceValueConverter.INSTANCE,
validate ? SpydNameValidator : NameValidator.NOT_NULL);
validate ? SpdyNameValidator : NameValidator.NOT_NULL);
}
@Override


@ -179,7 +179,7 @@ public class SpdyHttpDecoder extends MessageToMessageDecoder<SpdyFrame> {
try {
FullHttpRequest httpRequestWithEntity = createHttpRequest(spdySynStreamFrame, ctx.alloc());
// Set the Stream-ID, Associated-To-Stream-ID, iand Priority as headers
// Set the Stream-ID, Associated-To-Stream-ID, and Priority as headers
httpRequestWithEntity.headers().setInt(Names.STREAM_ID, streamId);
httpRequestWithEntity.headers().setInt(Names.ASSOCIATED_TO_STREAM_ID, associatedToStreamId);
httpRequestWithEntity.headers().setInt(Names.PRIORITY, spdySynStreamFrame.priority());


@ -16,7 +16,6 @@
package io.netty.handler.codec.http;
import io.netty.handler.codec.http.HttpHeadersTestUtils.HeaderValue;
import io.netty.util.internal.StringUtil;
import org.junit.Test;
import java.util.Arrays;
@ -25,7 +24,6 @@ import java.util.Collections;
import static io.netty.util.AsciiString.contentEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
public class CombinedHttpHeadersTest {
@ -169,7 +167,7 @@ public class CombinedHttpHeadersTest {
}
@Test
public void addIterableCsvEmtpy() {
public void addIterableCsvEmpty() {
final CombinedHttpHeaders headers = newCombinedHttpHeaders();
headers.add(HEADER_NAME, Collections.<CharSequence>emptyList());
assertEquals(Arrays.asList(""), headers.getAll(HEADER_NAME));


@ -43,7 +43,7 @@ public class DefaultHttpHeadersTest {
}
@Test(expected = IllegalArgumentException.class)
public void emtpyHeaderNameNotAllowed() {
public void emptyHeaderNameNotAllowed() {
new DefaultHttpHeaders().add(StringUtil.EMPTY_STRING, "foo");
}


@ -155,7 +155,7 @@ public class HttpClientCodecTest {
ServerBootstrap sb = new ServerBootstrap();
Bootstrap cb = new Bootstrap();
final CountDownLatch serverChannelLatch = new CountDownLatch(1);
final CountDownLatch responseRecievedLatch = new CountDownLatch(1);
final CountDownLatch responseReceivedLatch = new CountDownLatch(1);
try {
sb.group(new NioEventLoopGroup(2));
sb.channel(NioServerSocketChannel.class);
@ -212,7 +212,7 @@ public class HttpClientCodecTest {
ch.pipeline().addLast(new SimpleChannelInboundHandler<FullHttpResponse>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) {
responseRecievedLatch.countDown();
responseReceivedLatch.countDown();
}
});
}
@ -226,7 +226,7 @@ public class HttpClientCodecTest {
Channel clientChannel = ccf.channel();
assertTrue(serverChannelLatch.await(5, SECONDS));
clientChannel.writeAndFlush(new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"));
assertTrue(responseRecievedLatch.await(5, SECONDS));
assertTrue(responseReceivedLatch.await(5, SECONDS));
} finally {
sb.config().group().shutdownGracefully();
sb.config().childGroup().shutdownGracefully();


@ -60,13 +60,13 @@ public class HttpObjectAggregatorTest {
// this should trigger a channelRead event so return true
assertTrue(embedder.writeInbound(chunk3));
assertTrue(embedder.finish());
FullHttpRequest aggratedMessage = embedder.readInbound();
assertNotNull(aggratedMessage);
FullHttpRequest aggregatedMessage = embedder.readInbound();
assertNotNull(aggregatedMessage);
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
HttpUtil.getContentLength(aggratedMessage));
assertEquals(Boolean.TRUE.toString(), aggratedMessage.headers().get(of("X-Test")));
checkContentBuffer(aggratedMessage);
HttpUtil.getContentLength(aggregatedMessage));
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
checkContentBuffer(aggregatedMessage);
assertNull(embedder.readInbound());
}
@ -101,14 +101,14 @@ public class HttpObjectAggregatorTest {
// this should trigger a channelRead event so return true
assertTrue(embedder.writeInbound(trailer));
assertTrue(embedder.finish());
FullHttpRequest aggratedMessage = embedder.readInbound();
assertNotNull(aggratedMessage);
FullHttpRequest aggregatedMessage = embedder.readInbound();
assertNotNull(aggregatedMessage);
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
HttpUtil.getContentLength(aggratedMessage));
assertEquals(Boolean.TRUE.toString(), aggratedMessage.headers().get(of("X-Test")));
assertEquals(Boolean.TRUE.toString(), aggratedMessage.trailingHeaders().get(of("X-Trailer")));
checkContentBuffer(aggratedMessage);
HttpUtil.getContentLength(aggregatedMessage));
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.trailingHeaders().get(of("X-Trailer")));
checkContentBuffer(aggregatedMessage);
assertNull(embedder.readInbound());
}
@ -245,13 +245,13 @@ public class HttpObjectAggregatorTest {
// this should trigger a channelRead event so return true
assertTrue(embedder.writeInbound(chunk3));
assertTrue(embedder.finish());
FullHttpRequest aggratedMessage = embedder.readInbound();
assertNotNull(aggratedMessage);
FullHttpRequest aggregatedMessage = embedder.readInbound();
assertNotNull(aggregatedMessage);
assertEquals(chunk1.content().readableBytes() + chunk2.content().readableBytes(),
HttpUtil.getContentLength(aggratedMessage));
assertEquals(Boolean.TRUE.toString(), aggratedMessage.headers().get(of("X-Test")));
checkContentBuffer(aggratedMessage);
HttpUtil.getContentLength(aggregatedMessage));
assertEquals(Boolean.TRUE.toString(), aggregatedMessage.headers().get(of("X-Test")));
checkContentBuffer(aggregatedMessage);
assertNull(embedder.readInbound());
}


@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
public class HttpResponseEncoderTest {
private static final long INTEGER_OVERLFLOW = (long) Integer.MAX_VALUE + 1;
private static final long INTEGER_OVERFLOW = (long) Integer.MAX_VALUE + 1;
private static final FileRegion FILE_REGION = new DummyLongFileRegion();
@Test
@ -83,7 +83,7 @@ public class HttpResponseEncoderTest {
@Override
public long count() {
return INTEGER_OVERLFLOW;
return INTEGER_OVERFLOW;
}
@Override


@ -272,14 +272,14 @@ public class CorsHandlerTest {
}
@Test
public void simpleRequestShortCurcuit() {
public void simpleRequestShortCircuit() {
final CorsConfig config = forOrigin("http://localhost:8080").shortCircuit().build();
final HttpResponse response = simpleRequest(config, "http://localhost:7777");
assertThat(response.status(), is(FORBIDDEN));
}
@Test
public void simpleRequestNoShortCurcuit() {
public void simpleRequestNoShortCircuit() {
final CorsConfig config = forOrigin("http://localhost:8080").build();
final HttpResponse response = simpleRequest(config, "http://localhost:7777");
assertThat(response.status(), is(OK));
@ -287,7 +287,7 @@ public class CorsHandlerTest {
}
@Test
public void shortCurcuitNonCorsRequest() {
public void shortCircuitNonCorsRequest() {
final CorsConfig config = forOrigin("https://localhost").shortCircuit().build();
final HttpResponse response = simpleRequest(config, null);
assertThat(response.status(), is(OK));
@ -295,7 +295,7 @@ public class CorsHandlerTest {
}
@Test
public void shortCurcuitWithConnectionKeepAliveShouldStayOpen() {
public void shortCircuitWithConnectionKeepAliveShouldStayOpen() {
final CorsConfig config = forOrigin("http://localhost:8080").shortCircuit().build();
final EmbeddedChannel channel = new EmbeddedChannel(new CorsHandler(config));
final FullHttpRequest request = createHttpRequest(GET);
@ -313,7 +313,7 @@ public class CorsHandlerTest {
}
@Test
public void shortCurcuitWithoutConnectionShouldStayOpen() {
public void shortCircuitWithoutConnectionShouldStayOpen() {
final CorsConfig config = forOrigin("http://localhost:8080").shortCircuit().build();
final EmbeddedChannel channel = new EmbeddedChannel(new CorsHandler(config));
final FullHttpRequest request = createHttpRequest(GET);
@ -330,7 +330,7 @@ public class CorsHandlerTest {
}
@Test
public void shortCurcuitWithConnectionCloseShouldClose() {
public void shortCircuitWithConnectionCloseShouldClose() {
final CorsConfig config = forOrigin("http://localhost:8080").shortCircuit().build();
final EmbeddedChannel channel = new EmbeddedChannel(new CorsHandler(config));
final FullHttpRequest request = createHttpRequest(GET);


@ -55,7 +55,7 @@ public class WebSocketHandshakeHandOverTest {
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt == ServerHandshakeStateEvent.HANDSHAKE_COMPLETE) {
serverReceivedHandshake = true;
// immediatly send a message to the client on connect
// immediately send a message to the client on connect
ctx.writeAndFlush(new TextWebSocketFrame("abc"));
} else if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) {
serverHandshakeComplete = (WebSocketServerProtocolHandler.HandshakeComplete) evt;


@ -146,7 +146,7 @@ public class WebSocketRequestBuilder {
return req;
}
public static HttpRequest sucessful() {
public static HttpRequest successful() {
return new WebSocketRequestBuilder().httpVersion(HTTP_1_1)
.method(HttpMethod.GET)
.uri("/test")


@ -124,7 +124,7 @@ public class WebSocketServerProtocolHandlerTest {
if (ch.pipeline().context(HttpRequestDecoder.class) != null) {
// Removing the HttpRequestDecoder because we are writing a TextWebSocketFrame and thus
// decoding is not neccessary.
// decoding is not necessary.
ch.pipeline().remove(HttpRequestDecoder.class);
}
@ -147,7 +147,7 @@ public class WebSocketServerProtocolHandlerTest {
}
private static void writeUpgradeRequest(EmbeddedChannel ch) {
ch.writeInbound(WebSocketRequestBuilder.sucessful());
ch.writeInbound(WebSocketRequestBuilder.successful());
}
private static String getResponseMessage(FullHttpResponse response) {


@ -433,9 +433,9 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder<T extends Http2Conne
return (B) this;
}
private void enforceNonCodecConstraints(String rejectee) {
enforceConstraint(rejectee, "server/connection", decoder);
enforceConstraint(rejectee, "server/connection", encoder);
private void enforceNonCodecConstraints(String rejected) {
enforceConstraint(rejected, "server/connection", decoder);
enforceConstraint(rejected, "server/connection", encoder);
}
private static void enforceConstraint(String methodName, String rejectorName, Object value) {


@ -379,17 +379,17 @@ public class DefaultHttp2ConnectionEncoder implements Http2ConnectionEncoder {
}
// Determine how much data to write.
int writeableData = min(queuedData, allowedBytes);
int writableData = min(queuedData, allowedBytes);
ChannelPromise writePromise = ctx.newPromise().addListener(this);
ByteBuf toWrite = queue.remove(writeableData, writePromise);
ByteBuf toWrite = queue.remove(writableData, writePromise);
dataSize = queue.readableBytes();
// Determine how much padding to write.
int writeablePadding = min(allowedBytes - writeableData, padding);
padding -= writeablePadding;
int writablePadding = min(allowedBytes - writableData, padding);
padding -= writablePadding;
// Write the frame(s).
frameWriter().writeData(ctx, stream.id(), toWrite, writeablePadding,
frameWriter().writeData(ctx, stream.id(), toWrite, writablePadding,
endOfStream && size() == 0, writePromise);
}


@ -86,12 +86,12 @@ public class DefaultHttp2FrameWriter implements Http2FrameWriter, Http2FrameSize
this(new DefaultHttp2HeadersEncoder());
}
public DefaultHttp2FrameWriter(SensitivityDetector headersSensativityDetector) {
this(new DefaultHttp2HeadersEncoder(headersSensativityDetector));
public DefaultHttp2FrameWriter(SensitivityDetector headersSensitivityDetector) {
this(new DefaultHttp2HeadersEncoder(headersSensitivityDetector));
}
public DefaultHttp2FrameWriter(SensitivityDetector headersSensativityDetector, boolean ignoreMaxHeaderListSize) {
this(new DefaultHttp2HeadersEncoder(headersSensativityDetector, ignoreMaxHeaderListSize));
public DefaultHttp2FrameWriter(SensitivityDetector headersSensitivityDetector, boolean ignoreMaxHeaderListSize) {
this(new DefaultHttp2HeadersEncoder(headersSensitivityDetector, ignoreMaxHeaderListSize));
}
public DefaultHttp2FrameWriter(Http2HeadersEncoder headersEncoder) {


@ -71,7 +71,7 @@ public class DefaultHttp2HeadersDecoder implements Http2HeadersDecoder, Http2Hea
}
/**
* Exposed Used for testing only! Default values used in the initial settings frame are overriden intentionally
* Exposed Used for testing only! Default values used in the initial settings frame are overridden intentionally
* for testing but violate the RFC if used outside the scope of testing.
*/
DefaultHttp2HeadersDecoder(boolean validateHeaders, HpackDecoder hpackDecoder) {


@ -47,7 +47,7 @@ public class DefaultHttp2HeadersEncoder implements Http2HeadersEncoder, Http2Hea
}
/**
* Exposed Used for testing only! Default values used in the initial settings frame are overriden intentionally
* Exposed Used for testing only! Default values used in the initial settings frame are overridden intentionally
* for testing but violate the RFC if used outside the scope of testing.
*/
DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, HpackEncoder hpackEncoder) {


@ -239,12 +239,12 @@ public class DefaultHttp2RemoteFlowController implements Http2RemoteFlowControll
}
private int maxUsableChannelBytes() {
// If the channel isWritable, allow at least minUseableChannelBytes.
// If the channel isWritable, allow at least minUsableChannelBytes.
int channelWritableBytes = (int) min(Integer.MAX_VALUE, ctx.channel().bytesBeforeUnwritable());
int useableBytes = channelWritableBytes > 0 ? max(channelWritableBytes, minUsableChannelBytes()) : 0;
int usableBytes = channelWritableBytes > 0 ? max(channelWritableBytes, minUsableChannelBytes()) : 0;
// Clip the usable bytes by the connection window.
return min(connectionState.windowSize(), useableBytes);
return min(connectionState.windowSize(), usableBytes);
}
/**


@ -101,7 +101,7 @@ final class HpackDecoder {
}
/**
* Exposed Used for testing only! Default values used in the initial settings frame are overriden intentionally
* Exposed Used for testing only! Default values used in the initial settings frame are overridden intentionally
* for testing but violate the RFC if used outside the scope of testing.
*/
HpackDecoder(long maxHeaderListSize, int initialHuffmanDecodeCapacity, int maxHeaderTableSize) {


@ -173,7 +173,7 @@ public final class Http2CodecUtil {
}
/**
* Iteratively looks through the causaility chain for the given exception and returns the first
* Iteratively looks through the causality chain for the given exception and returns the first
* {@link Http2Exception} or {@code null} if none.
*/
public static Http2Exception getEmbeddedHttp2Exception(Throwable cause) {


@ -270,7 +270,7 @@ public interface Http2Connection {
/**
* Removes a listener of stream life-cycle events. If the same listener was added multiple times
* then only the first occurence gets removed.
* then only the first occurrence gets removed.
*/
void removeListener(Listener listener);


@ -162,7 +162,7 @@ public interface Http2FrameListener {
* Handles an inbound {@code PUSH_PROMISE} frame. Only called if {@code END_HEADERS} encountered.
* <p>
* Promised requests MUST be authoritative, cacheable, and safe.
* See <a href="https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-8.2">[RFC http2], Seciton 8.2</a>.
* See <a href="https://tools.ietf.org/html/draft-ietf-httpbis-http2-17#section-8.2">[RFC http2], Section 8.2</a>.
* <p>
* Only one of the following methods will be called for each {@code HEADERS} frame sequence.
* One will be called when the {@code END_HEADERS} flag has been received.


@ -446,16 +446,16 @@ public final class HttpConversionUtil {
return path.isEmpty() ? EMPTY_REQUEST_PATH : new AsciiString(path);
}
private static void setHttp2Authority(String autority, Http2Headers out) {
private static void setHttp2Authority(String authority, Http2Headers out) {
// The authority MUST NOT include the deprecated "userinfo" subcomponent
if (autority != null) {
int endOfUserInfo = autority.indexOf('@');
if (authority != null) {
int endOfUserInfo = authority.indexOf('@');
if (endOfUserInfo < 0) {
out.authority(new AsciiString(autority));
} else if (endOfUserInfo + 1 < autority.length()) {
out.authority(new AsciiString(autority.substring(endOfUserInfo + 1)));
out.authority(new AsciiString(authority));
} else if (endOfUserInfo + 1 < authority.length()) {
out.authority(new AsciiString(authority.substring(endOfUserInfo + 1)));
} else {
throw new IllegalArgumentException("autority: " + autority);
throw new IllegalArgumentException("authority: " + authority);
}
}
}


@ -161,7 +161,7 @@ public final class UniformStreamByteDistributor implements StreamByteDistributor
}
// In addition to only enqueuing state when they have frames we enforce the following restrictions:
// 1. If the window has gone negative. We never want to queue a state. However we also don't want to
// Immediately remove the item if it is already queued because removal from dequeue is O(n). So
// Immediately remove the item if it is already queued because removal from deque is O(n). So
// we allow it to stay queued and rely on the distribution loop to remove this state.
// 2. If the window is zero we only want to queue if we are not writing. If we are writing that means
// we gave the state a chance to write zero length frames. We wait until updateStreamableBytes is


@ -114,7 +114,7 @@ public class DataCompressionHttp2Test {
}
@After
public void cleaup() throws IOException {
public void cleanup() throws IOException {
serverOut.close();
}


@ -304,7 +304,7 @@ public class DefaultHttp2ConnectionDecoderTest {
}
@Test
public void dataReadAfterGoAwaySentOnUknownStreamShouldIgnore() throws Exception {
public void dataReadAfterGoAwaySentOnUnknownStreamShouldIgnore() throws Exception {
// Throw an exception when checking stream state.
when(connection.stream(STREAM_ID)).thenReturn(null);
mockGoAwaySent();
@ -403,7 +403,7 @@ public class DefaultHttp2ConnectionDecoderTest {
verify(remote, never()).createStream(anyInt(), anyBoolean());
verify(stream, never()).open(anyBoolean());
// Verify that the event was absorbed and not propagated to the oberver.
// Verify that the event was absorbed and not propagated to the observer.
verify(listener, never()).onHeadersRead(eq(ctx), anyInt(), any(Http2Headers.class), anyInt(), anyBoolean());
verify(remote, never()).createStream(anyInt(), anyBoolean());
verify(stream, never()).open(anyBoolean());
@ -417,7 +417,7 @@ public class DefaultHttp2ConnectionDecoderTest {
verify(remote, never()).createStream(anyInt(), anyBoolean());
verify(stream, never()).open(anyBoolean());
// Verify that the event was absorbed and not propagated to the oberver.
// Verify that the event was absorbed and not propagated to the observer.
verify(listener, never()).onHeadersRead(eq(ctx), anyInt(), any(Http2Headers.class), anyInt(), anyBoolean());
verify(remote, never()).createStream(anyInt(), anyBoolean());
verify(stream, never()).open(anyBoolean());
@ -614,7 +614,7 @@ public class DefaultHttp2ConnectionDecoderTest {
}
@Test
public void pingReadWithAckShouldNotifylistener() throws Exception {
public void pingReadWithAckShouldNotifyListener() throws Exception {
decode().onPingAckRead(ctx, emptyPingBuf());
verify(listener).onPingAckRead(eq(ctx), eq(emptyPingBuf()));
}
@ -627,7 +627,7 @@ public class DefaultHttp2ConnectionDecoderTest {
}
@Test
public void settingsReadWithAckShouldNotifylistener() throws Exception {
public void settingsReadWithAckShouldNotifyListener() throws Exception {
decode().onSettingsAckRead(ctx);
// Take into account the time this was called during setup().
verify(listener, times(2)).onSettingsAckRead(eq(ctx));
@ -661,10 +661,10 @@ public class DefaultHttp2ConnectionDecoderTest {
* Calls the decode method on the handler and gets back the captured internal listener
*/
private Http2FrameListener decode() throws Exception {
ArgumentCaptor<Http2FrameListener> internallistener = ArgumentCaptor.forClass(Http2FrameListener.class);
doNothing().when(reader).readFrame(eq(ctx), any(ByteBuf.class), internallistener.capture());
ArgumentCaptor<Http2FrameListener> internalListener = ArgumentCaptor.forClass(Http2FrameListener.class);
doNothing().when(reader).readFrame(eq(ctx), any(ByteBuf.class), internalListener.capture());
decoder.decodeFrame(ctx, EMPTY_BUFFER, Collections.emptyList());
return internallistener.getValue();
return internalListener.getValue();
}
private void mockFlowControl(final int processedBytes) throws Http2Exception {


@ -37,7 +37,7 @@ public class DefaultHttp2HeadersTest {
}
@Test(expected = Http2Exception.class)
public void emtpyHeaderNameNotAllowed() {
public void emptyHeaderNameNotAllowed() {
new DefaultHttp2Headers().add(StringUtil.EMPTY_STRING, "foo");
}
@ -105,7 +105,7 @@ public class DefaultHttp2HeadersTest {
}
@Test
public void testSetHeadersOrdersPsuedoHeadersCorrectly() {
public void testSetHeadersOrdersPseudoHeadersCorrectly() {
Http2Headers headers = newHeaders();
Http2Headers other = new DefaultHttp2Headers().add("name2", "value2").authority("foo");
@ -117,7 +117,7 @@ public class DefaultHttp2HeadersTest {
}
@Test
public void testSetAllOrdersPsuedoHeadersCorrectly() {
public void testSetAllOrdersPseudoHeadersCorrectly() {
Http2Headers headers = newHeaders();
Http2Headers other = new DefaultHttp2Headers().add("name2", "value2").authority("foo");


@ -620,7 +620,7 @@ public class Http2ConnectionHandlerTest {
}
@Test
public void writeRstStreamForUnkownStreamUsingVoidPromise() throws Exception {
public void writeRstStreamForUnknownStreamUsingVoidPromise() throws Exception {
writeRstStreamUsingVoidPromise(NON_EXISTANT_STREAM_ID);
}


@ -132,7 +132,7 @@ public class Http2ConnectionRoundtripTest {
}
@Test
public void inflightFrameAfterStreamResetShouldNotMakeConnectionUnsuable() throws Exception {
public void inflightFrameAfterStreamResetShouldNotMakeConnectionUnusable() throws Exception {
bootstrapEnv(1, 1, 2, 1);
final CountDownLatch latch = new CountDownLatch(1);
doAnswer(new Answer<Void>() {


@ -60,7 +60,7 @@ public class ReadOnlyHttp2HeadersTest {
}
@Test
public void emtpyHeaderNameAllowed() {
public void emptyHeaderNameAllowed() {
ReadOnlyHttp2Headers.trailers(false, AsciiString.EMPTY_STRING, new AsciiString("foo"));
}


@ -35,7 +35,7 @@ import static io.netty.handler.codec.mqtt.MqttCodecUtil.validateFixedHeader;
/**
* Decodes Mqtt messages from bytes, following
* <a href="http://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html">
* the MQTT protocl specification v3.1</a>
* the MQTT protocol specification v3.1</a>
*/
public final class MqttDecoder extends ReplayingDecoder<DecoderState> {


@ -22,7 +22,7 @@ import java.util.Collections;
import java.util.List;
/**
* Pyaload of the {@link MqttUnsubscribeMessage}
* Payload of the {@link MqttUnsubscribeMessage}
*/
public final class MqttUnsubscribePayload {


@ -356,7 +356,7 @@ public class MqttCodecTest {
return new MqttUnsubscribeMessage(mqttFixedHeader, mqttMessageIdVariableHeader, mqttUnsubscribePayload);
}
// Helper methdos to compare expected and actual
// Helper methods to compare expected and actual
// MQTT messages
private static void validateFixedHeaders(MqttFixedHeader expected, MqttFixedHeader actual) {


@ -53,7 +53,7 @@ public final class RedisDecoder extends ByteToMessageDecoder {
}
/**
* Creates a new instance with default {@code maxInlineMessageLength} and {@code messageaPool}.
* Creates a new instance with default {@code maxInlineMessageLength} and {@code messagePool}.
*/
public RedisDecoder() {
// 1024 * 64 is max inline length of current Redis server implementation.


@ -40,9 +40,9 @@ public class SocksInitResponseDecoder extends ReplayingDecoder<State> {
out.add(SocksCommonUtils.UNKNOWN_SOCKS_RESPONSE);
break;
}
checkpoint(State.READ_PREFFERED_AUTH_TYPE);
checkpoint(State.READ_PREFERRED_AUTH_TYPE);
}
case READ_PREFFERED_AUTH_TYPE: {
case READ_PREFERRED_AUTH_TYPE: {
SocksAuthScheme authScheme = SocksAuthScheme.valueOf(byteBuf.readByte());
out.add(new SocksInitResponse(authScheme));
break;
@ -56,6 +56,6 @@ public class SocksInitResponseDecoder extends ReplayingDecoder<State> {
enum State {
CHECK_PROTOCOL_VERSION,
READ_PREFFERED_AUTH_TYPE
READ_PREFERRED_AUTH_TYPE
}
}


@ -451,7 +451,7 @@ public abstract class ByteToMessageDecoder extends ChannelInboundHandlerAdapter
* @param ctx the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
* @param in the {@link ByteBuf} from which to read data
* @param out the {@link List} to which decoded messages should be added
* @throws Exception is thrown if an error accour
* @throws Exception is thrown if an error occurs
*/
protected abstract void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception;


@ -16,7 +16,7 @@
package io.netty.handler.codec;
/**
* An {@link CodecException} which is thrown by a dencoder.
* An {@link CodecException} which is thrown by a decoder.
*/
public class DecoderException extends CodecException {


@ -77,7 +77,7 @@ public abstract class MessageToByteEncoder<I> extends ChannelOutboundHandlerAdap
/**
* Create a new instance
*
* @param outboundMessageType The tpye of messages to match
* @param outboundMessageType The type of messages to match
* @param preferDirect {@code true} if a direct {@link ByteBuf} should be tried to be used as target for
* the encoded messages. If {@code false} is used it will allocate a heap
* {@link ByteBuf}, which is backed by an byte array.
@ -132,7 +132,7 @@ public abstract class MessageToByteEncoder<I> extends ChannelOutboundHandlerAdap
/**
* Allocate a {@link ByteBuf} which will be used as argument of {@link #encode(ChannelHandlerContext, I, ByteBuf)}.
* Sub-classes may override this method to returna {@link ByteBuf} with a perfect matching {@code initialCapacity}.
* Sub-classes may override this method to return {@link ByteBuf} with a perfect matching {@code initialCapacity}.
*/
protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") I msg,
boolean preferDirect) throws Exception {
@ -150,7 +150,7 @@ public abstract class MessageToByteEncoder<I> extends ChannelOutboundHandlerAdap
* @param ctx the {@link ChannelHandlerContext} which this {@link MessageToByteEncoder} belongs to
* @param msg the message to encode
* @param out the {@link ByteBuf} into which the encoded message will be written
* @throws Exception is thrown if an error accour
* @throws Exception is thrown if an error occurs
*/
protected abstract void encode(ChannelHandlerContext ctx, I msg, ByteBuf out) throws Exception;


@ -40,7 +40,7 @@ import java.util.List;
* out.add(msg.longValue());
* }
*
* {@code @Overrride}
* {@code @Override}
* public {@link Integer} encode({@link ChannelHandlerContext} ctx, {@link Long} msg, List&lt;Object&gt; out)
* throws {@link Exception} {
* out.add(msg.intValue());


@ -112,7 +112,7 @@ public abstract class MessageToMessageDecoder<I> extends ChannelInboundHandlerAd
* @param ctx the {@link ChannelHandlerContext} which this {@link MessageToMessageDecoder} belongs to
* @param msg the message to decode to an other one
* @param out the {@link List} to which decoded messages should be added
* @throws Exception is thrown if an error accour
* @throws Exception is thrown if an error occurs
*/
protected abstract void decode(ChannelHandlerContext ctx, I msg, List<Object> out) throws Exception;
}


@ -137,8 +137,8 @@ public abstract class MessageToMessageEncoder<I> extends ChannelOutboundHandlerA
* @param ctx the {@link ChannelHandlerContext} which this {@link MessageToMessageEncoder} belongs to
* @param msg the message to encode to an other one
* @param out the {@link List} into which the encoded msg should be added
* needs to do some kind of aggragation
* @throws Exception is thrown if an error accour
* needs to do some kind of aggregation
* @throws Exception is thrown if an error occurs
*/
protected abstract void encode(ChannelHandlerContext ctx, I msg, List<Object> out) throws Exception;
}


@ -277,7 +277,7 @@ public class Bzip2Decoder extends ByteToMessageDecoder {
}
// It used to avoid "Bzip2Decoder.decode() did not read anything but decoded a message" exception.
// Because previous operation may read only a few bits from Bzip2BitReader.bitBuffer and
// don't read incomming ByteBuf.
// don't read incoming ByteBuf.
if (in.readerIndex() == oldReaderIndex && in.isReadable()) {
reader.refill();
}


@ -59,7 +59,7 @@ final class FastLz {
/**
* In this case {@link #compress(byte[], int, int, byte[], int, int)} will choose level
* automatically depending on the length of the input buffer. If length less than
* {@link #MIN_RECOMENDED_LENGTH_FOR_LEVEL_2} {@link #LEVEL_1} will be choosen,
* {@link #MIN_RECOMENDED_LENGTH_FOR_LEVEL_2} {@link #LEVEL_1} will be chosen,
* otherwise {@link #LEVEL_2}.
*/
static final int LEVEL_AUTO = 0;


@ -71,7 +71,7 @@ public class FastLzFrameDecoder extends ByteToMessageDecoder {
private boolean hasChecksum;
/**
* Chechsum value of current received chunk of data which has checksum.
* Checksum value of current received chunk of data which has checksum.
*/
private int currentChecksum;


@ -372,7 +372,7 @@ public class JdkZlibDecoder extends ZlibDecoder {
long readCrc = crc.getValue();
if (crcValue != readCrc) {
throw new DecompressionException(
"CRC value missmatch. Expected: " + crcValue + ", Got: " + readCrc);
"CRC value mismatch. Expected: " + crcValue + ", Got: " + readCrc);
}
}


@ -73,7 +73,7 @@ public class LzfEncoder extends MessageToByteEncoder<ByteBuf> {
/**
* Creates a new LZF encoder with specified total length of encoded chunk. You can configure it to encode
* your data flow more efficient if you know the avarage size of messages that you send.
* your data flow more efficient if you know the average size of messages that you send.
*
* @param totalLength
* Expected total length of content to compress; only matters for outgoing messages that is smaller


@ -21,5 +21,5 @@ package io.netty.handler.codec.compression;
*/
@Deprecated
public class SnappyFramedDecoder extends SnappyFrameDecoder {
// Nothing new. Just stting here for backward compatibility.
// Nothing new. Just staying here for backward compatibility.
}


@ -21,5 +21,5 @@ package io.netty.handler.codec.compression;
*/
@Deprecated
public class SnappyFramedEncoder extends SnappyFrameEncoder {
// Nothing new. Just stting here for backward compatibility.
// Nothing new. Just staying here for backward compatibility.
}


@ -28,7 +28,7 @@ import org.jboss.marshalling.Marshaller;
* an Object.
*
* See <a href="http://www.jboss.org/jbossmarshalling">JBoss Marshalling website</a>
* for more informations
* for more information
*
* Use {@link MarshallingEncoder} if possible.
*


@ -31,7 +31,7 @@ import org.jboss.marshalling.Marshaller;
* Use this with {@link MarshallingDecoder}
*
* See <a href="http://www.jboss.org/jbossmarshalling">JBoss Marshalling website</a>
* for more informations
* for more information
*
*/
@Sharable


@ -53,7 +53,7 @@ public interface ReferenceCounted {
ReferenceCounted touch();
/**
* Records the current access location of this object with an additonal arbitrary information for debugging
* Records the current access location of this object with an additional arbitrary information for debugging
* purposes. If this object is determined to be leaked, the information recorded by this operation will be
* provided to you via {@link ResourceLeakDetector}.
*/


@ -45,7 +45,7 @@ public final class ThreadDeathWatcher {
static final ThreadFactory threadFactory;
// Use a MPMC queue as we may end up checking isEmpty() from multiple threads which may not be allowed to do
// concurrently depending on the implemenation of it in a MPSC queue.
// concurrently depending on the implementation of it in a MPSC queue.
private static final Queue<Entry> pendingEntries = new ConcurrentLinkedQueue<Entry>();
private static final Watcher watcher = new Watcher();
private static final AtomicBoolean started = new AtomicBoolean();


@ -31,7 +31,7 @@ public interface EventExecutorGroup extends ScheduledExecutorService, Iterable<E
/**
* Returns {@code true} if and only if all {@link EventExecutor}s managed by this {@link EventExecutorGroup}
* are being {@linkplain #shutdownGracefully() shut down gracefuclly} or was {@linkplain #isShutdown() shut down}.
* are being {@linkplain #shutdownGracefully() shut down gracefully} or was {@linkplain #isShutdown() shut down}.
*/
boolean isShuttingDown();


@ -54,7 +54,7 @@ public final class RejectedExecutionHandlers {
public void rejected(Runnable task, SingleThreadEventExecutor executor) {
if (!executor.inEventLoop()) {
for (int i = 0; i < retries; i++) {
// Try to wakup the executor so it will empty its task queue.
// Try to wake up the executor so it will empty its task queue.
executor.wakeup(false);
LockSupport.parkNanos(backOffNanos);


@ -368,7 +368,7 @@ public abstract class SingleThreadEventExecutor extends AbstractScheduledEventEx
*
* @param taskQueue To poll and execute all tasks.
*
* @return {@code true} if atleast one task was executed.
* @return {@code true} if at least one task was executed.
*/
protected final boolean runAllTasksFrom(Queue<Runnable> taskQueue) {
Runnable task = pollTaskFrom(taskQueue);


@ -187,7 +187,7 @@ public final class MacAddressUtil {
private static void validateMacSeparator(char separator) {
if (separator != ':' && separator != '-') {
throw new IllegalArgumentException("unsupported seperator: " + separator + " (expected: [:-])");
throw new IllegalArgumentException("unsupported separator: " + separator + " (expected: [:-])");
}
}


@ -59,7 +59,7 @@ public final class PendingWrite {
}
/**
* Fails the underlying {@link Promise} with the given cause and reycle this instance.
* Fails the underlying {@link Promise} with the given cause and recycle this instance.
*/
public boolean failAndRecycle(Throwable cause) {
ReferenceCountUtil.release(msg);
@ -70,7 +70,7 @@ public final class PendingWrite {
}
/**
* Mark the underlying {@link Promise} successed and reycle this instance.
* Mark the underlying {@link Promise} successfully and recycle this instance.
*/
public boolean successAndRecycle() {
if (promise != null) {


@ -132,7 +132,7 @@ public final class PlatformDependent {
logger.info(
"Your platform does not provide complete low-level API for accessing direct buffers reliably. " +
"Unless explicitly requested, heap buffer will always be preferred to avoid potential system " +
"unstability.");
"instability.");
}
// Here is how the system property is used:
@ -226,7 +226,7 @@ public final class PlatformDependent {
}
/**
* Return {@code true} if {@code sun.misc.Unsafe} was found on the classpath and can be used for acclerated
* Return {@code true} if {@code sun.misc.Unsafe} was found on the classpath and can be used for accelerated
* direct memory access.
*/
public static boolean hasUnsafe() {
@ -304,14 +304,14 @@ public final class PlatformDependent {
}
/**
* Creates a new fastest {@link ConcurrentMap} implementaion for the current platform.
* Creates a new fastest {@link ConcurrentMap} implementation for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap() {
return new ConcurrentHashMap<K, V>();
}
/**
* Creates a new fastest {@link LongCounter} implementaion for the current platform.
* Creates a new fastest {@link LongCounter} implementation for the current platform.
*/
public static LongCounter newLongCounter() {
if (javaVersion() >= 8) {
@ -322,21 +322,21 @@ public final class PlatformDependent {
}
/**
* Creates a new fastest {@link ConcurrentMap} implementaion for the current platform.
* Creates a new fastest {@link ConcurrentMap} implementation for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(int initialCapacity) {
return new ConcurrentHashMap<K, V>(initialCapacity);
}
/**
* Creates a new fastest {@link ConcurrentMap} implementaion for the current platform.
* Creates a new fastest {@link ConcurrentMap} implementation for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(int initialCapacity, float loadFactor) {
return new ConcurrentHashMap<K, V>(initialCapacity, loadFactor);
}
/**
* Creates a new fastest {@link ConcurrentMap} implementaion for the current platform.
* Creates a new fastest {@link ConcurrentMap} implementation for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(
int initialCapacity, float loadFactor, int concurrencyLevel) {
@ -344,7 +344,7 @@ public final class PlatformDependent {
}
/**
* Creates a new fastest {@link ConcurrentMap} implementaion for the current platform.
* Creates a new fastest {@link ConcurrentMap} implementation for the current platform.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentHashMap(Map<? extends K, ? extends V> map) {
return new ConcurrentHashMap<K, V>(map);
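
A hedged usage sketch of the factory methods documented in this block (the map contents and counter usage are arbitrary):

import io.netty.util.internal.LongCounter;
import io.netty.util.internal.PlatformDependent;

import java.util.concurrent.ConcurrentMap;

public final class PlatformFactoriesSketch {
    public static void main(String[] args) {
        // Both factories return the fastest implementation available on the running JRE.
        ConcurrentMap<String, Integer> hits = PlatformDependent.newConcurrentHashMap(16);
        LongCounter counter = PlatformDependent.newLongCounter();

        hits.putIfAbsent("requests", 0);
        counter.increment();
        System.out.println(hits + ", counter=" + counter.value());
    }
}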
@ -498,7 +498,7 @@ public final class PlatformDependent {
/**
* Identical to {@link PlatformDependent0#hashCodeAsciiSanitize(byte)} but for {@link CharSequence}.
*/
private static int hashCodeAsciiSanitizsByte(char value) {
private static int hashCodeAsciiSanitizeByte(char value) {
return value & 0x1f;
}
@ -754,24 +754,24 @@ public final class PlatformDependent {
}
switch(remainingBytes) {
case 7:
return ((hash * HASH_CODE_C1 + hashCodeAsciiSanitizsByte(bytes.charAt(0)))
return ((hash * HASH_CODE_C1 + hashCodeAsciiSanitizeByte(bytes.charAt(0)))
* HASH_CODE_C2 + hashCodeAsciiSanitizeShort(bytes, 1))
* HASH_CODE_C1 + hashCodeAsciiSanitizeInt(bytes, 3);
case 6:
return (hash * HASH_CODE_C1 + hashCodeAsciiSanitizeShort(bytes, 0))
* HASH_CODE_C2 + hashCodeAsciiSanitizeInt(bytes, 2);
case 5:
return (hash * HASH_CODE_C1 + hashCodeAsciiSanitizsByte(bytes.charAt(0)))
return (hash * HASH_CODE_C1 + hashCodeAsciiSanitizeByte(bytes.charAt(0)))
* HASH_CODE_C2 + hashCodeAsciiSanitizeInt(bytes, 1);
case 4:
return hash * HASH_CODE_C1 + hashCodeAsciiSanitizeInt(bytes, 0);
case 3:
return (hash * HASH_CODE_C1 + hashCodeAsciiSanitizsByte(bytes.charAt(0)))
return (hash * HASH_CODE_C1 + hashCodeAsciiSanitizeByte(bytes.charAt(0)))
* HASH_CODE_C2 + hashCodeAsciiSanitizeShort(bytes, 1);
case 2:
return hash * HASH_CODE_C1 + hashCodeAsciiSanitizeShort(bytes, 0);
case 1:
return hash * HASH_CODE_C1 + hashCodeAsciiSanitizsByte(bytes.charAt(0));
return hash * HASH_CODE_C1 + hashCodeAsciiSanitizeByte(bytes.charAt(0));
default:
return hash;
}
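
One consequence of the sanitize step worth noting: masking each character with 0x1f makes the hash ASCII case-insensitive. A sketch, assuming the public PlatformDependent.hashCodeAscii(CharSequence) entry point:

import io.netty.util.internal.PlatformDependent;

public final class AsciiHashSketch {
    public static void main(String[] args) {
        int upper = PlatformDependent.hashCodeAscii("Content-Type");
        int lower = PlatformDependent.hashCodeAscii("content-type");
        // 'C' (0x43) and 'c' (0x63) both sanitize to 0x03, so the two hashes are equal.
        System.out.println(upper == lower);  // true
    }
}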

View File

@ -289,7 +289,7 @@ final class PlatformDependent0 {
@Override
public Object run() {
try {
// Java9 has jdk.internal.misc.Unsafe and not all methods are propergated to
// Java9 has jdk.internal.misc.Unsafe and not all methods are propagated to
// sun.misc.Unsafe
Class<?> internalUnsafeClass = getClassLoader(PlatformDependent0.class)
.loadClass("jdk.internal.misc.Unsafe");

View File

@ -51,7 +51,7 @@ public abstract class TypeParameterMatcher {
}
public static TypeParameterMatcher find(
final Object object, final Class<?> parameterizedSuperclass, final String typeParamName) {
final Object object, final Class<?> parametrizedSuperclass, final String typeParamName) {
final Map<Class<?>, Map<String, TypeParameterMatcher>> findCache =
InternalThreadLocalMap.get().typeParameterMatcherFindCache();
@ -65,7 +65,7 @@ public abstract class TypeParameterMatcher {
TypeParameterMatcher matcher = map.get(typeParamName);
if (matcher == null) {
matcher = get(find0(object, parameterizedSuperclass, typeParamName));
matcher = get(find0(object, parametrizedSuperclass, typeParamName));
map.put(typeParamName, matcher);
}
@ -73,12 +73,12 @@ public abstract class TypeParameterMatcher {
}
private static Class<?> find0(
final Object object, Class<?> parameterizedSuperclass, String typeParamName) {
final Object object, Class<?> parametrizedSuperclass, String typeParamName) {
final Class<?> thisClass = object.getClass();
Class<?> currentClass = thisClass;
for (;;) {
if (currentClass.getSuperclass() == parameterizedSuperclass) {
if (currentClass.getSuperclass() == parametrizedSuperclass) {
int typeParamIndex = -1;
TypeVariable<?>[] typeParams = currentClass.getSuperclass().getTypeParameters();
for (int i = 0; i < typeParams.length; i ++) {
@ -90,7 +90,7 @@ public abstract class TypeParameterMatcher {
if (typeParamIndex < 0) {
throw new IllegalStateException(
"unknown type parameter '" + typeParamName + "': " + parameterizedSuperclass);
"unknown type parameter '" + typeParamName + "': " + parametrizedSuperclass);
}
Type genericSuperType = currentClass.getGenericSuperclass();
@ -124,9 +124,9 @@ public abstract class TypeParameterMatcher {
return Object.class;
}
parameterizedSuperclass = (Class<?>) v.getGenericDeclaration();
parametrizedSuperclass = (Class<?>) v.getGenericDeclaration();
typeParamName = v.getName();
if (parameterizedSuperclass.isAssignableFrom(thisClass)) {
if (parametrizedSuperclass.isAssignableFrom(thisClass)) {
continue;
} else {
return Object.class;
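
A reduced sketch of the reflection idea the walk above builds on: reading the actual type argument of a directly parameterized superclass. TypeParameterMatcher additionally walks the class hierarchy and resolves type variables; the classes below are made up:

import java.lang.reflect.ParameterizedType;

public final class TypeArgumentSketch {
    abstract static class Decoder<T> { }
    static final class StringDecoder extends Decoder<String> { }

    public static void main(String[] args) {
        ParameterizedType superType =
                (ParameterizedType) StringDecoder.class.getGenericSuperclass();
        Class<?> resolved = (Class<?>) superType.getActualTypeArguments()[0];
        System.out.println(resolved);  // class java.lang.String
    }
}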

View File

@ -66,11 +66,11 @@ class FormattingTuple {
static Object[] trimmedCopy(Object[] argArray) {
if (argArray == null || argArray.length == 0) {
throw new IllegalStateException("non-sensical empty or null argument array");
throw new IllegalStateException("nonsensical empty or null argument array");
}
final int trimemdLen = argArray.length - 1;
Object[] trimmed = new Object[trimemdLen];
System.arraycopy(argArray, 0, trimmed, 0, trimemdLen);
final int trimmedLen = argArray.length - 1;
Object[] trimmed = new Object[trimmedLen];
System.arraycopy(argArray, 0, trimmed, 0, trimmedLen);
return trimmed;
}
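
The trim being performed, shown on a concrete array (pure-JDK sketch; the argument values are arbitrary):

public final class TrimmedCopySketch {
    public static void main(String[] args) {
        // SLF4J-style formatting: the last argument is often a Throwable that is
        // reported separately, so the copy drops the trailing element.
        Object[] argArray = { "user", 42, new IllegalStateException("boom") };
        final int trimmedLen = argArray.length - 1;
        Object[] trimmed = new Object[trimmedLen];
        System.arraycopy(argArray, 0, trimmed, 0, trimmedLen);
        System.out.println(java.util.Arrays.toString(trimmed));  // [user, 42]
    }
}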

View File

@ -26,7 +26,7 @@ import java.util.concurrent.atomic.AtomicReference;
public class ResourceLeakDetectorTest {
@Test(timeout = 60000)
public void testConcurentUsage() throws Throwable {
public void testConcurrentUsage() throws Throwable {
final AtomicBoolean finished = new AtomicBoolean();
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
// With 50 threads issue #6087 is reproducible on every run.

View File

@ -458,7 +458,8 @@ public class DefaultPromiseTest {
}
});
assertTrue("Should have notifed " + expectedCount + " listeners", latch.await(5, TimeUnit.SECONDS));
assertTrue("Should have notified " + expectedCount + " listeners",
latch.await(5, TimeUnit.SECONDS));
executor.shutdownGracefully().sync();
}

View File

@ -131,7 +131,7 @@ public class DefaultThreadFactoryTest {
// test that when DefaultThreadFactory is constructed it is sticky to the thread group from the thread group of the
// thread that created it
@Test(timeout = 2000)
public void testDefaulThreadFactoryInheritsThreadGroup() throws InterruptedException {
public void testDefaultThreadFactoryInheritsThreadGroup() throws InterruptedException {
final ThreadGroup sticky = new ThreadGroup("sticky");
runStickyThreadGroupTest(

View File

@ -19,7 +19,6 @@ package io.netty.util.concurrent;
import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import static org.mockito.Mockito.verify;
import org.junit.Rule;
import org.junit.Test;
@ -48,7 +47,7 @@ public class PromiseAggregatorTest {
@SuppressWarnings("unchecked")
@Test
public void testSucessfulNoPending() throws Exception {
public void testSuccessfulNoPending() throws Exception {
Promise<Void> p = mock(Promise.class);
PromiseAggregator<Void, Future<Void>> a =
new PromiseAggregator<Void, Future<Void>>(p);

View File

@ -29,7 +29,7 @@ public class FileServerHandler extends SimpleChannelInboundHandler<String> {
@Override
public void channelActive(ChannelHandlerContext ctx) {
ctx.writeAndFlush("HELO: Type the path of the file to retrieve.\n");
ctx.writeAndFlush("HELLO: Type the path of the file to retrieve.\n");
}
@Override

View File

@ -29,7 +29,7 @@ import io.netty.handler.ssl.util.SelfSignedCertificate;
* This example server aims to demonstrate
* <a href="http://www.w3.org/TR/cors/">Cross Origin Resource Sharing</a> (CORS) in Netty.
* It does not have a client like most of the other examples, but instead has
* a html page that is loaded to try out CORS support in a web brower.
* a html page that is loaded to try out CORS support in a web browser.
* <p>
*
* CORS is configured in {@link HttpCorsServerInitializer} and by updating the config you can
@ -50,7 +50,7 @@ import io.netty.handler.ssl.util.SelfSignedCertificate;
* <h3>Using a web server</h3>
* To test CORS support you can serve the file {@code src/main/resources/cors/cors.html}
* using a web server. You can then add a new host name to your systems hosts file, for
* example if you are on Linux you may update /etc/hosts to add an addtional name
* example if you are on Linux you may update /etc/hosts to add an additional name
* for you local system:
* <pre>
* 127.0.0.1 localhost domain1.com

View File

@ -61,7 +61,7 @@ import io.netty.handler.stream.ChunkedWriteHandler;
* <li>Last-Modified</li>
* <li>Pragma</li>
* </ul>
* Any of the above response headers can be retreived by:
* Any of the above response headers can be retrieved by:
* <pre>
* xhr.getResponseHeader("Content-Type");
* </pre>

View File

@ -113,7 +113,7 @@ public final class HttpUploadClient {
try {
Bootstrap b = new Bootstrap();
b.group(group).channel(NioSocketChannel.class).handler(new HttpUploadClientIntializer(sslCtx));
b.group(group).channel(NioSocketChannel.class).handler(new HttpUploadClientInitializer(sslCtx));
// Simple Get form: no factory used (not usable)
List<Entry<String, String>> headers = formget(b, host, port, get, uriSimple);

View File

@ -23,11 +23,11 @@ import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.stream.ChunkedWriteHandler;
public class HttpUploadClientIntializer extends ChannelInitializer<SocketChannel> {
public class HttpUploadClientInitializer extends ChannelInitializer<SocketChannel> {
private final SslContext sslCtx;
public HttpUploadClientIntializer(SslContext sslCtx) {
public HttpUploadClientInitializer(SslContext sslCtx) {
this.sslCtx = sslCtx;
}

View File

@ -29,7 +29,7 @@ public class HexDumpProxyFrontendHandler extends ChannelInboundHandlerAdapter {
private final String remoteHost;
private final int remotePort;
// As we use inboundChannel.eventLoop() when buildling the Bootstrap this does not need to be volatile as
// As we use inboundChannel.eventLoop() when building the Bootstrap this does not need to be volatile as
// the outboundChannel will use the same EventLoop (and therefore Thread) as the inboundChannel.
private Channel outboundChannel;

View File

@ -6,7 +6,7 @@
<link rel="stylesheet" href="css/cors.css">
</head>
<body onload="simpleGetRequest();">
<h1>Repsonse from Server</h1>
<h1>Response from Server</h1>
<textarea id="responseText"></textarea>
<script>
function simpleGetRequest() {

View File

@ -103,7 +103,7 @@ public abstract class ProxyHandler extends ChannelDuplexHandler {
}
/**
* Rerutns {@code true} if and only if the connection to the destination has been established successfully.
* Returns {@code true} if and only if the connection to the destination has been established successfully.
*/
public final boolean isConnected() {
return connectPromise.isSuccess();

View File

@ -43,7 +43,7 @@ final class CipherSuiteConverter {
* C - bulk cipher
* D - HMAC algorithm
*
* This regular expression assumees that:
* This regular expression assumes that:
*
* 1) A is always TLS or SSL, and
* 2) D is always a single word.

View File

@ -110,7 +110,7 @@ public final class JdkAlpnApplicationProtocolNegotiator extends JdkBaseApplicati
@Override
public SSLEngine wrapSslEngine(SSLEngine engine, JdkApplicationProtocolNegotiator applicationNegotiator,
boolean isServer) {
throw new RuntimeException("ALPN unsupported. Is your classpatch configured correctly?"
throw new RuntimeException("ALPN unsupported. Is your classpath configured correctly?"
+ " For Conscrypt, add the appropriate Conscrypt JAR to classpath and set the security provider."
+ " For Jetty-ALPN, see "
+ "http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-starting");

View File

@ -24,7 +24,7 @@ public final class JdkNpnApplicationProtocolNegotiator extends JdkBaseApplicatio
private static final SslEngineWrapperFactory NPN_WRAPPER = new SslEngineWrapperFactory() {
{
if (!JettyNpnSslEngine.isAvailable()) {
throw new RuntimeException("NPN unsupported. Is your classpatch configured correctly?"
throw new RuntimeException("NPN unsupported. Is your classpath configured correctly?"
+ " See https://wiki.eclipse.org/Jetty/Feature/NPN");
}
}

View File

@ -159,7 +159,7 @@ public class JdkSslContext extends SslContext {
*
* @param sslContext the {@link SSLContext} to use.
* @param isClient {@code true} if this context should create {@link SSLEngine}s for client-side usage.
* @param ciphers the ciphers to use or {@code null} if the standart should be used.
* @param ciphers the ciphers to use or {@code null} if the standard should be used.
* @param cipherFilter the filter to use.
* @param apn the {@link ApplicationProtocolConfig} to use.
* @param clientAuth the {@link ClientAuth} to use. This will only be used when {@param isClient} is {@code false}.
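
A hedged sketch of calling a constructor with the parameters documented above (the exact constructor shape, the null handling for ciphers and apn, and the use of the JRE default SSLContext are assumptions):

import io.netty.buffer.ByteBufAllocator;
import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.IdentityCipherSuiteFilter;
import io.netty.handler.ssl.JdkSslContext;

import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

public final class JdkSslContextSketch {
    public static void main(String[] args) throws Exception {
        SSLContext jdkContext = SSLContext.getDefault();
        // null ciphers -> fall back to the standard cipher suites; no ALPN/NPN config.
        JdkSslContext nettyContext = new JdkSslContext(
                jdkContext, true /* isClient */, null /* ciphers */,
                IdentityCipherSuiteFilter.INSTANCE, null /* apn */, ClientAuth.NONE);
        SSLEngine engine = nettyContext.newEngine(ByteBufAllocator.DEFAULT);
        System.out.println(engine.getUseClientMode());  // true
    }
}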

Some files were not shown because too many files have changed in this diff.