Netty Source Code Analysis: HttpObjectDecoder
Posted by 征服.刘华强
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http;
import static io.netty.util.internal.ObjectUtil.checkPositive;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.DecoderResult;
import io.netty.handler.codec.PrematureChannelClosureException;
import io.netty.handler.codec.TooLongFrameException;
import io.netty.util.ByteProcessor;
import io.netty.util.internal.AppendableCharSequence;
import java.util.List;
/**
 * Decodes {@link ByteBuf}s into {@link HttpMessage}s and
 * {@link HttpContent}s.
 *
 * <h3>Parameters that prevent excessive memory consumption</h3>
 * <table border="1">
 * <tr>
 * <th>Name</th><th>Meaning</th>
 * </tr>
 * <tr>
 * <td>{@code maxInitialLineLength}</td>
 * <td>The maximum length of the initial line
 *     (e.g. {@code "GET / HTTP/1.0"} or {@code "HTTP/1.0 200 OK"}).
 *     If the length of the initial line exceeds this value, a
 *     {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxHeaderSize}</td>
 * <td>The maximum length of all headers. If the sum of the length of each
 *     header exceeds this value, a {@link TooLongFrameException} will be raised.</td>
 * </tr>
 * <tr>
 * <td>{@code maxChunkSize}</td>
 * <td>The maximum length of the content or each chunk. If the content length
 *     (or the length of each chunk) exceeds this value, the content or chunk
 *     will be split into multiple {@link HttpContent}s whose length is
 *     {@code maxChunkSize} at maximum.</td>
 * </tr>
 * </table>
 *
 * <h3>Chunked Content</h3>
 *
 * If the content of an HTTP message is greater than {@code maxChunkSize} or
 * the transfer encoding of the HTTP message is 'chunked', this decoder
 * generates one {@link HttpMessage} instance and its following
 * {@link HttpContent}s per single HTTP message to avoid excessive memory
 * consumption. For example, the following HTTP message:
 * <pre>
 * GET / HTTP/1.1
 * Transfer-Encoding: chunked
 *
 * 1a
 * abcdefghijklmnopqrstuvwxyz
 * 10
 * 1234567890abcdef
 * 0
 * Content-MD5: ...
 * <i>[blank line]</i>
 * </pre>
 * triggers {@link HttpRequestDecoder} to generate 3 objects:
 * <ol>
 * <li>An {@link HttpRequest},</li>
 * <li>The first {@link HttpContent} whose content is {@code 'abcdefghijklmnopqrstuvwxyz'},</li>
 * <li>The second {@link LastHttpContent} whose content is {@code '1234567890abcdef'}, which marks
 * the end of the content.</li>
 * </ol>
 *
 * If you prefer not to handle {@link HttpContent}s by yourself, insert an
 * {@link HttpObjectAggregator} after this decoder in the
 * {@link ChannelPipeline}. However, please note that your server might not
 * be as memory efficient as without the aggregator.
 *
 * <h3>Extensibility</h3>
 *
 * Please note that this decoder is designed to be extended to implement
 * a protocol derived from HTTP, such as
 * <a href="http://en.wikipedia.org/wiki/Real_Time_Streaming_Protocol">RTSP</a> and
 * <a href="http://en.wikipedia.org/wiki/Internet_Content_Adaptation_Protocol">ICAP</a>.
 * To implement the decoder of such a derived protocol, extend this class and
 * implement all abstract methods properly.
 */
public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
    private static final String EMPTY_VALUE = "";

    // Maximum number of bytes to read as a single chunk (default 8192)
    private final int maxChunkSize;
    // Whether the chunked transfer encoding is supported
    private final boolean chunkedSupported;
    protected final boolean validateHeaders;
    // HTTP header parser; reads line by line, one line per CRLF (\r\n) delimiter
    private final HeaderParser headerParser;
    private final LineParser lineParser;

    // The HTTP message being decoded; holds only the version, method, headers and so on, no body data
    private HttpMessage message;
    // Size of the current chunk
    private long chunkSize;
    private long contentLength = Long.MIN_VALUE;
    private volatile boolean resetRequested;

    // These will be updated by splitHeader(...)
    private CharSequence name;
    private CharSequence value;

    private LastHttpContent trailer;
    /**
     * The internal state of {@link HttpObjectDecoder}.
     * <em>Internal use only</em>.
     */
    // Current decoder state
    private enum State {
        SKIP_CONTROL_CHARS,
        READ_INITIAL,                 // read the initial line, e.g. POST http://127.0.0.17:8080/ HTTP/1.1
        READ_HEADER,                  // read the HTTP headers
        READ_VARIABLE_LENGTH_CONTENT, // content of variable length
        READ_FIXED_LENGTH_CONTENT,    // content of fixed length
        READ_CHUNK_SIZE,              // read the size of the next chunk
        READ_CHUNKED_CONTENT,
        READ_CHUNK_DELIMITER,
        READ_CHUNK_FOOTER,
        BAD_MESSAGE,                  // malformed message
        UPGRADED
    }

    private State currentState = State.SKIP_CONTROL_CHARS;
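    // Typical state transitions, summarized from the decode() switch below:
    //   SKIP_CONTROL_CHARS -> READ_INITIAL -> READ_HEADER
    //   READ_HEADER -> SKIP_CONTROL_CHARS                        (no content expected)
    //   READ_HEADER -> READ_FIXED_LENGTH_CONTENT                 (Content-Length present)
    //   READ_HEADER -> READ_VARIABLE_LENGTH_CONTENT              (length bounded only by connection close)
    //   READ_HEADER -> READ_CHUNK_SIZE -> READ_CHUNKED_CONTENT -> READ_CHUNK_DELIMITER -> READ_CHUNK_SIZE ...
    //                  ... until a chunk size of 0 leads to READ_CHUNK_FOOTER
    //   Parse failures switch to BAD_MESSAGE; a protocol switch (see isSwitchingToNonHttp1Protocol) leads to UPGRADED.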
    /**
     * Creates a new instance with the default
     * {@code maxInitialLineLength} (4096), {@code maxHeaderSize} (8192), and
     * {@code maxChunkSize} (8192).
     */
    protected HttpObjectDecoder() {
        this(4096, 8192, 8192, true);
    }

    /**
     * Creates a new instance with the specified parameters.
     */
    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean chunkedSupported) {
        this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, true);
    }

    /**
     * Creates a new instance with the specified parameters.
     */
    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
            boolean chunkedSupported, boolean validateHeaders) {
        this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, validateHeaders, 128);
    }

    protected HttpObjectDecoder(
            int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
            boolean chunkedSupported, boolean validateHeaders, int initialBufferSize) {
        checkPositive(maxInitialLineLength, "maxInitialLineLength");
        checkPositive(maxHeaderSize, "maxHeaderSize");
        checkPositive(maxChunkSize, "maxChunkSize");

        AppendableCharSequence seq = new AppendableCharSequence(initialBufferSize);
        lineParser = new LineParser(seq, maxInitialLineLength);
        headerParser = new HeaderParser(seq, maxHeaderSize);
        this.maxChunkSize = maxChunkSize;
        this.chunkedSupported = chunkedSupported;
        this.validateHeaders = validateHeaders;
    }
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
        // Was a reset requested?
        if (resetRequested) {
            // Restore all decoder state to its initial values
            resetNow();
        }

        // Dispatch on the current state
        switch (currentState) {
        case SKIP_CONTROL_CHARS:
            // Fall-through
        // A new HTTP message starts here
        case READ_INITIAL: try {
            // First decode the initial line, e.g. POST http://127.0.0.17:8080/ HTTP/1.1
            AppendableCharSequence line = lineParser.parse(buffer);
            if (line == null) {
                return;
            }
            // Split it into three parts: POST | http://127.0.0.17:8080/ | HTTP/1.1
            String[] initialLine = splitInitialLine(line);
            if (initialLine.length < 3) {
                // Invalid initial line - ignore.
                // The line does not follow the HTTP format, so go back to the initial state and keep reading
                currentState = State.SKIP_CONTROL_CHARS;
                return;
            }

            // Build the HTTP message: version, method/status, URI and (later) the headers
            message = createMessage(initialLine);
            // Switch the decoder state to reading the headers
            currentState = State.READ_HEADER;
            // fall-through
        } catch (Exception e) {
            out.add(invalidMessage(buffer, e));
            return;
        }
        // Start decoding the HTTP headers
        case READ_HEADER: try {
            // readHeaders returns null if the buffer does not yet hold the complete headers;
            // in that case wait for more data and continue on the next decode call
            State nextState = readHeaders(buffer);
            if (nextState == null) {
                return;
            }
            // State to move to once the headers have been parsed
            currentState = nextState;
            switch (nextState) {
            case SKIP_CONTROL_CHARS:
                // fast-path
                // No content is expected.
                out.add(message);
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                resetNow();
                return;
            case READ_CHUNK_SIZE: // chunked transfer
                // Throw if chunked messages are not supported
                if (!chunkedSupported) {
                    throw new IllegalArgumentException("Chunked messages not supported");
                }
                // Pass the message to the next handler first; the chunks follow in later decode calls
                // Chunked encoding - generate HttpMessage first. HttpChunks will follow.
                out.add(message);
                return;
            default:
                /**
                 * <a href="https://tools.ietf.org/html/rfc7230#section-3.3.3">RFC 7230, 3.3.3</a> states that if a
                 * request does not have either a transfer-encoding or a content-length header then the message body
                 * length is 0. However for a response the body length is the number of octets received prior to the
                 * server closing the connection. So we treat this as variable length chunked encoding.
                 */
                // The message has no body
                long contentLength = contentLength();
                if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
                    out.add(message); // emit the HTTP message (without content)
                    // Emit an empty last content so that an aggregator further down the pipeline
                    // can hand a complete message to the application
                    out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                    resetNow();
                    return;
                }

                // Otherwise the body is either of fixed or of variable length
                assert nextState == State.READ_FIXED_LENGTH_CONTENT ||
                        nextState == State.READ_VARIABLE_LENGTH_CONTENT;

                // Emit the HTTP message (without content)
                out.add(message);

                // Fixed length
                if (nextState == State.READ_FIXED_LENGTH_CONTENT) {
                    // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT state reads data chunk by chunk.
                    // For fixed-length content, record the remaining body length
                    chunkSize = contentLength;
                }

                // We return here, this forces decode to be called again where we will decode the content
                return;
            }
        } catch (Exception e) {
            out.add(invalidMessage(buffer, e));
            return;
        }
        case READ_VARIABLE_LENGTH_CONTENT: {
            // Keep reading data as a chunk until the end of connection is reached.
            int toRead = Math.min(buffer.readableBytes(), maxChunkSize);
            if (toRead > 0) {
                ByteBuf content = buffer.readRetainedSlice(toRead);
                out.add(new DefaultHttpContent(content));
            }
            return;
        }
        // Read fixed-length content
        case READ_FIXED_LENGTH_CONTENT: {
            int readLimit = buffer.readableBytes();

            // Check if the buffer is readable first as we use the readable byte count
            // to create the HttpChunk. This is needed as otherwise we may end up with
            // create an HttpChunk instance that contains an empty buffer and so is
            // handled like it is the last HttpChunk.
            //
            // See https://github.com/netty/netty/issues/433
            if (readLimit == 0) {
                return;
            }

            // Never read more than maxChunkSize at a time
            int toRead = Math.min(readLimit, maxChunkSize);
            // Do not read past the declared content length; any extra readable bytes may belong to the next request
            if (toRead > chunkSize) {
                toRead = (int) chunkSize;
            }
            // Read a slice of the content
            ByteBuf content = buffer.readRetainedSlice(toRead);
            // Track how much of the body is still outstanding
            chunkSize -= toRead;

            // chunkSize == 0 means the whole fixed-length body has been read
            if (chunkSize == 0) {
                // Read all content.
                // Emit a last-content object so the aggregator can combine it with the message
                // into one complete message and pass it downstream
                out.add(new DefaultLastHttpContent(content, validateHeaders));
                resetNow();
            } else {
                // Only part of the content
                out.add(new DefaultHttpContent(content));
            }
            return;
        }
        /**
         * everything else after this point takes care of reading chunked content. basically, read chunk size,
         * read chunk, read and ignore the CRLF and repeat until 0
         */
        // A Transfer-Encoding: chunked header means the body length is not fixed; it is parsed chunk by chunk.
        // Read the size of the next chunk.
        case READ_CHUNK_SIZE: try {
            // With chunked encoding every chunk is preceded by its size on a line of its own,
            // e.g. "1a\r\n" followed by the chunk data and a trailing CRLF,
            // so first parse one line to extract that size
            AppendableCharSequence line = lineParser.parse(buffer);
            if (line == null) {
                return;
            }
            // Strip the CR/LF (and any chunk extension) and parse the size
            int chunkSize = getChunkSize(line.toString());
            // Remember the size of the chunk
            this.chunkSize = chunkSize;
            // A size of 0 marks the last chunk
            if (chunkSize == 0) {
                currentState = State.READ_CHUNK_FOOTER;
                return;
            }
            // Otherwise switch to reading the chunk data
            currentState = State.READ_CHUNKED_CONTENT;
            // fall-through
        } catch (Exception e) {
            out.add(invalidChunk(buffer, e));
            return;
        }
        // Read the chunk data
        case READ_CHUNKED_CONTENT: {
            assert chunkSize <= Integer.MAX_VALUE;
            // Never read more than maxChunkSize (default 8192, configurable) at a time
            int toRead = Math.min((int) chunkSize, maxChunkSize);
            toRead = Math.min(toRead, buffer.readableBytes());
            if (toRead == 0) {
                return;
            }
            // Wrap the bytes in an HttpContent object
            HttpContent chunk = new DefaultHttpContent(buffer.readRetainedSlice(toRead));
            chunkSize -= toRead; // subtract the number of bytes actually read from the chunk size
            // Pass it downstream; an HttpObjectAggregator may buffer and aggregate it
            out.add(chunk);

            // If the current chunk has not been fully read yet, keep reading
            if (chunkSize != 0) {
                return;
            }
            // Otherwise the current chunk is complete; prepare to read the next one
            currentState = State.READ_CHUNK_DELIMITER;
            // fall-through
        }
        // The previous chunk has been fully consumed
        case READ_CHUNK_DELIMITER: {
            // Skip the CRLF that terminates the previous chunk before the next chunk-size line
            final int wIdx = buffer.writerIndex();
            int rIdx = buffer.readerIndex();
            while (wIdx > rIdx) {
                // Scan for the LF that ends the delimiter
                byte next = buffer.getByte(rIdx++);
                if (next == HttpConstants.LF) {
                    // Switch to reading the size of the next chunk
                    currentState = State.READ_CHUNK_SIZE;
                    break;
                }
            }
            buffer.readerIndex(rIdx);
            return;
        }
        // End of the chunked body
        case READ_CHUNK_FOOTER: try {
            LastHttpContent trailer = readTrailingHeaders(buffer);
            if (trailer == null) {
                return;
            }
            // Emit the trailer (an empty last content) so that HttpObjectAggregator can finish aggregating
            out.add(trailer);
            resetNow();
            return;
        } catch (Exception e) {
            out.add(invalidChunk(buffer, e));
            return;
        }
        // Malformed message: discard the bytes
        case BAD_MESSAGE: {
            // Keep discarding until disconnection.
            buffer.skipBytes(buffer.readableBytes());
            break;
        }
        case UPGRADED: {
            int readableBytes = buffer.readableBytes();
            if (readableBytes > 0) {
                // Keep on consuming as otherwise we may trigger an DecoderException,
                // other handler will replace this codec with the upgraded protocol codec to
                // take the traffic over at some point then.
                // See https://github.com/netty/netty/issues/2173
                out.add(buffer.readBytes(readableBytes));
            }
            break;
        }
        }
    }
    @Override
    protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
        super.decodeLast(ctx, in, out);

        if (resetRequested) {
            // If a reset was requested by decodeLast() we need to do it now otherwise we may produce a
            // LastHttpContent while there was already one.
            resetNow();
        }
        // Handle the last unfinished message.
        if (message != null) {
            boolean chunked = HttpUtil.isTransferEncodingChunked(message);
            if (currentState == State.READ_VARIABLE_LENGTH_CONTENT && !in.isReadable() && !chunked) {
                // End of connection.
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                resetNow();
                return;
            }

            if (currentState == State.READ_HEADER) {
                // If we are still in the state of reading headers we need to create a new invalid message that
                // signals that the connection was closed before we received the headers.
                out.add(invalidMessage(Unpooled.EMPTY_BUFFER,
                        new PrematureChannelClosureException("Connection closed before received headers")));
                resetNow();
                return;
            }

            // Check if the closure of the connection signifies the end of the content.
            boolean prematureClosure;
            if (isDecodingRequest() || chunked) {
                // The last request did not wait for a response.
                prematureClosure = true;
            } else {
                // Compare the length of the received content and the 'Content-Length' header.
                // If the 'Content-Length' header is absent, the length of the content is determined by the end of the
                // connection, so it is perfectly fine.
                prematureClosure = contentLength() > 0;
            }

            if (!prematureClosure) {
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
            }
            resetNow();
        }
    }
    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof HttpExpectationFailedEvent) {
            switch (currentState) {
            case READ_FIXED_LENGTH_CONTENT:
            case READ_VARIABLE_LENGTH_CONTENT:
            case READ_CHUNK_SIZE:
                reset();
                break;
            default:
                break;
            }
        }
        super.userEventTriggered(ctx, evt);
    }
    protected boolean isContentAlwaysEmpty(HttpMessage msg) {
        if (msg instanceof HttpResponse) {
            HttpResponse res = (HttpResponse) msg;
            int code = res.status().code();

            // Correctly handle return codes of 1xx.
            //
            // See:
            //     - http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html Section 4.4
            //     - https://github.com/netty/netty/issues/222
            if (code >= 100 && code < 200) {
                // One exception: Hixie 76 websocket handshake response
                return !(code == 101 && !res.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT)
                         && res.headers().contains(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET, true));
            }

            switch (code) {
            case 204: case 304:
                return true;
            }
        }
        return false;
    }
    /**
     * Returns true if the server switched to a different protocol than HTTP/1.0 or HTTP/1.1, e.g. HTTP/2 or Websocket.
     * Returns false if the upgrade happened in a different layer, e.g. upgrade from HTTP/1.1 to HTTP/1.1 over TLS.
     */
    protected boolean isSwitchingToNonHttp1Protocol(HttpResponse msg) {
        if (msg.status().code() != HttpResponseStatus.SWITCHING_PROTOCOLS.code()) {
            return false;
        }
        String newProtocol = msg.headers().get(HttpHeaderNames.UPGRADE);
        return newProtocol == null ||
                !newProtocol.contains(HttpVersion.HTTP_1_0.text()) &&
                !newProtocol.contains(HttpVersion.HTTP_1_1.text());
    }
    /**
     * Resets the state of the decoder so that it is ready to decode a new message.
     * This method is useful for handling a rejected request with {@code Expect: 100-continue} header.
     */
    public void reset() {
        resetRequested = true;
    }

    private void resetNow() {
        HttpMessage message = this.message;
        this.message = null;
        name = null;
        value = null;
        contentLength = Long.MIN_VALUE;
        lineParser.reset();
        headerParser.reset();
        trailer = null;
        if (!isDecodingRequest()) {
            HttpResponse res = (HttpResponse) message;
            if (res != null && isSwitchingToNonHttp1Protocol(res)) {
                currentState = State.UPGRADED;
                return;
            }
        }

        resetRequested = false;
        currentState = State.SKIP_CONTROL_CHARS;
    }
    private HttpMessage invalidMessage(ByteBuf in, Exception cause) {
        // Switch into the error state
        currentState = State.BAD_MESSAGE;

        // Advance the readerIndex so that ByteToMessageDecoder does not complain
        // when we produced an invalid message without consuming anything.
        // Discard all readable bytes left in the buffer
        in.skipBytes(in.readableBytes());

        // Create an HTTP object that carries the error information
        if (message == null) {
            message = createInvalidMessage();
        }
        // Mark the decoding as failed
        message.setDecoderResult(DecoderResult.failure(cause));

        HttpMessage ret = message;
        message = null;
        return ret;
    }

    private HttpContent invalidChunk(ByteBuf in, Exception cause) {
        currentState = State.BAD_MESSAGE;

        // Advance the readerIndex so that ByteToMessageDecoder does not complain
        // when we produced an invalid message without consuming anything.
        in.skipBytes(in.readableBytes());

        HttpContent chunk = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER);
        chunk.setDecoderResult(DecoderResult.failure(cause));
        message = null;
        trailer = null;
        return chunk;
    }
    private State readHeaders(ByteBuf buffer) {
        final HttpMessage message = this.message;
        final HttpHeaders headers = message.headers();

        // Parse one header line
        AppendableCharSequence line = headerParser.parse(buffer);
        if (line == null) {
            return null;
        }
        if (line.length() > 0) {
            do {
                char firstChar = line.charAtUnsafe(0);
                if (name != null && (firstChar == ' ' || firstChar == '\t')) {
                    //please do not make one line from below code
                    //as it breaks +XX:OptimizeStringConcat optimization
                    String trimmedLine = line.toString().trim();
                    String valueStr = String.valueOf(value);
                    value = valueStr + ' ' + trimmedLine;
                } else {
                    if (name != null) {
                        headers.add(name, value);
                    }
                    splitHeader(line);
                }

                // Example of what is being parsed here:
                //   POST http://127.0.0.17:8080/ HTTP/1.1
                //   User-Agent: PostmanRuntime/7.24.1
                //   Accept: */*
                //   Cache-Control: no-cache
                //   Postman-Token: ec3374da-5f84-4eee-89af-503bfd45e2da
                //   Host: 127.0.0.17:8080
                //   Accept-Encoding: gzip, deflate, br
                //   Connection: keep-alive
                //   Content-Type: multipart/form-data; boundary=--------------------------692878693204550027949712
                //   Content-Length: 287
                //   (at the empty line after the headers, headerParser.parse(buffer) returns an empty sequence)
                //   ----------------------------692878693204550027949712
                //   Content-Disposition: form-data; name="username"
                // If parse returns null, the headers are not complete yet, so return null and wait for more data
                line = headerParser.parse(buffer);
                if (line == null) {
                    return null;
                }
            } while (line.length() > 0);
        }

        // Add the last header.
        if (name != null) {
            headers.add(name, value);
        }
        // reset name and value fields
        name = null;
        value = null;

        List<String> values = headers.getAll(HttpHeaderNames.CONTENT_LENGTH);
        int contentLengthValuesCount = values.size();

        // Multiple Content-Length headers are rejected
        if (contentLengthValuesCount > 0) {
            // Guard against multiple Content-Length headers as stated in
            // https://tools.ietf.org/html/rfc7230#section-3.3.2:
            //
            // If a message is received that has multiple Content-Length header
            // fields with field-values consisting of the same decimal value, or a
            // single Content-Length header field with a field value containing a
            // list of identical decimal values (e.g., "Content-Length: 42, 42"),
            // indicating that duplicate Content-Length header fields have been
            // generated or combined by an upstream message processor, then the
            // recipient MUST either reject the message as invalid or replace the
            // duplicated field-values with a single valid Content-Length field
            // containing that decimal value prior to determining the message body
            // length or forwarding the message.
            if (contentLengthValuesCount > 1 && message.protocolVersion() == HttpVersion.HTTP_1_1) {
                throw new IllegalArgumentException("Multiple Content-Length headers found");
            }
            contentLength = Long.parseLong(values.get(0));
        }

        if (isContentAlwaysEmpty(message)) {
            HttpUtil.setTransferEncodingChunked(message, false);
            return State.SKIP_CONTROL_CHARS;
        } else if (HttpUtil.isTransferEncodingChunked(message)) {
            // The message uses Transfer-Encoding: chunked
            if (contentLengthValuesCount > 0 && message.protocolVersion() == HttpVersion.HTTP_1_1) {
                // Removes the Content-Length header (message.headers().remove(HttpHeaderNames.CONTENT_LENGTH))
                handleTransferEncodingChunkedWithContentLength(message);
            }
            // Switch to reading the body chunk by chunk
            return State.READ_CHUNK_SIZE;
        } else if (contentLength() >= 0) {
            // The headers declare a fixed content length, e.g. Content-Length: 287
            return State.READ_FIXED_LENGTH_CONTENT;
        } else {
            // The content length is not known up front
            return State.READ_VARIABLE_LENGTH_CONTENT;
        }
    }
    /**
     * Invoked when a message with both a "Transfer-Encoding: chunked" and a "Content-Length" header field is detected.
     * The default behavior is to <i>remove</i> the Content-Length field, but this method could be overridden
     * to change the behavior (to, e.g., throw an exception and produce an invalid message).
     * <p>
     * See: https://tools.ietf.org/html/rfc7230#section-3.3.3
     * <pre>
     *     If a message is received with both a Transfer-Encoding and a
     *     Content-Length header field, the Transfer-Encoding overrides the
     *     Content-Length. Such a message might indicate an attempt to
     *     perform request smuggling (Section 9.5) or response splitting
     *     (Section 9.4) and ought to be handled as an error. A sender MUST
     *     remove the received Content-Length field prior to forwarding such
     *     a message downstream.
     * </pre>
     * Also see:
     * https://github.com/apache/tomcat/blob/b693d7c1981fa7f51e58bc8c8e72e3fe80b7b773/
     * java/org/apache/coyote/http11/Http11Processor.java#L747-L755
     * https://github.com/nginx/nginx/blob/0ad4393e30c119d250415cb769e3d8bc8dce5186/
     * src/http/ngx_http_request.c#L1946-L1953
     */
    protected void handleTransferEncodingChunkedWithContentLength(HttpMessage message) {
        message.headers().remove(HttpHeaderNames.CONTENT_LENGTH);
        contentLength = Long.MIN_VALUE;
    }

    private long contentLength() {
        if (contentLength == Long.MIN_VALUE) {
            contentLength = HttpUtil.getContentLength(message, -1L);
        }
        return contentLength;
    }
    private LastHttpContent readTrailingHeaders(ByteBuf buffer) {
        AppendableCharSequence line = headerParser.parse(buffer);
        if (line == null) {
            return null;
        }
        LastHttpContent trailer = this.trailer;
        if (line.length() == 0 && trailer == null) {
            // We have received the empty line which signals the trailer is complete and did not parse any trailers
            // before. Just return an empty last content to reduce allocations.
            return LastHttpContent.EMPTY_LAST_CONTENT;
        }

        CharSequence lastHeader = null;
        if (trailer == null) {
            trailer = this.trailer = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER, validateHeaders);
        }
        while (line.length() > 0) {
            char firstChar = line.charAtUnsafe(0);
            if (lastHeader != null && (firstChar == ' ' || firstChar == '\t')) {
                List<String> current = trailer.trailingHeaders().getAll(lastHeader);
                if (!current.isEmpty()) {
                    int lastPos = current.size() - 1;
                    //please do not make one line from below code
                    //as it breaks +XX:OptimizeStringConcat optimization
                    String lineTrimmed = line.toString().trim();
                    String currentLastPos = current.get(lastPos);
                    current.set(lastPos, currentLastPos + lineTrimmed);
                }
            } else {
                splitHeader(line);
                CharSequence headerName = name;
                if (!HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(headerName) &&
                        !HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(headerName) &&
                        !HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(headerName)) {
                    trailer.trailingHeaders().add(headerName, value);
                }
                lastHeader = name;
                // reset name and value fields
                name = null;
                value = null;
            }
            line = headerParser.parse(buffer);
            if (line == null) {
                return null;
            }
        }

        this.trailer = null;
        return trailer;
    }
    protected abstract boolean isDecodingRequest();
    protected abstract HttpMessage createMessage(String[] initialLine) throws Exception;
    protected abstract HttpMessage createInvalidMessage();

    private static int getChunkSize(String hex) {
        hex = hex.trim();
        for (int i = 0; i < hex.length(); i ++) {
            char c = hex.charAt(i);
            if (c == ';' || Character.isWhitespace(c) || Character.isISOControl(c)) {
                hex = hex.substring(0, i);
                break;
            }
        }

        return Integer.parseInt(hex, 16);
    }
    private static String[] splitInitialLine(AppendableCharSequence sb) {
        int aStart;
        int aEnd;
        int bStart;
        int bEnd;
        int cStart;
        int cEnd;

        aStart = findNonSPLenient(sb, 0);
        aEnd = findSPLenient(sb, aStart);

        bStart = findNonSPLenient(sb, aEnd);
        bEnd = findSPLenient(sb, bStart);

        cStart = findNonSPLenient(sb, bEnd);
        cEnd = findEndOfString(sb);

        return new String[] {
                sb.subStringUnsafe(aStart, aEnd),
                sb.subStringUnsafe(bStart, bEnd),
                cStart < cEnd ? sb.subStringUnsafe(cStart, cEnd) : "" };
    }
    private void splitHeader(AppendableCharSequence sb) {
        final int length = sb.length();
        int nameStart;
        int nameEnd;
        int colonEnd;
        int valueStart;
        int valueEnd;

        nameStart = findNonWhitespace(sb, 0, false);
        for (nameEnd = nameStart; nameEnd < length; nameEnd ++) {
            char ch = sb.charAtUnsafe(nameEnd);
            // https://tools.ietf.org/html/rfc7230#section-3.2.4
            //
            // No whitespace is allowed between the header field-name and colon. In
            // the past, differences in the handling of such whitespace have led to
            // security vulnerabilities in request routing and response handling. A
            // server MUST reject any received request message that contains
            // whitespace between a header field-name and colon with a response code
            // of 400 (Bad Request). A proxy MUST remove any such whitespace from a
            // response message before forwarding the message downstream.
            if (ch == ':' ||
                    // In case of decoding a request we will just continue processing and header validation
                    // is done in the DefaultHttpHeaders implementation.
                    //
                    // In the case of decoding a response we will "skip" the whitespace.
                    (!isDecodingRequest() && isOWS(ch))) {
                break;
            }
        }

        if (nameEnd == length) {
            // There was no colon present at all.
            throw new IllegalArgumentException("No colon found");
        }

        for (colonEnd = nameEnd; colonEnd < length; colonEnd ++) {
            if (sb.charAtUnsafe(colonEnd) == ':') {
                colonEnd ++;
                break;
            }
        }

        name = sb.subStringUnsafe(nameStart, nameEnd);
        valueStart = findNonWhitespace(sb, colonEnd, true);
        if (valueStart == length) {
            value = EMPTY_VALUE;
        } else {
            valueEnd = findEndOfString(sb);
            value = sb.subStringUnsafe(valueStart, valueEnd);
        }
    }
    private static int findNonSPLenient(AppendableCharSequence sb, int offset) {
        for (int result = offset; result < sb.length(); ++result) {
            char c = sb.charAtUnsafe(result);
            // See https://tools.ietf.org/html/rfc7230#section-3.5
            if (isSPLenient(c)) {
                continue;
            }
            if (Character.isWhitespace(c)) {
                // Any other whitespace delimiter is invalid
                throw new IllegalArgumentException("Invalid separator");
            }
            return result;
        }
        return sb.length();
    }

    private static int findSPLenient(AppendableCharSequence sb, int offset) {
        for (int result = offset; result < sb.length(); ++result) {
            if (isSPLenient(sb.charAtUnsafe(result))) {
                return result;
            }
        }
        return sb.length();
    }

    private static boolean isSPLenient(char c) {
        // See https://tools.ietf.org/html/rfc7230#section-3.5
        return c == ' ' || c == (char) 0x09 || c == (char) 0x0B || c == (char) 0x0C || c == (char) 0x0D;
    }

    private static int findNonWhitespace(AppendableCharSequence sb, int offset, boolean validateOWS) {
        for (int result = offset; result < sb.length(); ++result) {
            char c = sb.charAtUnsafe(result);
            if (!Character.isWhitespace(c)) {
                return result;
            } else if (validateOWS && !isOWS(c)) {
                // Only OWS is supported for whitespace
                throw new IllegalArgumentException("Invalid separator, only a single space or horizontal tab allowed," +
                        " but received a '" + c + "'");
            }
        }
        return sb.length();
    }

    private static int findEndOfString(AppendableCharSequence sb) {
        for (int result = sb.length() - 1; result > 0; --result) {
            if (!Character.isWhitespace(sb.charAtUnsafe(result))) {
                return result + 1;
            }
        }
        return 0;
    }

    private static boolean isOWS(char ch) {
        return ch == ' ' || ch == (char) 0x09;
    }
    private static class HeaderParser implements ByteProcessor {
        private final AppendableCharSequence seq;
        private final int maxLength;
        private int size;

        HeaderParser(AppendableCharSequence seq, int maxLength) {
            this.seq = seq;
            this.maxLength = maxLength;
        }

        public AppendableCharSequence parse(ByteBuf buffer) {
            final int oldSize = size;
            seq.reset();
            int i = buffer.forEachByte(this);
            if (i == -1) {
                size = oldSize;
                return null;
            }
            buffer.readerIndex(i + 1);
            return seq;
        }

        public void reset() {
            size = 0;
        }

        @Override
        public boolean process(byte value) throws Exception {
            char nextByte = (char) (value & 0xFF);
            if (nextByte == HttpConstants.LF) {
                int len = seq.length();
                // Drop CR if we had a CRLF pair
                if (len >= 1 && seq.charAtUnsafe(len - 1) == HttpConstants.CR) {
                    -- size;
                    seq.setLength(len - 1);
                }
                return false;
            }

            increaseCount();

            seq.append(nextByte);
            return true;
        }

        protected final void increaseCount() {
            if (++ size > maxLength) {
                // TODO: Respond with Bad Request and discard the traffic
                //       or close the connection.
                //       No need to notify the upstream handlers - just log.
                //       If decoding a response, just throw an exception.
                throw newException(maxLength);
            }
        }

        protected TooLongFrameException newException(int maxLength) {
            return new TooLongFrameException("HTTP header is larger than " + maxLength + " bytes.");
        }
    }
    private final class LineParser extends HeaderParser {

        LineParser(AppendableCharSequence seq, int maxLength) {
            super(seq, maxLength);
        }

        @Override
        public AppendableCharSequence parse(ByteBuf buffer) {
            reset();
            return super.parse(buffer);
        }

        @Override
        public boolean process(byte value) throws Exception {
            if (currentState == State.SKIP_CONTROL_CHARS) {
                char c = (char) (value & 0xFF);
                if (Character.isISOControl(c) || Character.isWhitespace(c)) {
                    increaseCount();
                    return true;
                }
                currentState = State.READ_INITIAL;
            }
            return super.process(value);
        }

        @Override
        protected TooLongFrameException newException(int maxLength) {
            return new TooLongFrameException("An HTTP line is larger than " + maxLength + " bytes.");
        }
    }
}
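To see the decoder in context, here is a minimal pipeline sketch (not part of the Netty source above) that wires up HttpRequestDecoder, a concrete subclass of HttpObjectDecoder, together with HttpObjectAggregator as the class Javadoc suggests. The 4096/8192/8192 arguments mirror the defaults of the no-argument constructor; the 1 MiB aggregation limit and the MyBusinessHandler placeholder are illustrative assumptions, not values taken from the source.

import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;

public class HttpServerInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ChannelPipeline p = ch.pipeline();
        // HttpRequestDecoder extends HttpObjectDecoder and emits HttpRequest,
        // HttpContent and LastHttpContent objects as described above.
        p.addLast(new HttpRequestDecoder(4096, 8192, 8192));
        // Encodes outbound HttpResponse/HttpContent objects back into bytes.
        p.addLast(new HttpResponseEncoder());
        // Optional: aggregates the decoded parts into a single FullHttpRequest,
        // trading memory for convenience (see the class Javadoc).
        p.addLast(new HttpObjectAggregator(1024 * 1024));
        // Application handler; the name is a placeholder for this sketch.
        p.addLast(new MyBusinessHandler());
    }
}

With the aggregator in place, the HttpMessage, HttpContent and LastHttpContent objects produced by decode() are combined into one complete message before reaching the application handler, at the memory cost noted in the Javadoc.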