http: improve chunked res.write(buf) performance
Avoid a costly buffer-to-string operation. Instead, allocate a new buffer, copy the chunk header and data into it and send that. The speed difference is negligible on small payloads but it really shines with larger (10+ kB) chunks. benchmark/http/end-vs-write-end with 64 kB chunks gives 45-50% higher throughput. With 1 MB chunks, the difference is a staggering 590%. Of course, mileage will vary with real workloads and networks, but this commit should have a positive impact on CPU and memory consumption. Big kudos to Wyatt Preul (@wpreul) for reporting the issue and providing the initial patch. Fixes #5941 and #5944.
This commit is contained in:
parent
6359e017ac
commit
3398cce193
157
lib/http.js
157
lib/http.js
@ -447,6 +447,8 @@ function OutgoingMessage() {
|
||||
this.useChunkedEncodingByDefault = true;
|
||||
this.sendDate = false;
|
||||
|
||||
this._headerSent = false;
|
||||
this._header = '';
|
||||
this._hasBody = true;
|
||||
this._trailer = '';
|
||||
|
||||
@ -768,6 +770,92 @@ Object.defineProperty(OutgoingMessage.prototype, 'headersSent', {
|
||||
});
|
||||
|
||||
|
||||
// Convert a number in the range 0-15 to the character code of its
// lowercase hexadecimal digit. The comparison and bit hacks are kept
// on purpose: a table lookup (hexdigits[val & 15]) would add a couple
// of bounds checks to every conversion.
function hex(val) {
  // 0-9 map onto '0'-'9' (0x30-0x39); setting the 0x30 bits suffices.
  if (val <= 9)
    return val | 48;
  // 10-15 map onto 'a'-'f' (0x61-0x66): shift down to 1-6, set 0x60.
  return (val - 9) | 96;
}
|
||||
|
||||
|
||||
// Assemble a complete HTTP/1.1 chunked-transfer frame in one Buffer so
// it can be flushed with a single write() instead of several:
//
//   [headers] <chunklen in hex> CRLF <chunk> CRLF [ '0' CRLF CRLF ] [trailers]
//
// chunk    - Buffer with the payload of this chunk.
// headers  - string of already-serialized response/request headers, or ''.
// trailers - string of already-serialized trailer fields, or ''.
// last     - true when this is the final chunk; appends the '0\r\n\r\n'
//            body terminator.
//
// Returns the newly allocated Buffer. Every byte of the buffer is
// written below, so no zero-fill is needed.
//
// NOTE(review): when `last` is true and trailers are non-empty, the
// trailers are written AFTER the '0\r\n\r\n' terminator; this presumably
// relies on the caller's trailer string carrying its own line endings —
// verify against the end() call site.
function chunkify(chunk, headers, trailers, last) {
  var chunklen = chunk.length;
  var buflen = chunklen + 4; // '\r\n' + chunk + '\r\n'
  var offset = 0;
  var octets = 1; // number of hex digits in the chunk-size field

  // Skip expensive Buffer.byteLength() calls; only ISO-8859-1 characters
  // are allowed in HTTP headers, therefore:
  //
  //   headers.length == Buffer.byteLength(headers)
  //   trailers.length == Buffer.byteLength(trailers)
  //
  // Note: the actual encoding that is used is ASCII. That's de jure
  // a violation of the spec but de facto correct because many HTTP
  // clients get confused by non-ASCII headers.
  if (last === true) buflen += 5; // '0\r\n\r\n'
  if (headers !== '') buflen += headers.length;
  if (trailers !== '') buflen += trailers.length;

  // Count how many hex digits the chunk length needs, one nibble at a
  // time from the most significant end.
  if (chunklen & 0xf0000000) octets += 7;
  else if (chunklen & 0xf000000) octets += 6;
  else if (chunklen & 0xf00000) octets += 5;
  else if (chunklen & 0xf0000) octets += 4;
  else if (chunklen & 0xf000) octets += 3;
  else if (chunklen & 0xf00) octets += 2;
  else if (chunklen & 0xf0) octets += 1;
  buflen += octets;

  var buf = new Buffer(buflen);

  if (headers !== '') {
    buf.write(headers, 0, headers.length, 'ascii');
    offset = headers.length;
  }

  // Write chunk length in hex to buffer. This effectively limits us
  // to 4 GB chunks but that's okay because buffers are max 1 GB anyway.
  // Deliberate switch fall-through: entering at case `octets` emits
  // exactly that many digits, most significant nibble first.
  switch (octets) {
    case 8: buf[offset++] = hex((chunklen >>> 28) & 15);
    case 7: buf[offset++] = hex((chunklen >>> 24) & 15);
    case 6: buf[offset++] = hex((chunklen >>> 20) & 15);
    case 5: buf[offset++] = hex((chunklen >>> 16) & 15);
    case 4: buf[offset++] = hex((chunklen >>> 12) & 15);
    case 3: buf[offset++] = hex((chunklen >>> 8) & 15);
    case 2: buf[offset++] = hex((chunklen >>> 4) & 15);
  }
  buf[offset++] = hex(chunklen & 15); // least significant digit, always emitted

  // Add '\r\n'.
  buf[offset++] = 13;
  buf[offset++] = 10;

  // Copy buffer.
  chunk.copy(buf, offset);
  offset += chunklen;

  // Add trailing '\r\n'.
  buf[offset++] = 13;
  buf[offset++] = 10;

  if (last === true) {
    // Add trailing '0\r\n\r\n'.
    buf[offset++] = 48;
    buf[offset++] = 13;
    buf[offset++] = 10;
    buf[offset++] = 13;
    buf[offset++] = 10;
  }

  if (trailers !== '') {
    buf.write(trailers, offset, trailers.length, 'ascii');
  }

  return buf;
}
|
||||
|
||||
|
||||
OutgoingMessage.prototype.write = function(chunk, encoding) {
|
||||
if (!this._header) {
|
||||
this._implicitHeader();
|
||||
@ -787,23 +875,6 @@ OutgoingMessage.prototype.write = function(chunk, encoding) {
|
||||
// signal the user to keep writing.
|
||||
if (chunk.length === 0) return true;
|
||||
|
||||
// TODO(bnoordhuis) Temporary optimization hack, remove in v0.11. We only
|
||||
// want to convert the buffer when we're sending:
|
||||
//
|
||||
// a) Transfer-Encoding chunks, because it lets us pack the chunk header
|
||||
// and the chunk into a single write(), or
|
||||
//
|
||||
// b) the first chunk of a fixed-length request, because it lets us pack
|
||||
// the request headers and the chunk into a single write().
|
||||
//
|
||||
// Converting to strings is expensive, CPU-wise, but reducing the number
|
||||
// of write() calls more than makes up for that because we're dramatically
|
||||
// reducing the number of TCP roundtrips.
|
||||
if (chunk instanceof Buffer && (this.chunkedEncoding || !this._headerSent)) {
|
||||
chunk = chunk.toString('binary');
|
||||
encoding = 'binary';
|
||||
}
|
||||
|
||||
var len, ret;
|
||||
if (this.chunkedEncoding) {
|
||||
if (typeof(chunk) === 'string' &&
|
||||
@ -812,8 +883,11 @@ OutgoingMessage.prototype.write = function(chunk, encoding) {
|
||||
len = Buffer.byteLength(chunk, encoding);
|
||||
chunk = len.toString(16) + CRLF + chunk + CRLF;
|
||||
ret = this._send(chunk, encoding);
|
||||
} else if (Buffer.isBuffer(chunk)) {
|
||||
var buf = chunkify(chunk, '', '', false);
|
||||
ret = this._send(buf, encoding);
|
||||
} else {
|
||||
// buffer, or a non-toString-friendly encoding
|
||||
// Non-toString-friendly encoding.
|
||||
len = chunk.length;
|
||||
this._send(len.toString(16) + CRLF);
|
||||
this._send(chunk, encoding);
|
||||
@ -900,52 +974,7 @@ OutgoingMessage.prototype.end = function(data, encoding) {
|
||||
}
|
||||
} else if (Buffer.isBuffer(data)) {
|
||||
if (this.chunkedEncoding) {
|
||||
var chunk_size = data.length.toString(16);
|
||||
|
||||
// Skip expensive Buffer.byteLength() calls; only ISO-8859-1 characters
|
||||
// are allowed in HTTP headers. Therefore:
|
||||
//
|
||||
// this._header.length == Buffer.byteLength(this._header.length)
|
||||
// this._trailer.length == Buffer.byteLength(this._trailer.length)
|
||||
//
|
||||
var header_len = this._header.length;
|
||||
var chunk_size_len = chunk_size.length;
|
||||
var data_len = data.length;
|
||||
var trailer_len = this._trailer.length;
|
||||
|
||||
var len = header_len +
|
||||
chunk_size_len +
|
||||
2 + // '\r\n'.length
|
||||
data_len +
|
||||
5 + // '\r\n0\r\n'.length
|
||||
trailer_len +
|
||||
2; // '\r\n'.length
|
||||
|
||||
var buf = new Buffer(len);
|
||||
var off = 0;
|
||||
|
||||
buf.write(this._header, off, header_len, 'ascii');
|
||||
off += header_len;
|
||||
|
||||
buf.write(chunk_size, off, chunk_size_len, 'ascii');
|
||||
off += chunk_size_len;
|
||||
|
||||
crlf_buf.copy(buf, off);
|
||||
off += 2;
|
||||
|
||||
data.copy(buf, off);
|
||||
off += data_len;
|
||||
|
||||
zero_chunk_buf.copy(buf, off);
|
||||
off += 5;
|
||||
|
||||
if (trailer_len > 0) {
|
||||
buf.write(this._trailer, off, trailer_len, 'ascii');
|
||||
off += trailer_len;
|
||||
}
|
||||
|
||||
crlf_buf.copy(buf, off);
|
||||
|
||||
var buf = chunkify(data, this._header, this._trailer, true);
|
||||
ret = this.connection.write(buf);
|
||||
} else {
|
||||
var header_len = this._header.length;
|
||||
|
Loading…
x
Reference in New Issue
Block a user