src: re-add Realloc() shrink after reading stream data
This would otherwise keep a lot of unused memory lying around,
and in particular add up to a page of memory overhead per chunk
for network reads, potentially opening a DoS vector if the resulting
`Buffer` objects are kept around indefinitely (e.g. stored in a list
and not concatenated until the socket finishes).
This fixes CVE-2018-7164.
Refs: https://github.com/nodejs-private/security/issues/186
Refs: 7c4b09b24b
PR-URL: https://github.com/nodejs-private/node-private/pull/128
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Evan Lucas <evanlucas@me.com>
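
For readers unfamiliar with the internal Realloc() helper the patch below re-adds: the fix boils down to a shrink-to-fit reallocation of the oversized libuv read buffer before it is wrapped in a Buffer. The following standalone C++ sketch illustrates that idea; it uses plain realloc() rather than Node's internal Realloc() helper, and the 64 kB buffer size and sample payload are illustrative assumptions, not values taken from the actual code.

// Standalone sketch: shrink an over-sized read buffer down to the bytes
// actually received before keeping a long-lived reference to it.
// Plain realloc() stands in for Node's internal Realloc() helper here;
// the 64 kB size and the "hello!" payload are illustrative assumptions.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main() {
  const size_t kAllocSize = 64 * 1024;  // full-sized buffer handed to the read call
  char* base = static_cast<char*>(malloc(kAllocSize));
  assert(base != nullptr);

  // Pretend the socket delivered only a handful of bytes.
  const char payload[] = "hello!";
  const size_t nread = sizeof(payload) - 1;
  memcpy(base, payload, nread);

  // Shrink the allocation to what was actually read; without this step,
  // every retained chunk keeps the full 64 kB block alive.
  char* shrunk = static_cast<char*>(realloc(base, nread));
  if (shrunk != nullptr)
    base = shrunk;  // on (unlikely) failure the original block stays valid

  printf("kept %zu of %zu bytes\n", nread, kAllocSize);
  free(base);
  return 0;
}

Reallocating down to nread caps the per-chunk overhead at the allocator's rounding rather than at the full read-buffer size, which is what the regression test added below measures via RSS.
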
@@ -374,8 +374,9 @@ void EmitToJSStreamListener::OnStreamRead(ssize_t nread, const uv_buf_t& buf) {
   }
 
   CHECK_LE(static_cast<size_t>(nread), buf.len);
+  char* base = Realloc(buf.base, nread);
 
-  Local<Object> obj = Buffer::New(env, buf.base, nread).ToLocalChecked();
+  Local<Object> obj = Buffer::New(env, base, nread).ToLocalChecked();
   stream->CallJSOnreadMethod(nread, obj);
 }
 
@@ -0,0 +1,41 @@
+// Flags: --expose-gc
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const net = require('net');
+
+// Tests that, when receiving small chunks, we do not keep the full length
+// of the original allocation for the libuv read call in memory.
+
+let client;
+let baseRSS;
+const receivedChunks = [];
+const N = 250000;
+
+const server = net.createServer(common.mustCall((socket) => {
+  baseRSS = process.memoryUsage().rss;
+
+  socket.setNoDelay(true);
+  socket.on('data', (chunk) => {
+    receivedChunks.push(chunk);
+    if (receivedChunks.length < N) {
+      client.write('a');
+    } else {
+      client.end();
+      server.close();
+    }
+  });
+})).listen(0, common.mustCall(() => {
+  client = net.connect(server.address().port);
+  client.setNoDelay(true);
+  client.write('hello!');
+}));
+
+process.on('exit', () => {
+  global.gc();
+  const bytesPerChunk =
+    (process.memoryUsage().rss - baseRSS) / receivedChunks.length;
+  // We should always have less than one page (usually ~ 4 kB) per chunk.
+  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
+});