fs: reduce memory retention when streaming small files
Fixes: https://github.com/nodejs/node/issues/21967 PR-URL: https://github.com/nodejs/node/pull/21968 Reviewed-By: Сковорода Никита Андреевич <chalkerx@gmail.com> Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Benjamin Gruenbaum <benjamingr@gmail.com> Reviewed-By: Trivikram Kamat <trivikr.dev@gmail.com> Reviewed-By: James M Snell <jasnell@gmail.com>
This commit is contained in:
parent
07cb69720b
commit
e3a47025ac
@ -21,9 +21,18 @@ const util = require('util');
|
|||||||
// Minimum number of leftover bytes in a pool slice for the remainder to be
// worth keeping around for reuse (see poolFragments below).
const kMinPoolSpace = 128;

// The currently active shared read buffer; individual stream reads are
// handed out slices of it, tracked via the `used` offset.
let pool;

// It can happen that we expect to read a large chunk of data, and reserve
// a large chunk of the pool accordingly, but the read() call only filled
// a portion of it. If a concurrently executing read() then uses the same pool,
// the "reserved" portion cannot be used, so we allow it to be re-used as a
// new pool later.
const poolFragments = [];

// Point `pool` at a fresh buffer and reset its `used` offset.
// Prefers recycling a previously reserved-but-unfilled pool fragment over
// allocating new memory; otherwise allocates `poolSize` uninitialized bytes.
function allocNewPool(poolSize) {
  if (poolFragments.length > 0)
    pool = poolFragments.pop();
  else
    pool = Buffer.allocUnsafe(poolSize);
  pool.used = 0;
}
|
||||||
|
|
||||||
@ -171,6 +180,14 @@ ReadStream.prototype._read = function(n) {
|
|||||||
this.emit('error', er);
|
this.emit('error', er);
|
||||||
} else {
|
} else {
|
||||||
let b = null;
|
let b = null;
|
||||||
|
// Now that we know how much data we have actually read, re-wind the
|
||||||
|
// 'used' field if we can, and otherwise allow the remainder of our
|
||||||
|
// reservation to be used as a new pool later.
|
||||||
|
if (start + toRead === thisPool.used && thisPool === pool)
|
||||||
|
thisPool.used += bytesRead - toRead;
|
||||||
|
else if (toRead - bytesRead > kMinPoolSpace)
|
||||||
|
poolFragments.push(thisPool.slice(start + bytesRead, start + toRead));
|
||||||
|
|
||||||
if (bytesRead > 0) {
|
if (bytesRead > 0) {
|
||||||
this.bytesRead += bytesRead;
|
this.bytesRead += bytesRead;
|
||||||
b = thisPool.slice(start, start + bytesRead);
|
b = thisPool.slice(start, start + bytesRead);
|
||||||
|
47
test/parallel/test-fs-read-stream-concurrent-reads.js
Normal file
47
test/parallel/test-fs-read-stream-concurrent-reads.js
Normal file
'use strict';
const common = require('../common');
const fixtures = require('../common/fixtures');
const assert = require('assert');
const fs = require('fs');

// Test that concurrent file read streams don’t interfere with each other’s
// contents, and that the chunks generated by the reads only retain a
// 'reasonable' amount of memory.

// Refs: https://github.com/nodejs/node/issues/21967

const filename = fixtures.path('loop.js'); // Some small non-homogeneous file.
const content = fs.readFileSync(filename);

const N = 1000;
let started = 0;
let done = 0;

// ArrayBuffers backing the received chunks; their total size measures how
// much pool memory the chunks keep alive.
const arrayBuffers = new Set();

function startRead() {
  ++started;
  const chunks = [];
  fs.createReadStream(filename)
    .on('data', (chunk) => {
      chunks.push(chunk);
      arrayBuffers.add(chunk.buffer);
      // Chain the next read from inside a 'data' handler so reads overlap
      // without all N being started (and allocating memory) at once.
      if (started < N)
        startRead();
    })
    .on('end', common.mustCall(() => {
      // Each stream must reproduce the file contents exactly.
      assert.deepStrictEqual(Buffer.concat(chunks), content);
      if (++done === N) {
        // The chunks should retain at most a small constant factor of the
        // actual data read, not one full pool per stream.
        const retainedMemory =
          [...arrayBuffers].map((ab) => ab.byteLength).reduce((a, b) => a + b);
        assert(retainedMemory / (N * content.length) <= 3,
               `Retaining ${retainedMemory} bytes in ABs for ${N} ` +
               `chunks of size ${content.length}`);
      }
    }));
}

// Don’t start the reads all at once – that way we would have to allocate
// a large amount of memory upfront.
for (let i = 0; i < 4; ++i)
  startRead();
|
Loading…
x
Reference in New Issue
Block a user