Commit e3a4702
fs: reduce memory retention when streaming small files
Fixes: #21967
PR-URL: #21968
Reviewed-By: Сковорода Никита Андреевич <[email protected]>
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: Benjamin Gruenbaum <[email protected]>
Reviewed-By: Trivikram Kamat <[email protected]>
Reviewed-By: James M Snell <[email protected]>
Parent: 07cb697

2 files changed: +65 -1 lines

lib/internal/fs/streams.js

Lines changed: 18 additions & 1 deletion
@@ -21,9 +21,18 @@ const util = require('util');
 const kMinPoolSpace = 128;
 
 let pool;
+// It can happen that we expect to read a large chunk of data, and reserve
+// a large chunk of the pool accordingly, but the read() call only filled
+// a portion of it. If a concurrently executing read() then uses the same pool,
+// the "reserved" portion cannot be used, so we allow it to be re-used as a
+// new pool later.
+const poolFragments = [];
 
 function allocNewPool(poolSize) {
-  pool = Buffer.allocUnsafe(poolSize);
+  if (poolFragments.length > 0)
+    pool = poolFragments.pop();
+  else
+    pool = Buffer.allocUnsafe(poolSize);
   pool.used = 0;
 }
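
To make the new free list concrete, here is a minimal standalone sketch of the behaviour (illustrative only, not the fs internals themselves; the 64 KiB size and the firstPool variable are invented for the demo). A donated fragment is handed back by the next allocNewPool() call, so the same underlying memory gets reused, even when the fragment is smaller than the requested poolSize:

'use strict';
// Standalone model of the free-list path added above.
const poolFragments = [];
let pool;

function allocNewPool(poolSize) {
  if (poolFragments.length > 0)
    pool = poolFragments.pop();
  else
    pool = Buffer.allocUnsafe(poolSize);
  pool.used = 0;
}

allocNewPool(64 * 1024);                 // fresh allocation
const firstPool = pool;
// Suppose a short read() left the tail of this pool reserved but unfilled;
// the _read() hunk below donates such tails to the free list:
poolFragments.push(firstPool.slice(1024));
allocNewPool(64 * 1024);
// The recycled pool shares its backing memory with the first one:
console.log(pool.buffer === firstPool.buffer);  // true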
2938

@@ -171,6 +180,14 @@ ReadStream.prototype._read = function(n) {
       this.emit('error', er);
     } else {
       let b = null;
+      // Now that we know how much data we have actually read, re-wind the
+      // 'used' field if we can, and otherwise allow the remainder of our
+      // reservation to be used as a new pool later.
+      if (start + toRead === thisPool.used && thisPool === pool)
+        thisPool.used += bytesRead - toRead;
+      else if (toRead - bytesRead > kMinPoolSpace)
+        poolFragments.push(thisPool.slice(start + bytesRead, start + toRead));
+
       if (bytesRead > 0) {
         this.bytesRead += bytesRead;
         b = thisPool.slice(start, start + bytesRead);
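
The hunk above chooses between two recovery strategies once the actual read size is known. Here is a standalone model of that decision (finishRead is a hypothetical helper mirroring the bookkeeping in _read(); start, toRead and bytesRead correspond to the variables of the same names):

'use strict';
const kMinPoolSpace = 128;
const poolFragments = [];
let pool = Buffer.allocUnsafe(64 * 1024);
pool.used = 0;

// A read reserved toRead bytes at offset start, but only filled bytesRead.
function finishRead(thisPool, start, toRead, bytesRead) {
  if (start + toRead === thisPool.used && thisPool === pool)
    // Nobody reserved space after us, so just rewind 'used'
    // (bytesRead - toRead is negative or zero here).
    thisPool.used += bytesRead - toRead;
  else if (toRead - bytesRead > kMinPoolSpace)
    // A concurrent read reserved space after ours, or the pool was
    // replaced; donate the unusable gap instead of stranding it.
    poolFragments.push(thisPool.slice(start + bytesRead, start + toRead));
  return thisPool.slice(start, start + bytesRead);
}

// Case 1: sole reader, 100 bytes arrive for a 16 KiB reservation.
pool.used += 16384;
finishRead(pool, 0, 16384, 100);
console.log(pool.used);             // 100: the reservation was rewound

// Case 2: a concurrent read reserved space right behind the first one.
const start = pool.used;
pool.used += 16384;                 // first read's reservation
pool.used += 4096;                  // concurrent read's reservation
finishRead(pool, start, 16384, 100);
console.log(poolFragments.length);  // 1: the gap became a future pool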
test/parallel/test-fs-read-stream-concurrent-reads.js

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+'use strict';
+const common = require('../common');
+const fixtures = require('../common/fixtures');
+const assert = require('assert');
+const fs = require('fs');
+
+// Test that concurrent file read streams don’t interfere with each other’s
+// contents, and that the chunks generated by the reads only retain a
+// 'reasonable' amount of memory.
+
+// Refs: https://github.com/nodejs/node/issues/21967
+
+const filename = fixtures.path('loop.js'); // Some small non-homogeneous file.
+const content = fs.readFileSync(filename);
+
+const N = 1000;
+let started = 0;
+let done = 0;
+
+const arrayBuffers = new Set();
+
+function startRead() {
+  ++started;
+  const chunks = [];
+  fs.createReadStream(filename)
+    .on('data', (chunk) => {
+      chunks.push(chunk);
+      arrayBuffers.add(chunk.buffer);
+      if (started < N)
+        startRead();
+    })
+    .on('end', common.mustCall(() => {
+      assert.deepStrictEqual(Buffer.concat(chunks), content);
+      if (++done === N) {
+        const retainedMemory =
+          [...arrayBuffers].map((ab) => ab.byteLength).reduce((a, b) => a + b);
+        assert(retainedMemory / (N * content.length) <= 3,
+               `Retaining ${retainedMemory} bytes in ABs for ${N} ` +
+               `chunks of size ${content.length}`);
+      }
+    }));
+}
+
+// Don’t start the reads all at once – that way we would have to allocate
+// a large amount of memory upfront.
+for (let i = 0; i < 4; ++i)
+  startRead();
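
For context on what the test measures: chunk.buffer is the ArrayBuffer backing a pooled Buffer, so every chunk sliced from one pool reports that pool's full size, and summing byteLength over the set of distinct ArrayBuffers gives the memory the chunks actually pin. A small illustration of the retention effect the <= 3 bound guards against (sizes arbitrary; assumes the default Buffer pooling, where allocations larger than Buffer.poolSize >>> 1 get a dedicated ArrayBuffer):

'use strict';
const pool = Buffer.allocUnsafe(64 * 1024); // gets its own ArrayBuffer
const chunk = pool.slice(0, 100);

console.log(chunk.length);             // 100: bytes of visible data
console.log(chunk.buffer.byteLength);  // 65536: bytes actually retained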
