http2: no stream destroy while its data is on the wire
This fixes a crash that occurred when a `Http2Stream` write is completed
after it is already destroyed. Instead, don't destroy the stream in that
case and wait for GC to take over.

PR-URL: https://github.com/nodejs/node/pull/19002
Fixes: https://github.com/nodejs/node/issues/18973
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Anatoli Papirovski <apapirovski@mac.com>
commit 584cfc9bae
parent caaf7e3a9f
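The commit message above describes the approach: when an `Http2Stream` is destroyed while some of its data has already been handed to the socket, the native object is kept alive instead of being freed immediately. A minimal standalone sketch of that gate, using hypothetical names rather than Node's actual classes, looks roughly like this:

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for Http2Stream and the session's outgoing_buffers_.
struct Stream {
  int id;
};

struct PendingSocketWrite {
  Stream* owner;  // back-pointer that the write-completion path will touch
};

// Analog of Http2Session::HasWritesOnSocketForStream(): a linear scan over
// writes that were already handed to the socket but have not completed yet.
static bool HasWritesOnSocketFor(const std::vector<PendingSocketWrite>& on_socket,
                                 const Stream* stream) {
  for (const PendingSocketWrite& wr : on_socket) {
    if (wr.owner == stream)
      return true;
  }
  return false;
}

// Analog of the change to Http2Stream::Destroy(): free the object right away
// only when the socket no longer references it; otherwise keep it alive
// (in Node, the garbage collector reclaims it later via the JS wrapper).
static void DestroyStream(Stream* stream,
                          const std::vector<PendingSocketWrite>& on_socket) {
  if (!HasWritesOnSocketFor(on_socket, stream)) {
    std::printf("no data on the wire, deleting stream %d now\n", stream->id);
    delete stream;
  } else {
    std::printf("stream %d still has data on the wire, deferring\n", stream->id);
  }
}

int main() {
  Stream* stream = new Stream{1};
  std::vector<PendingSocketWrite> on_socket;
  on_socket.push_back({stream});     // a write for this stream is in flight
  DestroyStream(stream, on_socket);  // deferred: no use-after-free later
  on_socket.clear();                 // the socket write completes...
  delete stream;                     // ...and the deferred cleanup runs
  return 0;
}

The diff below implements the same idea with `Http2Session::HasWritesOnSocketForStream()` scanning `outgoing_buffers_` and `Http2Stream::Destroy()` skipping the immediate `delete`.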
@@ -1700,6 +1700,14 @@ void Http2Session::OnStreamRead(ssize_t nread, const uv_buf_t& buf) {
   stream_buf_ = uv_buf_init(nullptr, 0);
 }
 
+bool Http2Session::HasWritesOnSocketForStream(Http2Stream* stream) {
+  for (const nghttp2_stream_write& wr : outgoing_buffers_) {
+    if (wr.req_wrap != nullptr && wr.req_wrap->stream() == stream)
+      return true;
+  }
+  return false;
+}
+
 // Every Http2Session session is tightly bound to a single i/o StreamBase
 // (typically a net.Socket or tls.TLSSocket). The lifecycle of the two is
 // tightly coupled with all data transfer between the two happening at the
@@ -1753,13 +1761,11 @@ Http2Stream::Http2Stream(
 
 
 Http2Stream::~Http2Stream() {
+  DEBUG_HTTP2STREAM(this, "tearing down stream");
   if (session_ != nullptr) {
     session_->RemoveStream(this);
     session_ = nullptr;
   }
-
-  if (!object().IsEmpty())
-    ClearWrap(object());
 }
 
 // Notify the Http2Stream that a new block of HEADERS is being processed.
@@ -1837,7 +1843,7 @@ inline void Http2Stream::Destroy() {
     Http2Stream* stream = static_cast<Http2Stream*>(data);
     // Free any remaining outgoing data chunks here. This should be done
     // here because it's possible for destroy to have been called while
-    // we still have qeueued outbound writes.
+    // we still have queued outbound writes.
     while (!stream->queue_.empty()) {
       nghttp2_stream_write& head = stream->queue_.front();
       if (head.req_wrap != nullptr)
@@ -1845,7 +1851,11 @@ inline void Http2Stream::Destroy() {
       stream->queue_.pop();
     }
 
-    delete stream;
+    // We can destroy the stream now if there are no writes for it
+    // already on the socket. Otherwise, we'll wait for the garbage collector
+    // to take care of cleaning up.
+    if (!stream->session()->HasWritesOnSocketForStream(stream))
+      delete stream;
   }, this, this->object());
 
   statistics_.end_time = uv_hrtime();
@@ -878,6 +878,9 @@ class Http2Session : public AsyncWrap, public StreamListener {
   // Removes a stream instance from this session
   inline void RemoveStream(Http2Stream* stream);
 
+  // Indicates whether there currently exist outgoing buffers for this stream.
+  bool HasWritesOnSocketForStream(Http2Stream* stream);
+
   // Write data to the session
   inline ssize_t Write(const uv_buf_t* bufs, size_t nbufs);
 
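The two `Http2Stream::Destroy()` hunks above separate pending data into two groups. Chunks still sitting in the stream's own `queue_` never reached the socket, so their write requests can be completed with `UV_ECANCELED` on the spot; chunks already moved into the session's `outgoing_buffers_` are what `HasWritesOnSocketForStream()` detects, and they are the reason the `delete` may be skipped. A small sketch of the first half, again with hypothetical names rather than Node's actual classes:

#include <cstdio>
#include <queue>

// Hypothetical analog of nghttp2_stream_write: a queued chunk plus the write
// request whose callback must still be invoked with a final status.
struct WriteRequest {
  void Done(int status) { std::printf("write completed with status %d\n", status); }
};

struct QueuedChunk {
  WriteRequest* req;  // may be null, e.g. for internally generated frames
};

constexpr int kCancelled = -125;  // stand-in for UV_ECANCELED

// Analog of the loop in Http2Stream::Destroy(): drain chunks that were never
// handed to the socket, cancelling their callbacks immediately.
static void DrainQueuedWrites(std::queue<QueuedChunk>* queue) {
  while (!queue->empty()) {
    QueuedChunk& head = queue->front();
    if (head.req != nullptr)
      head.req->Done(kCancelled);
    queue->pop();
  }
}

int main() {
  WriteRequest req;
  std::queue<QueuedChunk> queue;
  queue.push({&req});         // user write still queued on the stream
  queue.push({nullptr});      // frame with no JS-visible callback
  DrainQueuedWrites(&queue);  // safe: the socket never saw these chunks
  // Chunks that did reach the socket are not freed here; their presence is
  // what defers the `delete` in the patch above.
  return 0;
}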
@@ -0,0 +1,62 @@
+// Flags: --expose-gc
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const http2 = require('http2');
+const makeDuplexPair = require('../common/duplexpair');
+
+// Make sure the Http2Stream destructor works, since we don't clean the
+// stream up like we would otherwise do.
+process.on('exit', global.gc);
+
+{
+  const { clientSide, serverSide } = makeDuplexPair();
+
+  let serverSideHttp2Stream;
+  let serverSideHttp2StreamDestroyed = false;
+  const server = http2.createServer();
+  server.on('stream', common.mustCall((stream, headers) => {
+    serverSideHttp2Stream = stream;
+    stream.respond({
+      'content-type': 'text/html',
+      ':status': 200
+    });
+
+    const originalWrite = serverSide._write;
+    serverSide._write = (buf, enc, cb) => {
+      if (serverSideHttp2StreamDestroyed) {
+        serverSide.destroy();
+        serverSide.write = () => {};
+      } else {
+        setImmediate(() => {
+          originalWrite.call(serverSide, buf, enc, () => setImmediate(cb));
+        });
+      }
+    };
+
+    // Enough data to fit into a single *session* window,
+    // not enough data to fit into a single *stream* window.
+    stream.write(Buffer.alloc(40000));
+  }));
+
+  server.emit('connection', serverSide);
+
+  const client = http2.connect('http://localhost:80', {
+    createConnection: common.mustCall(() => clientSide)
+  });
+
+  const req = client.request({ ':path': '/' });
+
+  req.on('response', common.mustCall((headers) => {
+    assert.strictEqual(headers[':status'], 200);
+  }));
+
+  req.on('data', common.mustCallAtLeast(() => {
+    if (!serverSideHttp2StreamDestroyed) {
+      serverSideHttp2Stream.destroy();
+      serverSideHttp2StreamDestroyed = true;
+    }
+  }));
+}
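The regression test drives this path deterministically: it replaces `serverSide._write` so that socket writes complete asynchronously (or are dropped entirely once the stream has been destroyed), destroys the server-side `Http2Stream` from the client's `'data'` handler while such a write is still outstanding, and runs under `--expose-gc` with `process.on('exit', global.gc)` so that the now GC-driven destructor is actually exercised before the process exits.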