cluster: reset handle index on close
It allows reopening a server after it has been closed.

Fixes: https://github.com/nodejs/node/issues/6693
PR-URL: https://github.com/nodejs/node/pull/6981
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Ron Korving <ron@ronkorving.nl>
Reviewed-By: James M Snell <jasnell@gmail.com>

Commit 0c29436250 (parent e916218ba5)
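In worker terms, the change lets a server be closed and then told to listen again on the same address. A minimal sketch of that pattern, assuming a single worker and a fixed port chosen purely for illustration (the regression tests added below use common.PORT and two workers):

'use strict';
const cluster = require('cluster');
const net = require('net');

const PORT = 12345;  // illustrative port, not taken from the commit

if (cluster.isMaster) {
  cluster.fork();
} else {
  const server = net.createServer();
  // listen -> close -> listen again on the same address. Before this fix the
  // worker kept a stale entry in `indexes` for this address/port key, which
  // could prevent the server from being reopened cleanly (issue #6693).
  server.listen(PORT, () => {
    server.close(() => {
      server.listen(PORT, () => {
        server.close(() => process.disconnect());
      });
    });
  });
}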
lib/cluster.js

@@ -564,18 +564,18 @@ function workerInit() {
   // obj is a net#Server or a dgram#Socket object.
   cluster._getServer = function(obj, options, cb) {
-    const key = [ options.address,
-                  options.port,
-                  options.addressType,
-                  options.fd ].join(':');
-    if (indexes[key] === undefined)
-      indexes[key] = 0;
+    const indexesKey = [ options.address,
+                         options.port,
+                         options.addressType,
+                         options.fd ].join(':');
+    if (indexes[indexesKey] === undefined)
+      indexes[indexesKey] = 0;
     else
-      indexes[key]++;
+      indexes[indexesKey]++;

     const message = util._extend({
       act: 'queryServer',
-      index: indexes[key],
+      index: indexes[indexesKey],
       data: null
     }, options);

@@ -585,9 +585,9 @@ function workerInit() {
       if (obj._setServerData) obj._setServerData(reply.data);

       if (handle)
-        shared(reply, handle, cb);              // Shared listen socket.
+        shared(reply, handle, indexesKey, cb);  // Shared listen socket.
       else
-        rr(reply, cb);                          // Round-robin.
+        rr(reply, indexesKey, cb);              // Round-robin.
     });
     obj.once('listening', function() {
       cluster.worker.state = 'listening';

@@ -599,7 +599,7 @@ function workerInit() {
   };

   // Shared listen socket.
-  function shared(message, handle, cb) {
+  function shared(message, handle, indexesKey, cb) {
     var key = message.key;
     // Monkey-patch the close() method so we can keep track of when it's
     // closed. Avoids resource leaks when the handle is short-lived.

@@ -607,6 +607,7 @@ function workerInit() {
     handle.close = function() {
       send({ act: 'close', key: key });
       delete handles[key];
+      delete indexes[indexesKey];
       return close.apply(this, arguments);
     };
     assert(handles[key] === undefined);

@@ -615,7 +616,7 @@ function workerInit() {
   }

   // Round-robin. Master distributes handles across workers.
-  function rr(message, cb) {
+  function rr(message, indexesKey, cb) {
     if (message.errno)
       return cb(message.errno, null);

@@ -636,6 +637,7 @@ function workerInit() {
     if (key === undefined) return;
     send({ act: 'close', key: key });
     delete handles[key];
+    delete indexes[indexesKey];
     key = undefined;
   }
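Since the whole point of the change is the extra `delete indexes[indexesKey]` calls, here is a rough stand-alone sketch of that worker-side bookkeeping, assuming `indexes` is the module-level map used by `workerInit()`; the helper names below are invented for illustration:

// Sketch of the worker-side bookkeeping after this change (not the full
// implementation). `indexes` maps an address/port/type/fd key to how many
// times this worker has asked the master for that server.
const indexes = {};

function getIndexesKey(options) {
  return [ options.address,
           options.port,
           options.addressType,
           options.fd ].join(':');
}

function acquire(options) {
  const indexesKey = getIndexesKey(options);
  if (indexes[indexesKey] === undefined)
    indexes[indexesKey] = 0;    // first listen() for this key
  else
    indexes[indexesKey]++;      // subsequent listen() calls
  return indexesKey;
}

function release(indexesKey) {
  delete indexes[indexesKey];   // the fix: forget the index on close
}

Because the entry is removed rather than merely decremented, the next listen() for the same address starts from index 0 again, exactly as if the server had never been opened.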
test/parallel/test-cluster-server-restart-none.js (new file, 37 lines)

@@ -0,0 +1,37 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const cluster = require('cluster');

cluster.schedulingPolicy = cluster.SCHED_NONE;

if (cluster.isMaster) {
  const worker1 = cluster.fork();
  worker1.on('listening', common.mustCall(() => {
    const worker2 = cluster.fork();
    worker2.on('exit', (code, signal) => {
      assert.strictEqual(code, 0, 'worker2 did not exit normally');
      assert.strictEqual(signal, null, 'worker2 did not exit normally');
      worker1.disconnect();
    });
  }));

  worker1.on('exit', common.mustCall((code, signal) => {
    assert.strictEqual(code, 0, 'worker1 did not exit normally');
    assert.strictEqual(signal, null, 'worker1 did not exit normally');
  }));
} else {
  const net = require('net');
  const server = net.createServer();
  server.listen(common.PORT, common.mustCall(() => {
    if (cluster.worker.id === 2) {
      server.close(() => {
        server.listen(common.PORT, common.mustCall(() => {
          server.close(() => {
            process.disconnect();
          });
        }));
      });
    }
  }));
}
test/parallel/test-cluster-server-restart-rr.js (new file, 37 lines)

@@ -0,0 +1,37 @@
'use strict';
const common = require('../common');
const assert = require('assert');
const cluster = require('cluster');

cluster.schedulingPolicy = cluster.SCHED_RR;

if (cluster.isMaster) {
  const worker1 = cluster.fork();
  worker1.on('listening', common.mustCall(() => {
    const worker2 = cluster.fork();
    worker2.on('exit', (code, signal) => {
      assert.strictEqual(code, 0, 'worker2 did not exit normally');
      assert.strictEqual(signal, null, 'worker2 did not exit normally');
      worker1.disconnect();
    });
  }));

  worker1.on('exit', common.mustCall((code, signal) => {
    assert.strictEqual(code, 0, 'worker1 did not exit normally');
    assert.strictEqual(signal, null, 'worker1 did not exit normally');
  }));
} else {
  const net = require('net');
  const server = net.createServer();
  server.listen(common.PORT, common.mustCall(() => {
    if (cluster.worker.id === 2) {
      server.close(() => {
        server.listen(common.PORT, common.mustCall(() => {
          server.close(() => {
            process.disconnect();
          });
        }));
      });
    }
  }));
}