dns: refactor QueryWrap lifetime management
- Prefer RAII-style management over manual resource management. - Prefer `env->SetImmediate()` over a separate `uv_async_t`. - Perform `ares_destroy()` before possibly tearing down c-ares state. - Verify that the number of active queries is non-negative. - Let pending callbacks know when their underlying `QueryWrap` object has been destroyed. The last item has been a real bug, in that when Workers shut down during currently running DNS queries, they may run into use-after-free situations because: 1. Shutting the `Worker` down leads to the cleanup code deleting the `QueryWrap` objects first; then 2. deleting the `ChannelWrap` object (as it has been created before the `QueryWrap`s), whose destructor runs `ares_destroy()`, which in turn invokes all pending query callbacks with `ARES_ECANCELLED`, 3. which led to use-after-free, as the callback tried to access the deleted `QueryWrap` object. The added test verifies that this is no longer an issue. PR-URL: https://github.com/nodejs/node/pull/26253 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: James M Snell <jasnell@gmail.com>
This commit is contained in:
parent
adbe3b837e
commit
018e95ad13
@ -407,14 +407,11 @@ void safe_free_hostent(struct hostent* host) {
|
|||||||
host->h_aliases = nullptr;
|
host->h_aliases = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (host->h_name != nullptr) {
|
free(host->h_name);
|
||||||
free(host->h_name);
|
free(host);
|
||||||
}
|
|
||||||
|
|
||||||
host->h_addrtype = host->h_length = 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void cares_wrap_hostent_cpy(struct hostent* dest, struct hostent* src) {
|
void cares_wrap_hostent_cpy(struct hostent* dest, const struct hostent* src) {
|
||||||
dest->h_addr_list = nullptr;
|
dest->h_addr_list = nullptr;
|
||||||
dest->h_addrtype = 0;
|
dest->h_addrtype = 0;
|
||||||
dest->h_aliases = nullptr;
|
dest->h_aliases = nullptr;
|
||||||
@ -461,18 +458,6 @@ void cares_wrap_hostent_cpy(struct hostent* dest, struct hostent* src) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
class QueryWrap;
|
class QueryWrap;
|
||||||
struct CaresAsyncData {
|
|
||||||
QueryWrap* wrap;
|
|
||||||
int status;
|
|
||||||
bool is_host;
|
|
||||||
union {
|
|
||||||
hostent* host;
|
|
||||||
unsigned char* buf;
|
|
||||||
} data;
|
|
||||||
int len;
|
|
||||||
|
|
||||||
uv_async_t async_handle;
|
|
||||||
};
|
|
||||||
|
|
||||||
void ChannelWrap::Setup() {
|
void ChannelWrap::Setup() {
|
||||||
struct ares_options options;
|
struct ares_options options;
|
||||||
@ -525,20 +510,21 @@ void ChannelWrap::CloseTimer() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ChannelWrap::~ChannelWrap() {
|
ChannelWrap::~ChannelWrap() {
|
||||||
|
ares_destroy(channel_);
|
||||||
|
|
||||||
if (library_inited_) {
|
if (library_inited_) {
|
||||||
Mutex::ScopedLock lock(ares_library_mutex);
|
Mutex::ScopedLock lock(ares_library_mutex);
|
||||||
// This decreases the reference counter increased by ares_library_init().
|
// This decreases the reference counter increased by ares_library_init().
|
||||||
ares_library_cleanup();
|
ares_library_cleanup();
|
||||||
}
|
}
|
||||||
|
|
||||||
ares_destroy(channel_);
|
|
||||||
CloseTimer();
|
CloseTimer();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void ChannelWrap::ModifyActivityQueryCount(int count) {
|
void ChannelWrap::ModifyActivityQueryCount(int count) {
|
||||||
active_query_count_ += count;
|
active_query_count_ += count;
|
||||||
if (active_query_count_ < 0) active_query_count_ = 0;
|
CHECK_GE(active_query_count_, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -602,6 +588,10 @@ class QueryWrap : public AsyncWrap {
|
|||||||
|
|
||||||
~QueryWrap() override {
|
~QueryWrap() override {
|
||||||
CHECK_EQ(false, persistent().IsEmpty());
|
CHECK_EQ(false, persistent().IsEmpty());
|
||||||
|
|
||||||
|
// Let Callback() know that this object no longer exists.
|
||||||
|
if (callback_ptr_ != nullptr)
|
||||||
|
*callback_ptr_ = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Subclasses should implement the appropriate Send method.
|
// Subclasses should implement the appropriate Send method.
|
||||||
@ -624,40 +614,50 @@ class QueryWrap : public AsyncWrap {
|
|||||||
TRACING_CATEGORY_NODE2(dns, native), trace_name_, this,
|
TRACING_CATEGORY_NODE2(dns, native), trace_name_, this,
|
||||||
"name", TRACE_STR_COPY(name));
|
"name", TRACE_STR_COPY(name));
|
||||||
ares_query(channel_->cares_channel(), name, dnsclass, type, Callback,
|
ares_query(channel_->cares_channel(), name, dnsclass, type, Callback,
|
||||||
static_cast<void*>(this));
|
MakeCallbackPointer());
|
||||||
}
|
}
|
||||||
|
|
||||||
static void CaresAsyncClose(uv_async_t* async) {
|
struct ResponseData {
|
||||||
auto data = static_cast<struct CaresAsyncData*>(async->data);
|
int status;
|
||||||
delete data->wrap;
|
bool is_host;
|
||||||
delete data;
|
DeleteFnPtr<hostent, safe_free_hostent> host;
|
||||||
}
|
MallocedBuffer<unsigned char> buf;
|
||||||
|
};
|
||||||
|
|
||||||
static void CaresAsyncCb(uv_async_t* handle) {
|
void AfterResponse() {
|
||||||
auto data = static_cast<struct CaresAsyncData*>(handle->data);
|
CHECK(response_data_);
|
||||||
|
|
||||||
QueryWrap* wrap = data->wrap;
|
const int status = response_data_->status;
|
||||||
int status = data->status;
|
|
||||||
|
|
||||||
if (status != ARES_SUCCESS) {
|
if (status != ARES_SUCCESS) {
|
||||||
wrap->ParseError(status);
|
ParseError(status);
|
||||||
} else if (!data->is_host) {
|
} else if (!response_data_->is_host) {
|
||||||
unsigned char* buf = data->data.buf;
|
Parse(response_data_->buf.data, response_data_->buf.size);
|
||||||
wrap->Parse(buf, data->len);
|
|
||||||
free(buf);
|
|
||||||
} else {
|
} else {
|
||||||
hostent* host = data->data.host;
|
Parse(response_data_->host.get());
|
||||||
wrap->Parse(host);
|
|
||||||
safe_free_hostent(host);
|
|
||||||
free(host);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
wrap->env()->CloseHandle(handle, CaresAsyncClose);
|
delete this;
|
||||||
|
}
|
||||||
|
|
||||||
|
void* MakeCallbackPointer() {
|
||||||
|
CHECK_NULL(callback_ptr_);
|
||||||
|
callback_ptr_ = new QueryWrap*(this);
|
||||||
|
return callback_ptr_;
|
||||||
|
}
|
||||||
|
|
||||||
|
static QueryWrap* FromCallbackPointer(void* arg) {
|
||||||
|
std::unique_ptr<QueryWrap*> wrap_ptr { static_cast<QueryWrap**>(arg) };
|
||||||
|
QueryWrap* wrap = *wrap_ptr.get();
|
||||||
|
if (wrap == nullptr) return nullptr;
|
||||||
|
wrap->callback_ptr_ = nullptr;
|
||||||
|
return wrap;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void Callback(void* arg, int status, int timeouts,
|
static void Callback(void* arg, int status, int timeouts,
|
||||||
unsigned char* answer_buf, int answer_len) {
|
unsigned char* answer_buf, int answer_len) {
|
||||||
QueryWrap* wrap = static_cast<QueryWrap*>(arg);
|
QueryWrap* wrap = FromCallbackPointer(arg);
|
||||||
|
if (wrap == nullptr) return;
|
||||||
|
|
||||||
unsigned char* buf_copy = nullptr;
|
unsigned char* buf_copy = nullptr;
|
||||||
if (status == ARES_SUCCESS) {
|
if (status == ARES_SUCCESS) {
|
||||||
@ -665,27 +665,19 @@ class QueryWrap : public AsyncWrap {
|
|||||||
memcpy(buf_copy, answer_buf, answer_len);
|
memcpy(buf_copy, answer_buf, answer_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
CaresAsyncData* data = new CaresAsyncData();
|
wrap->response_data_.reset(new ResponseData());
|
||||||
|
ResponseData* data = wrap->response_data_.get();
|
||||||
data->status = status;
|
data->status = status;
|
||||||
data->wrap = wrap;
|
|
||||||
data->is_host = false;
|
data->is_host = false;
|
||||||
data->data.buf = buf_copy;
|
data->buf = MallocedBuffer<unsigned char>(buf_copy, answer_len);
|
||||||
data->len = answer_len;
|
|
||||||
|
|
||||||
uv_async_t* async_handle = &data->async_handle;
|
wrap->QueueResponseCallback(status);
|
||||||
CHECK_EQ(0, uv_async_init(wrap->env()->event_loop(),
|
|
||||||
async_handle,
|
|
||||||
CaresAsyncCb));
|
|
||||||
|
|
||||||
wrap->channel_->set_query_last_ok(status != ARES_ECONNREFUSED);
|
|
||||||
wrap->channel_->ModifyActivityQueryCount(-1);
|
|
||||||
async_handle->data = data;
|
|
||||||
uv_async_send(async_handle);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void Callback(void* arg, int status, int timeouts,
|
static void Callback(void* arg, int status, int timeouts,
|
||||||
struct hostent* host) {
|
struct hostent* host) {
|
||||||
QueryWrap* wrap = static_cast<QueryWrap*>(arg);
|
QueryWrap* wrap = FromCallbackPointer(arg);
|
||||||
|
if (wrap == nullptr) return;
|
||||||
|
|
||||||
struct hostent* host_copy = nullptr;
|
struct hostent* host_copy = nullptr;
|
||||||
if (status == ARES_SUCCESS) {
|
if (status == ARES_SUCCESS) {
|
||||||
@ -693,20 +685,22 @@ class QueryWrap : public AsyncWrap {
|
|||||||
cares_wrap_hostent_cpy(host_copy, host);
|
cares_wrap_hostent_cpy(host_copy, host);
|
||||||
}
|
}
|
||||||
|
|
||||||
CaresAsyncData* data = new CaresAsyncData();
|
wrap->response_data_.reset(new ResponseData());
|
||||||
|
ResponseData* data = wrap->response_data_.get();
|
||||||
data->status = status;
|
data->status = status;
|
||||||
data->data.host = host_copy;
|
data->host.reset(host_copy);
|
||||||
data->wrap = wrap;
|
|
||||||
data->is_host = true;
|
data->is_host = true;
|
||||||
|
|
||||||
uv_async_t* async_handle = &data->async_handle;
|
wrap->QueueResponseCallback(status);
|
||||||
CHECK_EQ(0, uv_async_init(wrap->env()->event_loop(),
|
}
|
||||||
async_handle,
|
|
||||||
CaresAsyncCb));
|
|
||||||
|
|
||||||
wrap->channel_->set_query_last_ok(status != ARES_ECONNREFUSED);
|
void QueueResponseCallback(int status) {
|
||||||
async_handle->data = data;
|
env()->SetImmediate([](Environment*, void* data) {
|
||||||
uv_async_send(async_handle);
|
static_cast<QueryWrap*>(data)->AfterResponse();
|
||||||
|
}, this, object());
|
||||||
|
|
||||||
|
channel_->set_query_last_ok(status != ARES_ECONNREFUSED);
|
||||||
|
channel_->ModifyActivityQueryCount(-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CallOnComplete(Local<Value> answer,
|
void CallOnComplete(Local<Value> answer,
|
||||||
@ -749,7 +743,11 @@ class QueryWrap : public AsyncWrap {
|
|||||||
ChannelWrap* channel_;
|
ChannelWrap* channel_;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
std::unique_ptr<ResponseData> response_data_;
|
||||||
const char* trace_name_;
|
const char* trace_name_;
|
||||||
|
// Pointer to pointer to 'this' that can be reset from the destructor,
|
||||||
|
// in order to let Callback() know that 'this' no longer exists.
|
||||||
|
QueryWrap** callback_ptr_ = nullptr;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -1768,7 +1766,7 @@ class GetHostByAddrWrap: public QueryWrap {
|
|||||||
length,
|
length,
|
||||||
family,
|
family,
|
||||||
Callback,
|
Callback,
|
||||||
static_cast<void*>(static_cast<QueryWrap*>(this)));
|
MakeCallbackPointer());
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
23
test/parallel/test-worker-dns-terminate-during-query.js
Normal file
23
test/parallel/test-worker-dns-terminate-during-query.js
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
'use strict';
|
||||||
|
const common = require('../common');
|
||||||
|
const { Resolver } = require('dns');
|
||||||
|
const dgram = require('dgram');
|
||||||
|
const { Worker, isMainThread } = require('worker_threads');
|
||||||
|
|
||||||
|
// Test that Workers can terminate while DNS queries are outstanding.
|
||||||
|
|
||||||
|
if (isMainThread) {
|
||||||
|
return new Worker(__filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
const socket = dgram.createSocket('udp4');
|
||||||
|
|
||||||
|
socket.bind(0, common.mustCall(() => {
|
||||||
|
const resolver = new Resolver();
|
||||||
|
resolver.setServers([`127.0.0.1:${socket.address().port}`]);
|
||||||
|
resolver.resolve4('example.org', common.mustNotCall());
|
||||||
|
}));
|
||||||
|
|
||||||
|
socket.on('message', common.mustCall(() => {
|
||||||
|
process.exit();
|
||||||
|
}));
|
Loading…
x
Reference in New Issue
Block a user