io.c: do not use rb_notify_fd_close on recycled FD
It is unsafe to release the GVL and call rb_notify_fd_close after close(2) on any given FD. FDs (file descriptors) may be recycled in other threads immediately after close() to point to a different file description. Note the distinction between "file description" and "file descriptor".

    th-1                           | th-2
    -------------------------------+---------------------------------------
    io_close_fptr                  |
      rb_notify_fd_close(fd)       |
      fptr_finalize_flush          |
        close(fd)                  |
      rb_thread_schedule           |
                                   | fd reused (via pipe/open/socket/etc)
      rb_notify_fd_close(fd)       |
        sees "stream closed"       |
        exception for DIFFERENT    |
        file description           |

* thread.c (rb_thread_io_blocking_region): adjust comment for list_del
* thread.c (rb_notify_fd_close): give busy list to caller
* thread.c (rb_thread_fd_close): loop on busy list
* io.c (io_close_fptr): do not call rb_thread_fd_close on invalid FD
* io.c (io_reopen): use rb_thread_fd_close

Fixes: r57422 ("io.c: close before wait")

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63216 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
parent
af72dcd91b
commit
645f7fbd4e
16
io.c
16
io.c
@ -21,6 +21,7 @@
|
|||||||
#include <ctype.h>
|
#include <ctype.h>
|
||||||
#include <errno.h>
|
#include <errno.h>
|
||||||
#include "ruby_atomic.h"
|
#include "ruby_atomic.h"
|
||||||
|
#include "ccan/list/list.h"
|
||||||
|
|
||||||
#undef free
|
#undef free
|
||||||
#define free(x) xfree(x)
|
#define free(x) xfree(x)
|
||||||
@ -4654,15 +4655,14 @@ rb_io_memsize(const rb_io_t *fptr)
|
|||||||
# define KEEPGVL FALSE
|
# define KEEPGVL FALSE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
int rb_notify_fd_close(int fd);
|
int rb_notify_fd_close(int fd, struct list_head *);
|
||||||
static rb_io_t *
|
static rb_io_t *
|
||||||
io_close_fptr(VALUE io)
|
io_close_fptr(VALUE io)
|
||||||
{
|
{
|
||||||
rb_io_t *fptr;
|
rb_io_t *fptr;
|
||||||
int fd;
|
|
||||||
VALUE write_io;
|
VALUE write_io;
|
||||||
rb_io_t *write_fptr;
|
rb_io_t *write_fptr;
|
||||||
int busy;
|
LIST_HEAD(busy);
|
||||||
|
|
||||||
write_io = GetWriteIO(io);
|
write_io = GetWriteIO(io);
|
||||||
if (io != write_io) {
|
if (io != write_io) {
|
||||||
@ -4676,11 +4676,9 @@ io_close_fptr(VALUE io)
|
|||||||
if (!fptr) return 0;
|
if (!fptr) return 0;
|
||||||
if (fptr->fd < 0) return 0;
|
if (fptr->fd < 0) return 0;
|
||||||
|
|
||||||
fd = fptr->fd;
|
if (rb_notify_fd_close(fptr->fd, &busy)) {
|
||||||
busy = rb_notify_fd_close(fd);
|
fptr_finalize_flush(fptr, FALSE, KEEPGVL); /* calls close(fptr->fd) */
|
||||||
if (busy) {
|
do rb_thread_schedule(); while (!list_empty(&busy));
|
||||||
fptr_finalize_flush(fptr, FALSE, KEEPGVL);
|
|
||||||
do rb_thread_schedule(); while (rb_notify_fd_close(fd));
|
|
||||||
}
|
}
|
||||||
rb_io_fptr_cleanup(fptr, FALSE);
|
rb_io_fptr_cleanup(fptr, FALSE);
|
||||||
return fptr;
|
return fptr;
|
||||||
@ -7185,7 +7183,7 @@ io_reopen(VALUE io, VALUE nfile)
|
|||||||
rb_update_max_fd(fd);
|
rb_update_max_fd(fd);
|
||||||
fptr->fd = fd;
|
fptr->fd = fd;
|
||||||
}
|
}
|
||||||
rb_notify_fd_close(fd);
|
rb_thread_fd_close(fd);
|
||||||
if ((orig->mode & FMODE_READABLE) && pos >= 0) {
|
if ((orig->mode & FMODE_READABLE) && pos >= 0) {
|
||||||
if (io_seek(fptr, pos, SEEK_SET) < 0 && errno) {
|
if (io_seek(fptr, pos, SEEK_SET) < 0 && errno) {
|
||||||
rb_sys_fail_path(fptr->pathv);
|
rb_sys_fail_path(fptr->pathv);
|
||||||
|
28
thread.c
28
thread.c
@ -1527,7 +1527,10 @@ rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
|
|||||||
}
|
}
|
||||||
EC_POP_TAG();
|
EC_POP_TAG();
|
||||||
|
|
||||||
/* must be deleted before jump */
|
/*
|
||||||
|
* must be deleted before jump
|
||||||
|
* this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
|
||||||
|
*/
|
||||||
list_del(&wfd.wfd_node);
|
list_del(&wfd.wfd_node);
|
||||||
|
|
||||||
if (state) {
|
if (state) {
|
||||||
@ -2260,35 +2263,36 @@ rb_ec_reset_raised(rb_execution_context_t *ec)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
rb_notify_fd_close(int fd)
|
rb_notify_fd_close(int fd, struct list_head *busy)
|
||||||
{
|
{
|
||||||
rb_vm_t *vm = GET_THREAD()->vm;
|
rb_vm_t *vm = GET_THREAD()->vm;
|
||||||
struct waiting_fd *wfd = 0;
|
struct waiting_fd *wfd = 0;
|
||||||
int busy;
|
struct waiting_fd *next = 0;
|
||||||
|
|
||||||
busy = 0;
|
list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
|
||||||
list_for_each(&vm->waiting_fds, wfd, wfd_node) {
|
|
||||||
if (wfd->fd == fd) {
|
if (wfd->fd == fd) {
|
||||||
rb_thread_t *th = wfd->th;
|
rb_thread_t *th = wfd->th;
|
||||||
VALUE err;
|
VALUE err;
|
||||||
|
|
||||||
busy = 1;
|
list_del(&wfd->wfd_node);
|
||||||
if (!th) {
|
list_add(busy, &wfd->wfd_node);
|
||||||
continue;
|
|
||||||
}
|
|
||||||
wfd->th = 0;
|
|
||||||
err = th->vm->special_exceptions[ruby_error_stream_closed];
|
err = th->vm->special_exceptions[ruby_error_stream_closed];
|
||||||
rb_threadptr_pending_interrupt_enque(th, err);
|
rb_threadptr_pending_interrupt_enque(th, err);
|
||||||
rb_threadptr_interrupt(th);
|
rb_threadptr_interrupt(th);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return busy;
|
return !list_empty(busy);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
rb_thread_fd_close(int fd)
|
rb_thread_fd_close(int fd)
|
||||||
{
|
{
|
||||||
while (rb_notify_fd_close(fd)) rb_thread_schedule();
|
LIST_HEAD(busy);
|
||||||
|
|
||||||
|
if (rb_notify_fd_close(fd, &busy)) {
|
||||||
|
do rb_thread_schedule(); while (!list_empty(&busy));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
Loading…
x
Reference in New Issue
Block a user