Make waiting_fd behaviour per-IO. (#13127)

- `rb_thread_fd_close` is deprecated and now a no-op.
- IO operations (including close) no longer take a vm-wide lock.
Samuel Williams 2025-05-13 19:02:03 +09:00 committed by GitHub
parent a6435befa7
commit 425fa0aeb5
Notes: git 2025-05-13 10:02:17 +00:00
Merged-By: ioquatix <samuel@codeotaku.com>
14 changed files with 214 additions and 377 deletions

14
NEWS.md

@ -102,6 +102,19 @@ The following bundled gems are updated.
## C API updates
* IO
* `rb_thread_fd_close` is deprecated and now a no-op. If you need to expose
file descriptors from C extensions to Ruby code, create an `IO` instance
using `RUBY_IO_MODE_EXTERNAL` and use `rb_io_close(io)` to close it (this
also interrupts and waits for all pending operations on the `IO`
instance). Directly closing file descriptors does not interrupt pending
operations, and may lead to undefined behaviour. Because interruption is
now per-IO, if two `IO` objects share the same file descriptor, closing
one does not affect the other. [[Feature #18455]]
## Implementation improvements
## JIT
@ -112,3 +125,4 @@ The following bundled gems are updated.
[Bug #21049]: https://bugs.ruby-lang.org/issues/21049
[Feature #21216]: https://bugs.ruby-lang.org/issues/21216
[Feature #21258]: https://bugs.ruby-lang.org/issues/21258
[Feature #18455]: https://bugs.ruby-lang.org/issues/18455
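
For C extension authors, the migration described in the NEWS entry above might look like the following sketch. It relies on the public `rb_io_open_descriptor` and `rb_io_close` functions; the helper names and the exact mode flags are illustrative, not part of this commit.

#include "ruby/io.h"

// Wrap a descriptor owned by the extension in an IO object. RUBY_IO_MODE_EXTERNAL
// marks the descriptor as externally managed; the other mode flags are illustrative.
static VALUE
expose_descriptor(int fd)
{
    return rb_io_open_descriptor(rb_cIO, fd,
        RUBY_IO_MODE_READABLE | RUBY_IO_MODE_WRITABLE | RUBY_IO_MODE_EXTERNAL,
        Qnil, Qnil, NULL);
}

// Close via the IO object: this interrupts and waits for all operations pending
// on this particular IO before the descriptor is closed. Do not call close(2)
// on the raw descriptor directly.
static void
shutdown_descriptor(VALUE io)
{
    rb_io_close(io);
}

With the per-IO behaviour introduced here, only operations registered on this particular `IO` object are interrupted; another `IO` wrapping the same descriptor is left alone.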


@ -1,161 +0,0 @@
# AUTOGENERATED DEPENDENCIES START
thread_fd.o: $(RUBY_EXTCONF_H)
thread_fd.o: $(arch_hdrdir)/ruby/config.h
thread_fd.o: $(hdrdir)/ruby/assert.h
thread_fd.o: $(hdrdir)/ruby/backward.h
thread_fd.o: $(hdrdir)/ruby/backward/2/assume.h
thread_fd.o: $(hdrdir)/ruby/backward/2/attributes.h
thread_fd.o: $(hdrdir)/ruby/backward/2/bool.h
thread_fd.o: $(hdrdir)/ruby/backward/2/inttypes.h
thread_fd.o: $(hdrdir)/ruby/backward/2/limits.h
thread_fd.o: $(hdrdir)/ruby/backward/2/long_long.h
thread_fd.o: $(hdrdir)/ruby/backward/2/stdalign.h
thread_fd.o: $(hdrdir)/ruby/backward/2/stdarg.h
thread_fd.o: $(hdrdir)/ruby/defines.h
thread_fd.o: $(hdrdir)/ruby/intern.h
thread_fd.o: $(hdrdir)/ruby/internal/abi.h
thread_fd.o: $(hdrdir)/ruby/internal/anyargs.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/char.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/double.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/fixnum.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/gid_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/int.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/intptr_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/long.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/long_long.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/mode_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/off_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/pid_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/short.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/size_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/st_data_t.h
thread_fd.o: $(hdrdir)/ruby/internal/arithmetic/uid_t.h
thread_fd.o: $(hdrdir)/ruby/internal/assume.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/alloc_size.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/artificial.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/cold.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/const.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/constexpr.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/deprecated.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/diagnose_if.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/enum_extensibility.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/error.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/flag_enum.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/forceinline.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/format.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/maybe_unused.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/noalias.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/nodiscard.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/noexcept.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/noinline.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/nonnull.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/noreturn.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/packed_struct.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/pure.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/restrict.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/returns_nonnull.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/warning.h
thread_fd.o: $(hdrdir)/ruby/internal/attr/weakref.h
thread_fd.o: $(hdrdir)/ruby/internal/cast.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/apple.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/clang.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/gcc.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/intel.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/msvc.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_is/sunpro.h
thread_fd.o: $(hdrdir)/ruby/internal/compiler_since.h
thread_fd.o: $(hdrdir)/ruby/internal/config.h
thread_fd.o: $(hdrdir)/ruby/internal/constant_p.h
thread_fd.o: $(hdrdir)/ruby/internal/core.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rarray.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rbasic.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rbignum.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rclass.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rdata.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rfile.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rhash.h
thread_fd.o: $(hdrdir)/ruby/internal/core/robject.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rregexp.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rstring.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rstruct.h
thread_fd.o: $(hdrdir)/ruby/internal/core/rtypeddata.h
thread_fd.o: $(hdrdir)/ruby/internal/ctype.h
thread_fd.o: $(hdrdir)/ruby/internal/dllexport.h
thread_fd.o: $(hdrdir)/ruby/internal/dosish.h
thread_fd.o: $(hdrdir)/ruby/internal/error.h
thread_fd.o: $(hdrdir)/ruby/internal/eval.h
thread_fd.o: $(hdrdir)/ruby/internal/event.h
thread_fd.o: $(hdrdir)/ruby/internal/fl_type.h
thread_fd.o: $(hdrdir)/ruby/internal/gc.h
thread_fd.o: $(hdrdir)/ruby/internal/glob.h
thread_fd.o: $(hdrdir)/ruby/internal/globals.h
thread_fd.o: $(hdrdir)/ruby/internal/has/attribute.h
thread_fd.o: $(hdrdir)/ruby/internal/has/builtin.h
thread_fd.o: $(hdrdir)/ruby/internal/has/c_attribute.h
thread_fd.o: $(hdrdir)/ruby/internal/has/cpp_attribute.h
thread_fd.o: $(hdrdir)/ruby/internal/has/declspec_attribute.h
thread_fd.o: $(hdrdir)/ruby/internal/has/extension.h
thread_fd.o: $(hdrdir)/ruby/internal/has/feature.h
thread_fd.o: $(hdrdir)/ruby/internal/has/warning.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/array.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/bignum.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/class.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/compar.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/complex.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/cont.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/dir.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/enum.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/enumerator.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/error.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/eval.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/file.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/hash.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/io.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/load.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/marshal.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/numeric.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/object.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/parse.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/proc.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/process.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/random.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/range.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/rational.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/re.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/ruby.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/select.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/select/largesize.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/signal.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/sprintf.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/string.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/struct.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/thread.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/time.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/variable.h
thread_fd.o: $(hdrdir)/ruby/internal/intern/vm.h
thread_fd.o: $(hdrdir)/ruby/internal/interpreter.h
thread_fd.o: $(hdrdir)/ruby/internal/iterator.h
thread_fd.o: $(hdrdir)/ruby/internal/memory.h
thread_fd.o: $(hdrdir)/ruby/internal/method.h
thread_fd.o: $(hdrdir)/ruby/internal/module.h
thread_fd.o: $(hdrdir)/ruby/internal/newobj.h
thread_fd.o: $(hdrdir)/ruby/internal/scan_args.h
thread_fd.o: $(hdrdir)/ruby/internal/special_consts.h
thread_fd.o: $(hdrdir)/ruby/internal/static_assert.h
thread_fd.o: $(hdrdir)/ruby/internal/stdalign.h
thread_fd.o: $(hdrdir)/ruby/internal/stdbool.h
thread_fd.o: $(hdrdir)/ruby/internal/stdckdint.h
thread_fd.o: $(hdrdir)/ruby/internal/symbol.h
thread_fd.o: $(hdrdir)/ruby/internal/value.h
thread_fd.o: $(hdrdir)/ruby/internal/value_type.h
thread_fd.o: $(hdrdir)/ruby/internal/variable.h
thread_fd.o: $(hdrdir)/ruby/internal/warning_push.h
thread_fd.o: $(hdrdir)/ruby/internal/xmalloc.h
thread_fd.o: $(hdrdir)/ruby/missing.h
thread_fd.o: $(hdrdir)/ruby/ruby.h
thread_fd.o: $(hdrdir)/ruby/st.h
thread_fd.o: $(hdrdir)/ruby/subst.h
thread_fd.o: thread_fd.c
# AUTOGENERATED DEPENDENCIES END


@ -1,2 +0,0 @@
# frozen_string_literal: true
create_makefile('-test-/thread_fd')


@ -1,30 +0,0 @@
#include "ruby/ruby.h"
static VALUE
thread_fd_close(VALUE ign, VALUE fd)
{
rb_thread_fd_close(NUM2INT(fd));
return Qnil;
}
static VALUE
thread_fd_wait(VALUE ign, VALUE fd)
{
int ret = rb_thread_wait_fd(NUM2INT(fd));
return INT2NUM(ret);
}
static VALUE
thread_fd_writable(VALUE ign, VALUE fd)
{
int ret = rb_thread_fd_writable(NUM2INT(fd));
return INT2NUM(ret);
}
void
Init_thread_fd(void)
{
rb_define_singleton_method(rb_cIO, "thread_fd_close", thread_fd_close, 1);
rb_define_singleton_method(rb_cIO, "thread_fd_wait", thread_fd_wait, 1);
rb_define_singleton_method(rb_cIO, "thread_fd_writable", thread_fd_writable, 1);
}
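
The deleted test extension above exercised the fd-based helpers (`rb_thread_fd_close`, `rb_thread_wait_fd`, `rb_thread_fd_writable`). A hedged sketch of IO-object-based equivalents using the public `rb_io_wait` API follows; the wrapper names are hypothetical.

#include "ruby/io.h"

// Wait until the IO is readable or writable. rb_io_wait() takes the IO object
// rather than a bare file descriptor, so the wait is registered on that IO and
// is interrupted by IO#close under the per-IO behaviour introduced here.
static VALUE
io_wait_readable(VALUE io)
{
    return rb_io_wait(io, RB_INT2NUM(RUBY_IO_READABLE), Qnil);
}

static VALUE
io_wait_writable(VALUE io)
{
    return rb_io_wait(io, RB_INT2NUM(RUBY_IO_WRITABLE), Qnil);
}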

3
gc.c

@ -3204,6 +3204,7 @@ rb_gc_mark_children(void *objspace, VALUE obj)
gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
gc_mark_internal(RFILE(obj)->fptr->write_lock);
gc_mark_internal(RFILE(obj)->fptr->timeout);
gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
}
break;
@ -4185,6 +4186,8 @@ rb_gc_update_object_references(void *objspace, VALUE obj)
UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
}
break;
case T_REGEXP:


@ -61,10 +61,10 @@ int rb_thread_wait_fd(int fd);
int rb_thread_fd_writable(int fd);
/**
* Notifies a closing of a file descriptor to other threads. Multiple threads
* can wait for the given file descriptor at once. If such file descriptor is
* closed, threads need to start propagating their exceptions. This is the API
* to kick that process.
* This function is now a no-op. It was previously used to interrupt threads
* that were using the given file descriptor and wait for them to finish.
*
* @deprecated Use IO with RUBY_IO_MODE_EXTERNAL and `rb_io_close` instead.
*
* @param[in] fd A file descriptor.
* @note This function blocks until all the threads waiting for such fd


@ -14,10 +14,20 @@
struct rb_io;
#include "ruby/io.h" /* for rb_io_t */
#include "ccan/list/list.h"
#define IO_WITHOUT_GVL(func, arg) rb_nogvl(func, arg, RUBY_UBF_IO, 0, RB_NOGVL_OFFLOAD_SAFE)
#define IO_WITHOUT_GVL_INT(func, arg) (int)(VALUE)IO_WITHOUT_GVL(func, arg)
// Represents an in-flight blocking operation:
struct rb_io_blocking_operation {
// The linked list data structure.
struct ccan_list_node list;
// The execution context of the blocking operation:
struct rb_execution_context_struct *ec;
};
/** Ruby's IO, metadata and buffers. */
struct rb_io {
@ -111,6 +121,15 @@ struct rb_io {
* The timeout associated with this IO when performing blocking operations.
*/
VALUE timeout;
/**
* Threads that are performing a blocking operation without the GVL using
* this IO. On calling IO#close, these threads will be interrupted so that
* the operation can be cancelled.
*/
struct ccan_list_head blocking_operations;
struct rb_execution_context_struct *closing_ec;
VALUE wakeup_mutex;
};
/* io.c */
@ -125,7 +144,7 @@ VALUE rb_io_prep_stdin(void);
VALUE rb_io_prep_stdout(void);
VALUE rb_io_prep_stderr(void);
int rb_io_fptr_finalize(struct rb_io *fptr);
int rb_io_notify_close(struct rb_io *fptr);
RUBY_SYMBOL_EXPORT_BEGIN
/* io.c (export) */
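
The `blocking_operations` list and `struct rb_io_blocking_operation` are used roughly as follows — a condensed, illustrative restatement of the pattern in `rb_thread_io_blocking_call` in the thread.c diff below. `io_call_without_gvl` is a hypothetical name, everything here is Ruby-internal, and the real code additionally handles non-local exits with EC_PUSH_TAG/EC_JUMP_TAG.

static int
io_call_without_gvl(struct rb_io *io, void *(*function)(void *), void *argument)
{
    struct rb_io_blocking_operation blocking_operation = {
        .ec = GET_EC(), // remember which execution context to interrupt on close
    };
    ccan_list_add(&io->blocking_operations, &blocking_operation.list);

    // IO_WITHOUT_GVL expands to rb_nogvl(..., RUBY_UBF_IO, 0, RB_NOGVL_OFFLOAD_SAFE):
    int result = IO_WITHOUT_GVL_INT(function, argument);

    // Unregister; if IO#close is waiting in rb_thread_io_close_wait(), this also
    // wakes the closing thread or fiber via io->wakeup_mutex.
    rb_io_blocking_operation_release(io, &blocking_operation);

    return result;
}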


@ -13,6 +13,7 @@
#include "ccan/list/list.h" /* for list in rb_io_close_wait_list */
struct rb_thread_struct; /* in vm_core.h */
struct rb_io;
#define RB_VM_SAVE_MACHINE_CONTEXT(th) \
do { \
@ -58,14 +59,8 @@ void ruby_mn_threads_params(void);
int rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout);
int rb_thread_wait_for_single_fd(int fd, int events, struct timeval * timeout);
struct rb_io_close_wait_list {
struct ccan_list_head pending_fd_users;
VALUE closing_thread;
VALUE closing_fiber;
VALUE wakeup_mutex;
};
int rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy);
void rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy);
size_t rb_thread_io_close_interrupt(struct rb_io *);
void rb_thread_io_close_wait(struct rb_io *);
void rb_ec_check_ints(struct rb_execution_context_struct *ec);
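
These two declarations replace the old `rb_io_close_wait_list` machinery. Their intended call sequence, mirroring `io_close_fptr`/`fptr_finalize_flush` in the io.c diff below but heavily simplified (buffer flushing, stdio handling and error reporting omitted; `io_close_sketch` is a hypothetical name):

#include <unistd.h>
#include "internal/io.h"
#include "internal/thread.h"

static void
io_close_sketch(struct rb_io *io)
{
    // Enqueue the "stream closed in another thread" exception into every execution
    // context blocked on this IO and interrupt it. Returns the number of interrupted
    // operations, or 0 if there were none or if another thread is already closing.
    size_t interrupted = rb_thread_io_close_interrupt(io);

    // Wait until every interrupted operation has unregistered itself, so close(2)
    // cannot race with an in-flight read or write on the descriptor.
    rb_thread_io_close_wait(io);

    close(io->fd);
    io->fd = -1;

    (void)interrupted;
}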

56
io.c

@ -5517,8 +5517,7 @@ maygvl_fclose(FILE *file, int keepgvl)
static void free_io_buffer(rb_io_buffer_t *buf);
static void
fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
struct rb_io_close_wait_list *busy)
fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl)
{
VALUE error = Qnil;
int fd = fptr->fd;
@ -5558,11 +5557,8 @@ fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
fptr->stdio_file = 0;
fptr->mode &= ~(FMODE_READABLE|FMODE_WRITABLE);
// Ensure waiting_fd users do not hit EBADF.
if (busy) {
// Wait for them to exit before we call close().
rb_notify_fd_close_wait(busy);
}
// wait for blocking operations to ensure they do not hit EBADF:
rb_thread_io_close_wait(fptr);
// Disable for now.
// if (!done && fd >= 0) {
@ -5610,7 +5606,7 @@ fptr_finalize_flush(rb_io_t *fptr, int noraise, int keepgvl,
static void
fptr_finalize(rb_io_t *fptr, int noraise)
{
fptr_finalize_flush(fptr, noraise, FALSE, 0);
fptr_finalize_flush(fptr, noraise, FALSE);
free_io_buffer(&fptr->rbuf);
free_io_buffer(&fptr->wbuf);
clear_codeconv(fptr);
@ -5686,14 +5682,20 @@ rb_io_fptr_finalize(struct rb_io *io)
}
size_t
rb_io_memsize(const rb_io_t *fptr)
rb_io_memsize(const rb_io_t *io)
{
size_t size = sizeof(rb_io_t);
size += fptr->rbuf.capa;
size += fptr->wbuf.capa;
size += fptr->cbuf.capa;
if (fptr->readconv) size += rb_econv_memsize(fptr->readconv);
if (fptr->writeconv) size += rb_econv_memsize(fptr->writeconv);
size += io->rbuf.capa;
size += io->wbuf.capa;
size += io->cbuf.capa;
if (io->readconv) size += rb_econv_memsize(io->readconv);
if (io->writeconv) size += rb_econv_memsize(io->writeconv);
struct rb_io_blocking_operation *blocking_operation = 0;
ccan_list_for_each(&io->blocking_operations, blocking_operation, list) {
size += sizeof(struct rb_io_blocking_operation);
}
return size;
}
@ -5710,7 +5712,6 @@ io_close_fptr(VALUE io)
rb_io_t *fptr;
VALUE write_io;
rb_io_t *write_fptr;
struct rb_io_close_wait_list busy;
write_io = GetWriteIO(io);
if (io != write_io) {
@ -5724,9 +5725,9 @@ io_close_fptr(VALUE io)
if (!fptr) return 0;
if (fptr->fd < 0) return 0;
if (rb_notify_fd_close(fptr->fd, &busy)) {
if (rb_thread_io_close_interrupt(fptr)) {
/* calls close(fptr->fd): */
fptr_finalize_flush(fptr, FALSE, KEEPGVL, &busy);
fptr_finalize_flush(fptr, FALSE, KEEPGVL);
}
rb_io_fptr_cleanup(fptr, FALSE);
return fptr;
@ -8369,6 +8370,10 @@ io_reopen(VALUE io, VALUE nfile)
fd = fptr->fd;
fd2 = orig->fd;
if (fd != fd2) {
// Interrupt all usage of the old file descriptor:
rb_thread_io_close_interrupt(fptr);
rb_thread_io_close_wait(fptr);
if (RUBY_IO_EXTERNAL_P(fptr) || fd <= 2 || !fptr->stdio_file) {
/* need to keep FILE objects of stdin, stdout and stderr */
if (rb_cloexec_dup2(fd2, fd) < 0)
@ -8384,7 +8389,7 @@ io_reopen(VALUE io, VALUE nfile)
rb_update_max_fd(fd);
fptr->fd = fd;
}
rb_thread_fd_close(fd);
if ((orig->mode & FMODE_READABLE) && pos >= 0) {
if (io_seek(fptr, pos, SEEK_SET) < 0 && errno) {
rb_sys_fail_path(fptr->pathv);
@ -8561,6 +8566,11 @@ rb_io_init_copy(VALUE dest, VALUE io)
fptr->pid = orig->pid;
fptr->lineno = orig->lineno;
fptr->timeout = orig->timeout;
ccan_list_head_init(&fptr->blocking_operations);
fptr->closing_ec = NULL;
fptr->wakeup_mutex = Qnil;
if (!NIL_P(orig->pathv)) fptr->pathv = orig->pathv;
fptr_copy_finalizer(fptr, orig);
@ -9298,6 +9308,10 @@ rb_io_open_descriptor(VALUE klass, int descriptor, int mode, VALUE path, VALUE t
io->timeout = timeout;
ccan_list_head_init(&io->blocking_operations);
io->closing_ec = NULL;
io->wakeup_mutex = Qnil;
if (encoding) {
io->encs = *encoding;
}
@ -9437,6 +9451,9 @@ rb_io_fptr_new(void)
fp->encs.ecopts = Qnil;
fp->write_lock = Qnil;
fp->timeout = Qnil;
ccan_list_head_init(&fp->blocking_operations);
fp->closing_ec = NULL;
fp->wakeup_mutex = Qnil;
return fp;
}
@ -9567,6 +9584,9 @@ io_initialize(VALUE io, VALUE fnum, VALUE vmode, VALUE opt)
fp->encs = convconfig;
fp->pathv = path;
fp->timeout = Qnil;
ccan_list_head_init(&fp->blocking_operations);
fp->closing_ec = NULL;
fp->wakeup_mutex = Qnil;
clear_codeconv(fp);
io_check_tty(fp);
if (fileno(stdin) == fd)
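
The io.c changes above initialize the three new `struct rb_io` fields at every construction site (`rb_io_init_copy`, `rb_io_open_descriptor`, `rb_io_fptr_new`, `io_initialize`). The invariant they establish, written out as a hypothetical helper that is not part of this commit:

static void
io_blocking_state_init(struct rb_io *io)
{
    ccan_list_head_init(&io->blocking_operations); // no in-flight blocking operations yet
    io->closing_ec = NULL;                         // nobody is closing this IO
    io->wakeup_mutex = Qnil;                       // allocated lazily by rb_thread_io_close_interrupt
}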


@ -1,24 +0,0 @@
# frozen_string_literal: true
require 'test/unit'
require '-test-/thread_fd'
class TestThreadFdClose < Test::Unit::TestCase
def test_thread_fd_close
IO.pipe do |r, w|
th = Thread.new do
begin
assert_raise(IOError) {
r.read(4)
}
ensure
w.syswrite('done')
end
end
Thread.pass until th.stop?
IO.thread_fd_close(r.fileno)
assert_equal 'done', r.read(4)
th.join
end
end
end


@ -3826,7 +3826,7 @@ __END__
end
tempfiles = []
(0..fd_setsize+1).map {|i|
(0...fd_setsize).map {|i|
tempfiles << Tempfile.create("test_io_select_with_many_files")
}

256
thread.c

@ -99,6 +99,8 @@
#include "vm_debug.h"
#include "vm_sync.h"
#include "ccan/list/list.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
@ -149,13 +151,6 @@ MAYBE_UNUSED(static int consume_communication_pipe(int fd));
static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;
struct waiting_fd {
struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
rb_thread_t *th;
int fd;
struct rb_io_close_wait_list *busy;
};
/********************************************************************************/
#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
@ -1694,44 +1689,45 @@ waitfd_to_waiting_flag(int wfd_event)
return wfd_event << 1;
}
static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
wfd->fd = fd;
wfd->th = th;
wfd->busy = NULL;
struct io_blocking_operation_arguments {
struct rb_io *io;
struct rb_io_blocking_operation *blocking_operation;
};
RB_VM_LOCK_ENTER();
{
ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
static VALUE
io_blocking_operation_release(VALUE _arguments) {
struct io_blocking_operation_arguments *arguments = (void*)_arguments;
struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;
ccan_list_del(&blocking_operation->list);
rb_io_t *io = arguments->io;
rb_thread_t *thread = io->closing_ec->thread_ptr;
rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
if (thread->scheduler != Qnil) {
rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
} else {
rb_thread_wakeup(thread->self);
}
RB_VM_LOCK_LEAVE();
return Qnil;
}
static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
rb_io_blocking_operation_release(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
if (has_waiter) {
rb_mutex_lock(wfd->busy->wakeup_mutex);
}
VALUE wakeup_mutex = io->wakeup_mutex;
/* Needs to be protected with RB_VM_LOCK because we don't know if
wfd is on the global list of pending FD ops or if it's on a
struct rb_io_close_wait_list close-waiter. */
RB_VM_LOCK_ENTER();
ccan_list_del(&wfd->wfd_node);
RB_VM_LOCK_LEAVE();
if (RB_TEST(wakeup_mutex)) {
struct io_blocking_operation_arguments arguments = {
.io = io,
.blocking_operation = blocking_operation
};
if (has_waiter) {
rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
if (th->scheduler != Qnil) {
rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
}
else {
rb_thread_wakeup(wfd->busy->closing_thread);
}
rb_mutex_unlock(wfd->busy->wakeup_mutex);
rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_release, (VALUE)&arguments);
} else {
ccan_list_del(&blocking_operation->list);
}
}
@ -1802,12 +1798,11 @@ rb_thread_mn_schedulable(VALUE thval)
VALUE
rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
{
rb_execution_context_t *volatile ec = GET_EC();
rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
rb_execution_context_t * ec = GET_EC();
rb_thread_t *th = rb_ec_thread_ptr(ec);
RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
struct waiting_fd waiting_fd;
volatile VALUE val = Qundef; /* shouldn't be used */
volatile int saved_errno = 0;
enum ruby_tag_type state;
@ -1822,7 +1817,11 @@ rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void
// `func` or not (as opposed to some previously set value).
errno = 0;
thread_io_setup_wfd(th, fd, &waiting_fd);
struct rb_io_blocking_operation blocking_operation = {
.ec = ec,
};
ccan_list_add(&io->blocking_operations, &blocking_operation.list);
{
EC_PUSH_TAG(ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
@ -1847,15 +1846,13 @@ rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void
th = rb_ec_thread_ptr(ec);
th->mn_schedulable = prev_mn_schedulable;
}
/*
* must be deleted before jump
* this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
*/
thread_io_wake_pending_closer(&waiting_fd);
rb_io_blocking_operation_release(io, &blocking_operation);
if (state) {
EC_JUMP_TAG(ec, state);
}
/* TODO: check func() */
RUBY_VM_CHECK_INTS_BLOCKING(ec);
@ -2639,76 +2636,81 @@ rb_ec_reset_raised(rb_execution_context_t *ec)
return 1;
}
int
rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
static size_t
thread_io_close_notify_all(struct rb_io *io)
{
rb_vm_t *vm = GET_THREAD()->vm;
struct waiting_fd *wfd = 0, *next;
ccan_list_head_init(&busy->pending_fd_users);
int has_any;
VALUE wakeup_mutex;
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
RB_VM_LOCK_ENTER();
{
ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
if (wfd->fd == fd) {
rb_thread_t *th = wfd->th;
VALUE err;
size_t count = 0;
rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
VALUE error = vm->special_exceptions[ruby_error_stream_closed];
ccan_list_del(&wfd->wfd_node);
ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
struct rb_io_blocking_operation *blocking_operation;
ccan_list_for_each(&io->blocking_operations, blocking_operation, list) {
rb_execution_context_t *ec = blocking_operation->ec;
wfd->busy = busy;
err = th->vm->special_exceptions[ruby_error_stream_closed];
rb_threadptr_pending_interrupt_enque(th, err);
rb_threadptr_interrupt(th);
}
}
rb_thread_t *thread = ec->thread_ptr;
rb_threadptr_pending_interrupt_enque(thread, error);
// This operation is slow:
rb_threadptr_interrupt(thread);
count += 1;
}
has_any = !ccan_list_empty(&busy->pending_fd_users);
busy->closing_thread = rb_thread_current();
busy->closing_fiber = rb_fiber_current();
wakeup_mutex = Qnil;
if (has_any) {
wakeup_mutex = rb_mutex_new();
RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
return count;
}
size_t
rb_thread_io_close_interrupt(struct rb_io *io)
{
// We guard this operation based on `io->closing_ec` -> only one thread will ever enter this function.
if (io->closing_ec) {
return 0;
}
busy->wakeup_mutex = wakeup_mutex;
RB_VM_LOCK_LEAVE();
// If there are no blocking operations, we are done:
if (ccan_list_empty(&io->blocking_operations)) {
return 0;
}
/* If the caller didn't pass *busy as a pointer to something on the stack,
we need to guard this mutex object on _our_ C stack for the duration
of this function. */
RB_GC_GUARD(wakeup_mutex);
return has_any;
// Otherwise, we are now closing the IO:
rb_execution_context_t *ec = GET_EC();
io->closing_ec = ec;
// This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
io->wakeup_mutex = rb_mutex_new();
return thread_io_close_notify_all(io);
}
void
rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
rb_thread_io_close_wait(struct rb_io* io)
{
if (!RB_TEST(busy->wakeup_mutex)) {
/* There was nobody else using this file when we closed it, so we
never bothered to allocate a mutex*/
VALUE wakeup_mutex = io->wakeup_mutex;
if (!RB_TEST(wakeup_mutex)) {
// There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
return;
}
rb_mutex_lock(busy->wakeup_mutex);
while (!ccan_list_empty(&busy->pending_fd_users)) {
rb_mutex_sleep(busy->wakeup_mutex, Qnil);
rb_mutex_lock(wakeup_mutex);
while (!ccan_list_empty(&io->blocking_operations)) {
rb_mutex_sleep(wakeup_mutex, Qnil);
}
rb_mutex_unlock(busy->wakeup_mutex);
rb_mutex_unlock(wakeup_mutex);
// We are done closing:
io->wakeup_mutex = Qnil;
io->closing_ec = NULL;
}
void
rb_thread_fd_close(int fd)
{
struct rb_io_close_wait_list busy;
if (rb_notify_fd_close(fd, &busy)) {
rb_notify_fd_close_wait(&busy);
}
rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
}
/*
@ -4412,14 +4414,17 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
}};
volatile int result = 0;
nfds_t nfds;
struct waiting_fd wfd;
struct rb_io_blocking_operation blocking_operation;
enum ruby_tag_type state;
volatile int lerrno;
rb_execution_context_t *ec = GET_EC();
rb_thread_t *th = rb_ec_thread_ptr(ec);
thread_io_setup_wfd(th, fd, &wfd);
if (io) {
blocking_operation.ec = ec;
ccan_list_add(&io->blocking_operations, &blocking_operation.list);
}
if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
// fd is readable
@ -4428,25 +4433,27 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
errno = 0;
}
else {
EC_PUSH_TAG(wfd.th->ec);
EC_PUSH_TAG(ec);
if ((state = EC_EXEC_TAG()) == TAG_NONE) {
rb_hrtime_t *to, rel, end = 0;
RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
RUBY_VM_CHECK_INTS_BLOCKING(ec);
timeout_prepare(&to, &rel, &end, timeout);
do {
nfds = numberof(fds);
result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
RUBY_VM_CHECK_INTS_BLOCKING(ec);
} while (wait_retryable(&result, lerrno, to, end));
}
EC_POP_TAG();
}
thread_io_wake_pending_closer(&wfd);
if (io) {
rb_io_blocking_operation_release(io, &blocking_operation);
}
if (state) {
EC_JUMP_TAG(wfd.th->ec, state);
EC_JUMP_TAG(ec, state);
}
if (result < 0) {
@ -4479,6 +4486,9 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
}
#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
struct select_args {
struct rb_io *io;
struct rb_io_blocking_operation *blocking_operation;
union {
int fd;
int error;
@ -4486,7 +4496,6 @@ struct select_args {
rb_fdset_t *read;
rb_fdset_t *write;
rb_fdset_t *except;
struct waiting_fd wfd;
struct timeval *tv;
};
@ -4517,7 +4526,10 @@ select_single_cleanup(VALUE ptr)
{
struct select_args *args = (struct select_args *)ptr;
thread_io_wake_pending_closer(&args->wfd);
if (args->blocking_operation) {
rb_io_blocking_operation_release(args->io, args->blocking_operation);
}
if (args->read) rb_fd_term(args->read);
if (args->write) rb_fd_term(args->write);
if (args->except) rb_fd_term(args->except);
@ -4542,22 +4554,31 @@ thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
{
rb_fdset_t rfds, wfds, efds;
struct select_args args;
int r;
VALUE ptr = (VALUE)&args;
rb_thread_t *th = GET_THREAD();
struct rb_io_blocking_operation blocking_operation;
if (io) {
args.io = io;
blocking_operation.ec = GET_EC();
ccan_list_add(&io->blocking_operations, &blocking_operation.list);
args.blocking_operation = &blocking_operation;
} else {
args.io = NULL;
blocking_operation.ec = NULL;
args.blocking_operation = NULL;
}
args.as.fd = fd;
args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
args.tv = timeout;
thread_io_setup_wfd(th, fd, &args.wfd);
r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
if (r == -1)
int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
if (result == -1)
errno = args.as.error;
return r;
return result;
}
#endif /* ! USE_POLL */
@ -5651,21 +5672,6 @@ rb_check_deadlock(rb_ractor_t *r)
}
}
// Used for VM memsize reporting. Returns the size of a list of waiting_fd
// structs. Defined here because the struct definition lives here as well.
size_t
rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
{
struct waiting_fd *waitfd = 0;
size_t size = 0;
ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
size += sizeof(struct waiting_fd);
}
return size;
}
static void
update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
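
Condensing the interrupt/wait handshake from the thread.c changes above: the closer sleeps on `wakeup_mutex` until `blocking_operations` drains, and each release deletes its node and wakes the closer while holding that same mutex, so a wakeup cannot be lost between the emptiness check and `rb_mutex_sleep`. A sketch with hypothetical names, assuming at least one operation was in flight so the mutex was allocated (the real release path goes through `rb_mutex_synchronize` and also handles the fiber-scheduler case):

static void
closer_wait_sketch(struct rb_io *io) // cf. rb_thread_io_close_wait
{
    rb_mutex_lock(io->wakeup_mutex);
    while (!ccan_list_empty(&io->blocking_operations)) {
        rb_mutex_sleep(io->wakeup_mutex, Qnil); // releases the mutex while asleep
    }
    rb_mutex_unlock(io->wakeup_mutex);
}

static void
operation_release_sketch(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{                                    // cf. rb_io_blocking_operation_release
    rb_mutex_lock(io->wakeup_mutex);
    ccan_list_del(&blocking_operation->list);
    rb_thread_wakeup(io->closing_ec->thread_ptr->self); // or rb_fiber_scheduler_unblock(...)
    rb_mutex_unlock(io->wakeup_mutex);
}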

2
vm.c

@ -3230,7 +3230,6 @@ ruby_vm_destruct(rb_vm_t *vm)
return 0;
}
size_t rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds); // thread.c
size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
// Used for VM memsize reporting. Returns the size of the at_exit list by
@ -3285,7 +3284,6 @@ vm_memsize(const void *ptr)
return (
sizeof(rb_vm_t) +
rb_vm_memsize_waiting_fds(&vm->waiting_fds) +
rb_st_memsize(vm->loaded_features_index) +
rb_st_memsize(vm->loading_table) +
rb_vm_memsize_postponed_job_queue() +


@ -1888,7 +1888,6 @@ void rb_thread_wakeup_timer_thread(int);
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
ccan_list_head_init(&vm->waiting_fds);
ccan_list_head_init(&vm->workqueue);
ccan_list_head_init(&vm->ractor.set);
ccan_list_head_init(&vm->ractor.sched.zombie_threads);