deps: upgrade to libuv 1.23.2

PR-URL: https://github.com/nodejs/node/pull/23336
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Gireesh Punathil <gpunathi@in.ibm.com>
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Richard Lau <riclau@uk.ibm.com>
Reviewed-By: Trivikram Kamat <trivikr.dev@gmail.com>
Reviewed-By: Sakthipriyan Vairamani <thechargingvolcano@gmail.com>
Fixes: https://github.com/nodejs/node/issues/23043
Fixes: https://github.com/nodejs/node/issues/21773
Fixes: https://github.com/nodejs/node/issues/16601
Fixes: https://github.com/nodejs/node/issues/22999
Fixes: https://github.com/nodejs/node/issues/23219
Fixes: https://github.com/nodejs/node/issues/23066
Fixes: https://github.com/nodejs/node/issues/23067
Fixes: https://github.com/nodejs/node/issues/23089
Authored by cjihrig on 2018-10-08 13:14:58 -04:00, committed by Rich Trott
parent 13340d47fc
commit c65a523597
29 changed files with 557 additions and 283 deletions

deps/uv/AUTHORS

@ -351,3 +351,4 @@ Jeremiah Senkpiel <fishrock123@rocketmail.com>
Andy Zhang <zhangyong232@gmail.com>
dmabupt <dmabupt@gmail.com>
Ryan Liptak <squeek502@hotmail.com>
Ali Ijaz Sheikh <ofrobots@google.com>


@ -193,7 +193,6 @@ if(WIN32)
src/win/poll.c
src/win/process.c
src/win/process-stdio.c
src/win/req.c
src/win/signal.c
src/win/snprintf.c
src/win/stream.c

deps/uv/ChangeLog

@ -1,3 +1,41 @@
2018.10.09, Version 1.23.2 (Stable), 34c12788d2e7308f3ac506c0abcbf74c0d6abd20
Changes since version 1.23.1:
* unix: return 0 retrieving rss on cygwin (cjihrig)
* unix: initialize uv_interface_address_t.phys_addr (cjihrig)
* test: handle uv_os_setpriority() windows edge case (cjihrig)
* tty, win: fix read stop for raw mode (Bartosz Sosnowski)
* Revert "Revert "unix,fs: fix for potential partial reads/writes"" (Jameson
Nash)
* unix,readv: always permit partial reads to return (Jameson Nash)
* win,tty: fix uv_tty_close() (Bartosz Sosnowski)
* doc: remove extraneous "on" (Ben Noordhuis)
* unix,win: fix threadpool race condition (Anna Henningsen)
* unix: rework thread barrier implementation (Ben Noordhuis)
* aix: switch to libuv's own thread barrier impl (Ben Noordhuis)
* unix: signal done to last thread barrier waiter (Ben Noordhuis)
* test: add uv_barrier_wait serial thread test (Ali Ijaz Sheikh)
* unix: optimize uv_fs_readlink() memory allocation (Ben Noordhuis)
* win: remove req.c and other cleanup (Carlo Marcelo Arenas Belón)
* aix: don't EISDIR on read from directory fd (Ben Noordhuis)
2018.09.22, Version 1.23.1 (Stable), d2282b3d67821dc53c907c2155fa8c5c6ce25180
Changes since version 1.23.0:
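Several of the 1.23.2 entries above belong to the thread-barrier rework: uv_barrier_wait() now signals completion to the last waiter, so the single thread that receives the positive "serial thread" result can safely destroy the barrier. A minimal usage sketch of that pattern, mirroring the new barrier_serial_thread test further down in this diff; the thread count and worker body are illustrative, only the uv_barrier_* / uv_thread_* calls come from the public API:

#include <stdlib.h>
#include <uv.h>

static void worker(void* arg) {
  uv_barrier_t* barrier = arg;
  /* Exactly one waiter observes a value > 0; by then all other waiters
   * have left the barrier, so destroying it here is safe. */
  if (uv_barrier_wait(barrier) > 0)
    uv_barrier_destroy(barrier);
}

int main(void) {
  uv_thread_t threads[4];
  uv_barrier_t barrier;
  unsigned int i;

  if (uv_barrier_init(&barrier, 1 + 4))  /* main thread + 4 workers */
    abort();
  for (i = 0; i < 4; i++)
    if (uv_thread_create(&threads[i], worker, &barrier))
      abort();

  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);

  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  return 0;
}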

deps/uv/Makefile.am

@ -68,7 +68,6 @@ libuv_la_SOURCES += src/win/async.c \
src/win/poll.c \
src/win/process-stdio.c \
src/win/process.c \
src/win/req.c \
src/win/req-inl.h \
src/win/signal.c \
src/win/stream.c \
@ -340,8 +339,7 @@ libuv_la_SOURCES += src/unix/aix.c src/unix/aix-common.c
endif
if ANDROID
uvinclude_HEADERS += include/uv/android-ifaddrs.h \
include/uv/pthread-barrier.h
uvinclude_HEADERS += include/uv/android-ifaddrs.h
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c
endif
@ -361,8 +359,7 @@ libuv_la_SOURCES += src/unix/cygwin.c \
endif
if DARWIN
uvinclude_HEADERS += include/uv/darwin.h \
include/uv/pthread-barrier.h
uvinclude_HEADERS += include/uv/darwin.h
libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1
libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1
libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
@ -445,7 +442,6 @@ libuv_la_SOURCES += src/unix/no-proctitle.c \
endif
if OS390
uvinclude_HEADERS += include/uv/pthread-barrier.h
libuv_la_CFLAGS += -D_UNIX03_THREADS \
-D_UNIX03_SOURCE \
-D_OPEN_SYS_IF_EXT=1 \


@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.23.1], [https://github.com/libuv/libuv/issues])
AC_INIT([libuv], [1.23.2], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])


@ -126,7 +126,7 @@ so the current approach is to run blocking file I/O operations in a thread pool.
For a thorough explanation of the cross-platform file I/O landscape, checkout
`this post <http://blog.libtorrent.org/2012/10/asynchronous-disk-io/>`_.
libuv currently uses a global thread pool on which all loops can queue work on. 3 types of
libuv currently uses a global thread pool on which all loops can queue work. 3 types of
operations are currently run on this pool:
* File system operations
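As a point of reference for the pool this documentation describes, here is a minimal uv_queue_work() sketch; the doubling work and the callback names are illustrative and not part of this change, only uv_queue_work(), uv_default_loop() and uv_run() are the library's API:

#include <stdio.h>
#include <uv.h>

static void work_cb(uv_work_t* req) {
  /* Runs on the global thread pool described above. */
  int* value = req->data;
  *value *= 2;
}

static void after_work_cb(uv_work_t* req, int status) {
  /* Runs back on the event loop thread once the pool work is done. */
  printf("result=%d status=%d\n", *(int*) req->data, status);
}

int main(void) {
  uv_work_t req;
  int value = 21;

  req.data = &value;
  uv_queue_work(uv_default_loop(), &req, work_cb, after_work_cb);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}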


@ -1,69 +0,0 @@
/*
Copyright (c) 2016, Kari Tristan Helgason <kthelgason@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _UV_PTHREAD_BARRIER_
#define _UV_PTHREAD_BARRIER_
#include <errno.h>
#include <pthread.h>
#if !defined(__MVS__)
#include <semaphore.h> /* sem_t */
#endif
#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345
#define UV__PTHREAD_BARRIER_FALLBACK 1
/*
* To maintain ABI compatibility with
* libuv v1.x struct is padded according
* to target platform
*/
#if defined(__ANDROID__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
sizeof(pthread_cond_t) + \
sizeof(unsigned int) - \
sizeof(void *)
#elif defined(__APPLE__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
2 * sizeof(sem_t) + \
2 * sizeof(unsigned int) - \
sizeof(void *)
#else
# define UV_BARRIER_STRUCT_PADDING 0
#endif
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
} _uv_barrier;
typedef struct {
_uv_barrier* b;
char _pad[UV_BARRIER_STRUCT_PADDING];
} pthread_barrier_t;
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count);
int pthread_barrier_wait(pthread_barrier_t* barrier);
int pthread_barrier_destroy(pthread_barrier_t *barrier);
#endif /* _UV_PTHREAD_BARRIER_ */


@ -66,10 +66,6 @@
# include "uv/posix.h"
#endif
#ifndef PTHREAD_BARRIER_SERIAL_THREAD
# include "uv/pthread-barrier.h"
#endif
#ifndef NI_MAXHOST
# define NI_MAXHOST 1025
#endif
@ -136,8 +132,28 @@ typedef pthread_rwlock_t uv_rwlock_t;
typedef UV_PLATFORM_SEM_T uv_sem_t;
typedef pthread_cond_t uv_cond_t;
typedef pthread_key_t uv_key_t;
typedef pthread_barrier_t uv_barrier_t;
/* Note: guard clauses should match uv_barrier_init's in src/unix/thread.c. */
#if defined(_AIX) || !defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Merge into uv_barrier_t in v2. */
struct _uv_barrier {
uv_mutex_t mutex;
uv_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
};
typedef struct {
struct _uv_barrier* b;
# if defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Remove padding in v2. */
char pad[sizeof(pthread_barrier_t) - sizeof(struct _uv_barrier*)];
# endif
} uv_barrier_t;
#else
typedef pthread_barrier_t uv_barrier_t;
#endif
/* Platform-specific definitions for uv_spawn support. */
typedef gid_t uv_gid_t;


@ -32,7 +32,7 @@
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 23
#define UV_VERSION_PATCH 1
#define UV_VERSION_PATCH 2
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""


@ -62,10 +62,10 @@ static void worker(void* arg) {
uv_sem_post((uv_sem_t*) arg);
arg = NULL;
uv_mutex_lock(&mutex);
for (;;) {
uv_mutex_lock(&mutex);
/* `mutex` should always be locked at this point. */
wait_for_work:
/* Keep waiting while either no work is present or only slow I/O
and we're at the threshold for that. */
while (QUEUE_EMPTY(&wq) ||
@ -93,13 +93,13 @@ static void worker(void* arg) {
other work in the queue is done. */
if (slow_io_work_running >= slow_work_thread_threshold()) {
QUEUE_INSERT_TAIL(&wq, q);
goto wait_for_work;
continue;
}
/* If we encountered a request to run slow I/O work but there is none
to run, that means it's cancelled => Start over. */
if (QUEUE_EMPTY(&slow_io_pending_wq))
goto wait_for_work;
continue;
is_slow_work = 1;
slow_io_work_running++;
@ -122,13 +122,19 @@ static void worker(void* arg) {
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
if (is_slow_work)
slow_io_work_running--;
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
/* Lock `mutex` since that is expected at the start of the next
* iteration. */
uv_mutex_lock(&mutex);
if (is_slow_work) {
/* `slow_io_work_running` is protected by `mutex`. */
slow_io_work_running--;
}
}
}


@ -119,16 +119,13 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
continue;
address = *addresses;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
for (i = 0; i < *count; i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {
#if defined(__CYGWIN__) || defined(__MSYS__)
memset(address->phys_addr, 0, sizeof(address->phys_addr));
#else
struct sockaddr_dl* sa_addr;
sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
#endif
}
address++;
}


@ -38,7 +38,7 @@ int uv_uptime(double* uptime) {
int uv_resident_set_memory(size_t* rss) {
/* FIXME: read /proc/meminfo? */
*rss = 0;
return UV_ENOSYS;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {

deps/uv/src/unix/fs.c

@ -262,17 +262,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
static int no_preadv;
#endif
unsigned int iovmax;
ssize_t result;
#if defined(_AIX)
struct stat buf;
if(fstat(req->file, &buf))
return -1;
if(S_ISDIR(buf.st_mode)) {
errno = EISDIR;
return -1;
}
#endif /* defined(_AIX) */
iovmax = uv__getiovmax();
if (req->nbufs > iovmax)
req->nbufs = iovmax;
if (req->off < 0) {
if (req->nbufs == 1)
result = read(req->file, req->bufs[0].base, req->bufs[0].len);
@ -291,25 +287,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
if (no_preadv) retry:
# endif
{
off_t nread;
size_t index;
nread = 0;
index = 0;
result = 1;
do {
if (req->bufs[index].len > 0) {
result = pread(req->file,
req->bufs[index].base,
req->bufs[index].len,
req->off + nread);
if (result > 0)
nread += result;
}
index++;
} while (index < req->nbufs && result > 0);
if (nread > 0)
result = nread;
result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
}
# if defined(__linux__)
else {
@ -327,6 +305,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
}
done:
/* Early cleanup of bufs allocation, since we're done with it. */
if (req->bufs != req->bufsml)
uv__free(req->bufs);
req->bufs = NULL;
req->nbufs = 0;
return result;
}
@ -391,11 +376,13 @@ static ssize_t uv__fs_pathmax_size(const char* path) {
}
static ssize_t uv__fs_readlink(uv_fs_t* req) {
ssize_t maxlen;
ssize_t len;
char* buf;
char* newbuf;
len = uv__fs_pathmax_size(req->path);
buf = uv__malloc(len + 1);
maxlen = uv__fs_pathmax_size(req->path);
buf = uv__malloc(maxlen);
if (buf == NULL) {
errno = ENOMEM;
@ -403,17 +390,28 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
}
#if defined(__MVS__)
len = os390_readlink(req->path, buf, len);
len = os390_readlink(req->path, buf, maxlen);
#else
len = readlink(req->path, buf, len);
len = readlink(req->path, buf, maxlen);
#endif
if (len == -1) {
uv__free(buf);
return -1;
}
/* Uncommon case: resize to make room for the trailing nul byte. */
if (len == maxlen) {
newbuf = uv__realloc(buf, len + 1);
if (newbuf == NULL) {
uv__free(buf);
return -1;
}
buf = newbuf;
}
buf[len] = '\0';
req->ptr = buf;
@ -735,25 +733,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
if (no_pwritev) retry:
# endif
{
off_t written;
size_t index;
written = 0;
index = 0;
r = 0;
do {
if (req->bufs[index].len > 0) {
r = pwrite(req->file,
req->bufs[index].base,
req->bufs[index].len,
req->off + written);
if (r > 0)
written += r;
}
index++;
} while (index < req->nbufs && r >= 0);
if (written > 0)
r = written;
r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
}
# if defined(__linux__)
else {
@ -1045,9 +1025,21 @@ static int uv__fs_fstat(int fd, uv_stat_t *buf) {
return ret;
}
static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
size_t offset;
/* Figure out which bufs are done */
for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
size -= bufs[offset].len;
typedef ssize_t (*uv__fs_buf_iter_processor)(uv_fs_t* req);
static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process) {
/* Fix a partial read/write */
if (size > 0) {
bufs[offset].base += size;
bufs[offset].len -= size;
}
return offset;
}
static ssize_t uv__fs_write_all(uv_fs_t* req) {
unsigned int iovmax;
unsigned int nbufs;
uv_buf_t* bufs;
@ -1064,7 +1056,10 @@ static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process)
if (req->nbufs > iovmax)
req->nbufs = iovmax;
result = process(req);
do
result = uv__fs_write(req);
while (result < 0 && errno == EINTR);
if (result <= 0) {
if (total == 0)
total = result;
@ -1074,14 +1069,12 @@ static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process)
if (req->off >= 0)
req->off += result;
req->nbufs = uv__fs_buf_offset(req->bufs, result);
req->bufs += req->nbufs;
nbufs -= req->nbufs;
total += result;
}
if (errno == EINTR && total == -1)
return total;
if (bufs != req->bufsml)
uv__free(bufs);
@ -1098,7 +1091,8 @@ static void uv__fs_work(struct uv__work* w) {
ssize_t r;
req = container_of(w, uv_fs_t, work_req);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
req->fs_type == UV_FS_READ);
do {
errno = 0;
@ -1127,7 +1121,7 @@ static void uv__fs_work(struct uv__work* w) {
X(MKDIR, mkdir(req->path, req->mode));
X(MKDTEMP, uv__fs_mkdtemp(req));
X(OPEN, uv__fs_open(req));
X(READ, uv__fs_buf_iter(req, uv__fs_read));
X(READ, uv__fs_read(req));
X(SCANDIR, uv__fs_scandir(req));
X(READLINK, uv__fs_readlink(req));
X(REALPATH, uv__fs_realpath(req));
@ -1138,7 +1132,7 @@ static void uv__fs_work(struct uv__work* w) {
X(SYMLINK, symlink(req->path, req->new_path));
X(UNLINK, unlink(req->path));
X(UTIME, uv__fs_utime(req));
X(WRITE, uv__fs_buf_iter(req, uv__fs_write));
X(WRITE, uv__fs_write_all(req));
default: abort();
}
#undef X
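Because the read path above no longer loops internally to fill every buffer, and uv__fs_work no longer retries reads on EINTR, a synchronous caller that wants a full range has to loop itself. A hedged caller-side sketch under those assumptions, using a single buffer; the read_full() name is illustrative, the uv_fs_read()/uv_fs_req_cleanup() calls are the real API:

#include <uv.h>

/* Keep issuing uv_fs_read() until `len` bytes arrive, EOF, or an error,
 * advancing past whatever each call returned. */
int read_full(uv_file fd, char* base, unsigned int len, int64_t offset) {
  uv_fs_t req;
  unsigned int done = 0;
  int r;

  while (done < len) {
    uv_buf_t buf = uv_buf_init(base + done, len - done);
    r = uv_fs_read(NULL, &req, fd, &buf, 1, offset + done, NULL);
    uv_fs_req_cleanup(&req);
    if (r < 0)
      return r;   /* error; may now include UV_EINTR */
    if (r == 0)
      break;      /* EOF */
    done += (unsigned int) r;
  }
  return (int) done;
}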


@ -890,6 +890,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses,
continue;
address = *addresses;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
for (i = 0; i < (*count); i++) {
if (strcmp(address->name, ent->ifa_name) == 0) {


@ -512,7 +512,7 @@ static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
/* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
address->is_internal = flg.__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
address++;
}
@ -624,6 +624,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
}
address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
memset(address->phys_addr, 0, sizeof(address->phys_addr));
address++;
}


@ -44,108 +44,119 @@
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
#if defined(UV__PTHREAD_BARRIER_FALLBACK)
/* TODO: support barrier_attr */
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count) {
/* Note: guard clauses should match uv_barrier_t's in include/uv/uv-unix.h. */
#if defined(_AIX) || !defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
struct _uv_barrier* b;
int rc;
_uv_barrier* b;
if (barrier == NULL || count == 0)
return EINVAL;
if (barrier_attr != NULL)
return ENOTSUP;
return UV_EINVAL;
b = uv__malloc(sizeof(*b));
if (b == NULL)
return ENOMEM;
return UV_ENOMEM;
b->in = 0;
b->out = 0;
b->threshold = count;
if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;
if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
rc = uv_cond_init(&b->cond);
if (rc != 0)
goto error;
barrier->b = b;
return 0;
error:
pthread_mutex_destroy(&b->mutex);
uv_mutex_destroy(&b->mutex);
error2:
uv__free(b);
return rc;
}
int pthread_barrier_wait(pthread_barrier_t* barrier) {
int rc;
_uv_barrier* b;
int uv_barrier_wait(uv_barrier_t* barrier) {
struct _uv_barrier* b;
int last;
if (barrier == NULL || barrier->b == NULL)
return EINVAL;
return UV_EINVAL;
b = barrier->b;
/* Lock the mutex*/
if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
return rc;
uv_mutex_lock(&b->mutex);
/* Increment the count. If this is the first thread to reach the threshold,
wake up waiters, unlock the mutex, then return
PTHREAD_BARRIER_SERIAL_THREAD. */
if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold - 1;
rc = pthread_cond_signal(&b->cond);
assert(rc == 0);
pthread_mutex_unlock(&b->mutex);
return PTHREAD_BARRIER_SERIAL_THREAD;
b->out = b->threshold;
uv_cond_signal(&b->cond);
} else {
do
uv_cond_wait(&b->cond, &b->mutex);
while (b->in != 0);
}
/* Otherwise, wait for other threads until in is set to 0,
then return 0 to indicate this is not the first thread. */
do {
if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
break;
} while (b->in != 0);
/* mark thread exit */
b->out--;
pthread_cond_signal(&b->cond);
pthread_mutex_unlock(&b->mutex);
return rc;
last = (--b->out == 0);
if (!last)
uv_cond_signal(&b->cond); /* Not needed for last thread. */
uv_mutex_unlock(&b->mutex);
return last;
}
int pthread_barrier_destroy(pthread_barrier_t* barrier) {
int rc;
_uv_barrier* b;
if (barrier == NULL || barrier->b == NULL)
return EINVAL;
void uv_barrier_destroy(uv_barrier_t* barrier) {
struct _uv_barrier* b;
b = barrier->b;
uv_mutex_lock(&b->mutex);
if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
return rc;
assert(b->in == 0);
assert(b->out == 0);
if (b->in > 0 || b->out > 0)
rc = EBUSY;
if (b->in != 0 || b->out != 0)
abort();
pthread_mutex_unlock(&b->mutex);
uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy(&b->cond);
if (rc)
return rc;
pthread_cond_destroy(&b->cond);
pthread_mutex_destroy(&b->mutex);
uv__free(barrier->b);
barrier->b = NULL;
return 0;
}
#else
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;
rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
#endif
@ -771,25 +782,6 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
}
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int r = pthread_barrier_wait(barrier);
if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return r == PTHREAD_BARRIER_SERIAL_THREAD;
}
int uv_key_create(uv_key_t* key) {
return UV__ERR(pthread_key_create(key, NULL));
}


@ -83,7 +83,7 @@ static void uv_relative_path(const WCHAR* filename,
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
size_t len, i;
if (filename == NULL) {
if (dir != NULL)
*dir = NULL;


@ -1517,10 +1517,10 @@ static void fs__fchmod(uv_fs_t* req) {
SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
goto fchmod_cleanup;
}
/* Test if the Archive attribute is cleared */
if ((file_info.FileAttributes & FILE_ATTRIBUTE_ARCHIVE) == 0) {
/* Set Archive flag, otherwise setting or clearing the read-only
/* Set Archive flag, otherwise setting or clearing the read-only
flag will not work */
file_info.FileAttributes |= FILE_ATTRIBUTE_ARCHIVE;
nt_status = pNtSetInformationFile(handle,


@ -2347,7 +2347,7 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
error = GetLastError();
goto clean_sid;
}
memset(&ea, 0, sizeof(EXPLICIT_ACCESS));
if (mode & UV_READABLE)
ea.grfAccessPermissions |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;

deps/uv/src/win/req.c

@ -1,25 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"


@ -118,7 +118,7 @@ int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
ctx->arg = arg;
/* Create the thread in suspended state so we have a chance to pass
* its own creation handle to it */
* its own creation handle to it */
thread = (HANDLE) _beginthreadex(NULL,
0,
uv__thread_start,


@ -1033,6 +1033,7 @@ int uv_tty_read_stop(uv_tty_t* handle) {
/* Cancel raw read. Write some bullshit event to force the console wait to
* return. */
memset(&record, 0, sizeof record);
record.EventType = FOCUS_EVENT;
if (!WriteConsoleInputW(handle->handle, &record, 1, &written)) {
return GetLastError();
}
@ -2179,14 +2180,14 @@ void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv_tty_close(uv_tty_t* handle) {
assert(handle->u.fd == -1 || handle->u.fd > 2);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
if (handle->u.fd == -1)
CloseHandle(handle->handle);
else
close(handle->u.fd);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
handle->u.fd = -1;
handle->handle = INVALID_HANDLE_VALUE;
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);


@ -228,7 +228,7 @@ int process_copy_output(process_info_t* p, FILE* stream) {
while (fgets(buf, sizeof(buf), f) != NULL)
print_lines(buf, strlen(buf), stream);
if (ferror(f))
return -1;


@ -104,3 +104,45 @@ TEST_IMPL(barrier_3) {
return 0;
}
static void serial_worker(void* data) {
uv_barrier_t* barrier;
barrier = data;
if (uv_barrier_wait(barrier) > 0)
uv_barrier_destroy(barrier);
uv_sleep(100); /* Wait a bit before terminating. */
}
/* Ensure that uv_barrier_wait returns positive only after all threads have
* exited the barrier. If this value is returned too early and the barrier is
* destroyed prematurely, then this test may see a crash. */
TEST_IMPL(barrier_serial_thread) {
uv_thread_t threads[4];
uv_barrier_t barrier;
unsigned i;
ASSERT(0 == uv_barrier_init(&barrier, ARRAY_SIZE(threads) + 1));
for (i = 0; i < ARRAY_SIZE(threads); ++i)
ASSERT(0 == uv_thread_create(&threads[i], serial_worker, &barrier));
if (uv_barrier_wait(&barrier) > 0)
uv_barrier_destroy(&barrier);
for (i = 0; i < ARRAY_SIZE(threads); ++i)
ASSERT(0 == uv_thread_join(&threads[i]));
return 0;
}
/* Single thread uv_barrier_wait should return correct return value. */
TEST_IMPL(barrier_serial_thread_single) {
uv_barrier_t barrier;
ASSERT(0 == uv_barrier_init(&barrier, 1));
ASSERT(0 < uv_barrier_wait(&barrier));
uv_barrier_destroy(&barrier);
return 0;
}

deps/uv/test/test-fs.c

@ -26,6 +26,7 @@
#include <string.h> /* memset */
#include <fcntl.h>
#include <sys/stat.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
/* FIXME we shouldn't need to branch in this file */
#if defined(__unix__) || defined(__POSIX__) || \
@ -120,6 +121,31 @@ static char test_buf[] = "test-buffer\n";
static char test_buf2[] = "second-buffer\n";
static uv_buf_t iov;
#ifdef _WIN32
int uv_test_getiovmax(void) {
return INT32_MAX; /* Emulated by libuv, so no real limit. */
}
#else
int uv_test_getiovmax(void) {
#if defined(IOV_MAX)
return IOV_MAX;
#elif defined(_SC_IOV_MAX)
static int iovmax = -1;
if (iovmax == -1) {
iovmax = sysconf(_SC_IOV_MAX);
/* On some embedded devices (arm-linux-uclibc based ip camera),
* sysconf(_SC_IOV_MAX) can not get the correct value. The return
* value is -1 and the errno is EINPROGRESS. Degrade the value to 1.
*/
if (iovmax == -1) iovmax = 1;
}
return iovmax;
#else
return 1024;
#endif
}
#endif
#ifdef _WIN32
/*
* This tag and guid have no special meaning, and don't conflict with
@ -2752,19 +2778,44 @@ TEST_IMPL(fs_write_multiple_bufs) {
memset(buf, 0, sizeof(buf));
memset(buf2, 0, sizeof(buf2));
/* Read the strings back to separate buffers. */
iovs[0] = uv_buf_init(buf, sizeof(test_buf));
iovs[1] = uv_buf_init(buf2, sizeof(test_buf2));
ASSERT(lseek(open_req1.result, 0, SEEK_CUR) == 0);
r = uv_fs_read(NULL, &read_req, open_req1.result, iovs, 2, -1, NULL);
ASSERT(r >= 0);
ASSERT(read_req.result == sizeof(test_buf) + sizeof(test_buf2));
ASSERT(strcmp(buf, test_buf) == 0);
ASSERT(strcmp(buf2, test_buf2) == 0);
uv_fs_req_cleanup(&read_req);
iov = uv_buf_init(buf, sizeof(buf));
r = uv_fs_read(NULL, &read_req, open_req1.result, &iov, 1, -1, NULL);
ASSERT(r == 0);
ASSERT(read_req.result == 0);
uv_fs_req_cleanup(&read_req);
/* Read the strings back to separate buffers. */
iovs[0] = uv_buf_init(buf, sizeof(test_buf));
iovs[1] = uv_buf_init(buf2, sizeof(test_buf2));
r = uv_fs_read(NULL, &read_req, open_req1.result, iovs, 2, 0, NULL);
ASSERT(r >= 0);
ASSERT(read_req.result >= 0);
if (read_req.result == sizeof(test_buf)) {
/* Infer that preadv is not available. */
uv_fs_req_cleanup(&read_req);
r = uv_fs_read(NULL, &read_req, open_req1.result, &iovs[1], 1, read_req.result, NULL);
ASSERT(r >= 0);
ASSERT(read_req.result == sizeof(test_buf2));
} else {
ASSERT(read_req.result == sizeof(test_buf) + sizeof(test_buf2));
}
ASSERT(strcmp(buf, test_buf) == 0);
ASSERT(strcmp(buf2, test_buf2) == 0);
uv_fs_req_cleanup(&read_req);
iov = uv_buf_init(buf, sizeof(buf));
r = uv_fs_read(NULL, &read_req, open_req1.result, &iov, 1,
read_req.result, NULL);
sizeof(test_buf) + sizeof(test_buf2), NULL);
ASSERT(r == 0);
ASSERT(read_req.result == 0);
uv_fs_req_cleanup(&read_req);
@ -2783,12 +2834,15 @@ TEST_IMPL(fs_write_multiple_bufs) {
TEST_IMPL(fs_write_alotof_bufs) {
const size_t iovcount = 54321;
size_t iovcount;
size_t iovmax;
uv_buf_t* iovs;
char* buffer;
size_t index;
int r;
iovcount = 54321;
/* Setup. */
unlink("test_file");
@ -2796,6 +2850,7 @@ TEST_IMPL(fs_write_alotof_bufs) {
iovs = malloc(sizeof(*iovs) * iovcount);
ASSERT(iovs != NULL);
iovmax = uv_test_getiovmax();
r = uv_fs_open(NULL,
&open_req1,
@ -2829,7 +2884,10 @@ TEST_IMPL(fs_write_alotof_bufs) {
iovs[index] = uv_buf_init(buffer + index * sizeof(test_buf),
sizeof(test_buf));
r = uv_fs_read(NULL, &read_req, open_req1.result, iovs, iovcount, 0, NULL);
ASSERT(lseek(open_req1.result, 0, SEEK_SET) == 0);
r = uv_fs_read(NULL, &read_req, open_req1.result, iovs, iovcount, -1, NULL);
if (iovcount > iovmax)
iovcount = iovmax;
ASSERT(r >= 0);
ASSERT((size_t)read_req.result == sizeof(test_buf) * iovcount);
@ -2841,13 +2899,14 @@ TEST_IMPL(fs_write_alotof_bufs) {
uv_fs_req_cleanup(&read_req);
free(buffer);
ASSERT(lseek(open_req1.result, write_req.result, SEEK_SET) == write_req.result);
iov = uv_buf_init(buf, sizeof(buf));
r = uv_fs_read(NULL,
&read_req,
open_req1.result,
&iov,
1,
read_req.result,
-1,
NULL);
ASSERT(r == 0);
ASSERT(read_req.result == 0);
@ -2868,14 +2927,19 @@ TEST_IMPL(fs_write_alotof_bufs) {
TEST_IMPL(fs_write_alotof_bufs_with_offset) {
const size_t iovcount = 54321;
size_t iovcount;
size_t iovmax;
uv_buf_t* iovs;
char* buffer;
size_t index;
int r;
int64_t offset;
char* filler = "0123456789";
int filler_len = strlen(filler);
char* filler;
int filler_len;
filler = "0123456789";
filler_len = strlen(filler);
iovcount = 54321;
/* Setup. */
unlink("test_file");
@ -2884,6 +2948,7 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
iovs = malloc(sizeof(*iovs) * iovcount);
ASSERT(iovs != NULL);
iovmax = uv_test_getiovmax();
r = uv_fs_open(NULL,
&open_req1,
@ -2927,6 +2992,10 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
r = uv_fs_read(NULL, &read_req, open_req1.result,
iovs, iovcount, offset, NULL);
ASSERT(r >= 0);
if (r == sizeof(test_buf))
iovcount = 1; /* Infer that preadv is not available. */
else if (iovcount > iovmax)
iovcount = iovmax;
ASSERT((size_t)read_req.result == sizeof(test_buf) * iovcount);
for (index = 0; index < iovcount; ++index)
@ -2940,7 +3009,7 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
r = uv_fs_stat(NULL, &stat_req, "test_file", NULL);
ASSERT(r == 0);
ASSERT((int64_t)((uv_stat_t*)stat_req.ptr)->st_size ==
offset + (int64_t)(iovcount * sizeof(test_buf)));
offset + (int64_t)write_req.result);
uv_fs_req_cleanup(&stat_req);
iov = uv_buf_init(buf, sizeof(buf));
@ -2949,7 +3018,7 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
open_req1.result,
&iov,
1,
read_req.result + offset,
offset + write_req.result,
NULL);
ASSERT(r == 0);
ASSERT(read_req.result == 0);
@ -2969,6 +3038,175 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) {
}
#ifdef _WIN32
TEST_IMPL(fs_partial_read) {
RETURN_SKIP("Test not implemented on Windows.");
}
TEST_IMPL(fs_partial_write) {
RETURN_SKIP("Test not implemented on Windows.");
}
#else /* !_WIN32 */
struct thread_ctx {
pthread_t pid;
int fd;
char* data;
int size;
int interval;
int doread;
};
static void thread_main(void* arg) {
const struct thread_ctx* ctx;
int size;
char* data;
ctx = (struct thread_ctx*)arg;
size = ctx->size;
data = ctx->data;
while (size > 0) {
ssize_t result;
int nbytes;
nbytes = size < ctx->interval ? size : ctx->interval;
if (ctx->doread) {
result = write(ctx->fd, data, nbytes);
/* Should not see EINTR (or other errors) */
ASSERT(result == nbytes);
} else {
result = read(ctx->fd, data, nbytes);
/* Should not see EINTR (or other errors),
* but might get a partial read if we are faster than the writer
*/
ASSERT(result > 0 && result <= nbytes);
}
pthread_kill(ctx->pid, SIGUSR1);
size -= result;
data += result;
}
}
static void sig_func(uv_signal_t* handle, int signum) {
uv_signal_stop(handle);
}
static size_t uv_test_fs_buf_offset(uv_buf_t* bufs, size_t size) {
size_t offset;
/* Figure out which bufs are done */
for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
size -= bufs[offset].len;
/* Fix a partial read/write */
if (size > 0) {
bufs[offset].base += size;
bufs[offset].len -= size;
}
return offset;
}
static void test_fs_partial(int doread) {
struct thread_ctx ctx;
uv_thread_t thread;
uv_signal_t signal;
int pipe_fds[2];
size_t iovcount;
uv_buf_t* iovs;
char* buffer;
size_t index;
iovcount = 54321;
iovs = malloc(sizeof(*iovs) * iovcount);
ASSERT(iovs != NULL);
ctx.pid = pthread_self();
ctx.doread = doread;
ctx.interval = 1000;
ctx.size = sizeof(test_buf) * iovcount;
ctx.data = malloc(ctx.size);
ASSERT(ctx.data != NULL);
buffer = malloc(ctx.size);
ASSERT(buffer != NULL);
for (index = 0; index < iovcount; ++index)
iovs[index] = uv_buf_init(buffer + index * sizeof(test_buf), sizeof(test_buf));
loop = uv_default_loop();
ASSERT(0 == uv_signal_init(loop, &signal));
ASSERT(0 == uv_signal_start(&signal, sig_func, SIGUSR1));
ASSERT(0 == pipe(pipe_fds));
ctx.fd = pipe_fds[doread];
ASSERT(0 == uv_thread_create(&thread, thread_main, &ctx));
if (doread) {
uv_buf_t* read_iovs;
int nread;
read_iovs = iovs;
nread = 0;
while (nread < ctx.size) {
int result;
result = uv_fs_read(loop, &read_req, pipe_fds[0], read_iovs, iovcount, -1, NULL);
if (result > 0) {
size_t read_iovcount;
read_iovcount = uv_test_fs_buf_offset(read_iovs, result);
read_iovs += read_iovcount;
iovcount -= read_iovcount;
nread += result;
} else {
ASSERT(result == UV_EINTR);
}
uv_fs_req_cleanup(&read_req);
}
} else {
int result;
result = uv_fs_write(loop, &write_req, pipe_fds[1], iovs, iovcount, -1, NULL);
ASSERT(write_req.result == result);
ASSERT(result == ctx.size);
uv_fs_req_cleanup(&write_req);
}
ASSERT(0 == memcmp(buffer, ctx.data, ctx.size));
ASSERT(0 == uv_thread_join(&thread));
ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
ASSERT(0 == close(pipe_fds[1]));
uv_close((uv_handle_t*) &signal, NULL);
{ /* Make sure we read everything that we wrote. */
int result;
result = uv_fs_read(loop, &read_req, pipe_fds[0], iovs, 1, -1, NULL);
ASSERT(result == 0);
uv_fs_req_cleanup(&read_req);
}
ASSERT(0 == close(pipe_fds[0]));
free(iovs);
free(buffer);
free(ctx.data);
MAKE_VALGRIND_HAPPY();
}
TEST_IMPL(fs_partial_read) {
test_fs_partial(1);
return 0;
}
TEST_IMPL(fs_partial_write) {
test_fs_partial(0);
return 0;
}
#endif/* _WIN32 */
TEST_IMPL(fs_read_write_null_arguments) {
int r;
@ -3313,7 +3551,7 @@ TEST_IMPL(fs_exclusive_sharing_mode) {
int call_icacls(const char* command, ...) {
char icacls_command[1024];
va_list args;
va_start(args, command);
vsnprintf(icacls_command, ARRAYSIZE(icacls_command), command, args);
va_end(args);
@ -3335,7 +3573,7 @@ TEST_IMPL(fs_open_readonly_acl) {
attrib -r test_file_icacls
del test_file_icacls
*/
/* Setup - clear the ACL and remove the file */
loop = uv_default_loop();
r = uv_os_get_passwd(&pwd);
@ -3345,7 +3583,7 @@ TEST_IMPL(fs_open_readonly_acl) {
uv_fs_chmod(loop, &req, "test_file_icacls", S_IWUSR, NULL);
unlink("test_file_icacls");
/* Create the file */
/* Create the file */
r = uv_fs_open(loop,
&open_req1,
"test_file_icacls",
@ -3370,7 +3608,7 @@ TEST_IMPL(fs_open_readonly_acl) {
if (r != 0) {
goto acl_cleanup;
}
/* Try opening the file */
r = uv_fs_open(NULL, &open_req1, "test_file_icacls", O_RDONLY, 0, NULL);
if (r < 0) {


@ -37,6 +37,8 @@ TEST_DECLARE (default_loop_close)
TEST_DECLARE (barrier_1)
TEST_DECLARE (barrier_2)
TEST_DECLARE (barrier_3)
TEST_DECLARE (barrier_serial_thread)
TEST_DECLARE (barrier_serial_thread_single)
TEST_DECLARE (condvar_1)
TEST_DECLARE (condvar_2)
TEST_DECLARE (condvar_3)
@ -50,6 +52,7 @@ TEST_DECLARE (tty)
TEST_DECLARE (tty_raw)
TEST_DECLARE (tty_empty_write)
TEST_DECLARE (tty_large_write)
TEST_DECLARE (tty_raw_cancel)
#endif
TEST_DECLARE (tty_file)
TEST_DECLARE (tty_pty)
@ -339,6 +342,8 @@ TEST_DECLARE (get_osfhandle_valid_handle)
TEST_DECLARE (open_osfhandle_valid_handle)
TEST_DECLARE (fs_write_alotof_bufs)
TEST_DECLARE (fs_write_alotof_bufs_with_offset)
TEST_DECLARE (fs_partial_read)
TEST_DECLARE (fs_partial_write)
TEST_DECLARE (fs_file_pos_after_op_with_offset)
TEST_DECLARE (fs_null_req)
#ifdef _WIN32
@ -456,6 +461,8 @@ TASK_LIST_START
TEST_ENTRY (barrier_1)
TEST_ENTRY (barrier_2)
TEST_ENTRY (barrier_3)
TEST_ENTRY (barrier_serial_thread)
TEST_ENTRY (barrier_serial_thread_single)
TEST_ENTRY (condvar_1)
TEST_ENTRY (condvar_2)
TEST_ENTRY (condvar_3)
@ -480,6 +487,7 @@ TASK_LIST_START
TEST_ENTRY (tty_raw)
TEST_ENTRY (tty_empty_write)
TEST_ENTRY (tty_large_write)
TEST_ENTRY (tty_raw_cancel)
#endif
TEST_ENTRY (tty_file)
TEST_ENTRY (tty_pty)
@ -884,6 +892,8 @@ TASK_LIST_START
TEST_ENTRY (fs_write_multiple_bufs)
TEST_ENTRY (fs_write_alotof_bufs)
TEST_ENTRY (fs_write_alotof_bufs_with_offset)
TEST_ENTRY (fs_partial_read)
TEST_ENTRY (fs_partial_write)
TEST_ENTRY (fs_read_write_null_arguments)
TEST_ENTRY (fs_file_pos_after_op_with_offset)
TEST_ENTRY (fs_null_req)


@ -54,8 +54,10 @@ TEST_IMPL(process_priority) {
#ifndef _WIN32
ASSERT(priority == i);
#else
/* On Windows, only elevated users can set UV_PRIORITY_HIGHEST. Other
users will silently be set to UV_PRIORITY_HIGH. */
if (i < UV_PRIORITY_HIGH)
ASSERT(priority == UV_PRIORITY_HIGHEST);
ASSERT(priority == UV_PRIORITY_HIGHEST || priority == UV_PRIORITY_HIGH);
else if (i < UV_PRIORITY_ABOVE_NORMAL)
ASSERT(priority == UV_PRIORITY_HIGH);
else if (i < UV_PRIORITY_NORMAL)
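For context on the Windows clamping this test now tolerates, a small sketch of the priority API involved; the printed messages are illustrative only, while uv_os_getpid(), uv_os_setpriority(), uv_os_getpriority() and the UV_PRIORITY_* constants are the library's API:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_pid_t pid;
  int priority;
  int r;

  pid = uv_os_getpid();
  r = uv_os_setpriority(pid, UV_PRIORITY_HIGHEST);
  if (r != 0)
    fprintf(stderr, "uv_os_setpriority: %s\n", uv_strerror(r));

  r = uv_os_getpriority(pid, &priority);
  if (r != 0)
    fprintf(stderr, "uv_os_getpriority: %s\n", uv_strerror(r));
  else
    /* A non-elevated Windows process typically reads back
     * UV_PRIORITY_HIGH here instead of UV_PRIORITY_HIGHEST. */
    printf("effective priority: %d\n", priority);
  return 0;
}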


@ -310,6 +310,41 @@ TEST_IMPL(tty_large_write) {
MAKE_VALGRIND_HAPPY();
return 0;
}
TEST_IMPL(tty_raw_cancel) {
int r;
int ttyin_fd;
uv_tty_t tty_in;
uv_loop_t* loop;
HANDLE handle;
loop = uv_default_loop();
/* Make sure we have an FD that refers to a tty */
handle = CreateFileA("conin$",
GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
ASSERT(handle != INVALID_HANDLE_VALUE);
ttyin_fd = _open_osfhandle((intptr_t) handle, 0);
ASSERT(ttyin_fd >= 0);
ASSERT(UV_TTY == uv_guess_handle(ttyin_fd));
r = uv_tty_init(uv_default_loop(), &tty_in, ttyin_fd, 1); /* Readable. */
ASSERT(r == 0);
r = uv_tty_set_mode(&tty_in, UV_TTY_MODE_RAW);
ASSERT(r == 0);
r = uv_read_start((uv_stream_t*)&tty_in, tty_raw_alloc, tty_raw_read);
ASSERT(r == 0);
r = uv_read_stop((uv_stream_t*) &tty_in);
ASSERT(r == 0);
MAKE_VALGRIND_HAPPY();
return 0;
}
#endif

deps/uv/uv.gyp

@ -116,7 +116,6 @@
'src/win/poll.c',
'src/win/process.c',
'src/win/process-stdio.c',
'src/win/req.c',
'src/win/req-inl.h',
'src/win/signal.c',
'src/win/snprintf.c',