uv: upgrade to v0.11.4
parent c188a75103
commit 6b654c0b13
2 deps/uv/AUTHORS vendored

@@ -86,3 +86,5 @@ Nils Maier <maierman@web.de>
 Nicholas Vavilov <vvnicholas@gmail.com>
 Miroslav Bajtoš <miro.bajtos@gmail.com>
 Elliot Saba <staticfloat@gmail.com>
+Sean Silva <chisophugis@gmail.com>
+Wynn Wilkes <wynnw@movenetworks.com>
99 deps/uv/ChangeLog vendored

@@ -1,4 +1,101 @@
-2013.05.16, Version 0.11.3 (Unstable)
+2013.05.30, Version 0.11.4 (Unstable)
+
+Changes since version 0.11.3:
+
+* windows: make uv_spawn not fail when the libuv embedding application is run
+  under external job control (Bert Belder)
+
+* darwin: assume CFRunLoopStop() isn't thread-safe, fixing a race condition
+  when stopping the 'stdin select hack' thread (Fedor Indutny)
+
+* win: fix UV_EALREADY not being reported correctly to the libuv user in some
+  cases (Bert Belder)
+
+* darwin: make the uv__cf_loop_runner and uv__cf_loop_cb functions static (Ben
+  Noordhuis)
+
+* darwin: task_info() cannot fail (Ben Noordhuis)
+
+* unix: add error mapping for ENETDOWN (Ben Noordhuis)
+
+* unix: implicitly signal write errors to the libuv user (Ben Noordhuis)
+
+* unix: fix assertion error on signal pipe overflow (Bert Belder)
+
+* unix: turn off POLLOUT after stream connect (Ben Noordhuis)
+
+* unix: fix stream refcounting buglet (Ben Noordhuis)
+
+* unix: remove assert statements that are no longer correct (Ben Noordhuis)
+
+* unix: appease warning about non-standard `inline` (Sean Silva)
+
+* unix: add uv__is_closing() macro (Ben Noordhuis)
+
+* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis)
+
+* include: document uv_update_time() and uv_now() (Ben Noordhuis)
+
+* linux: fix cpu model parsing on newer arm kernels (Ben Noordhuis)
+
+* linux: fix a memory leak in uv_cpu_info() error path (Ben Noordhuis)
+
+* linux: don't ignore out-of-memory errors in uv_cpu_info() (Ben Noordhuis)
+
+* unix, windows: move uv_now() to uv-common.c (Ben Noordhuis)
+
+* test: fix a compilation problem in test-osx-select.c that was caused by the
+  use of c-style comments (Bert Belder)
+
+* darwin: use uv_fs_sendfile() use the sendfile api correctly (Wynn Wilkes)
+
+* windows: call idle handles on every loop iteration, something the unix
+  implementation already did (Bert Belder)
+
+* test: update the idle-starvation test to verify that idle handles are called
+  in every loop iteration (Bert Belder)
+
+* unix, windows: ensure that uv_run() in RUN_ONCE mode calls timers that expire
+  after blocking (Ben Noordhuis)
+
+
+2013.05.29, Version 0.10.9 (Stable), a195f9ace23d92345baf57582678bfc3017e6632
+
+Changes since version 0.10.8:
+
+* unix: fix stream refcounting buglet (Ben Noordhuis)
+
+* unix: remove erroneous asserts (Ben Noordhuis)
+
+* unix: add uv__is_closing() macro (Ben Noordhuis)
+
+* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis)
+
+
+2013.05.25, Version 0.10.8 (Stable), 0f39be12926fe2d8766a9f025797a473003e6504
+
+Changes since version 0.10.7:
+
+* windows: make uv_spawn not fail under job control (Bert Belder)
+
+* darwin: assume CFRunLoopStop() isn't thread-safe (Fedor Indutny)
+
+* win: fix UV_EALREADY incorrectly set (Bert Belder)
+
+* darwin: make two uv__cf_*() functions static (Ben Noordhuis)
+
+* darwin: task_info() cannot fail (Ben Noordhuis)
+
+* unix: add mapping for ENETDOWN (Ben Noordhuis)
+
+* unix: implicitly signal write errors to libuv user (Ben Noordhuis)
+
+* unix: fix assert on signal pipe overflow (Bert Belder)
+
+* unix: turn off POLLOUT after stream connect (Ben Noordhuis)
+
+
+2013.05.16, Version 0.11.3 (Unstable), 0a48c05b5988aea84c605751900926fa25443b34
 
 Changes since version 0.11.2:
 
21 deps/uv/include/uv.h vendored

@@ -283,7 +283,28 @@ UV_EXTERN void uv_ref(uv_handle_t*);
 UV_EXTERN void uv_unref(uv_handle_t*);
 UV_EXTERN int uv_has_ref(const uv_handle_t*);
 
+/*
+ * Update the event loop's concept of "now". Libuv caches the current time
+ * at the start of the event loop tick in order to reduce the number of
+ * time-related system calls.
+ *
+ * You won't normally need to call this function unless you have callbacks
+ * that block the event loop for longer periods of time, where "longer" is
+ * somewhat subjective but probably on the order of a millisecond or more.
+ */
 UV_EXTERN void uv_update_time(uv_loop_t*);
 
+/*
+ * Return the current timestamp in milliseconds. The timestamp is cached at
+ * the start of the event loop tick, see |uv_update_time()| for details and
+ * rationale.
+ *
+ * The timestamp increases monotonically from some arbitrary point in time.
+ * Don't make assumptions about the starting point, you will only get
+ * disappointed.
+ *
+ * Use uv_hrtime() if you need sub-millisecond granularity.
+ */
 UV_EXTERN uint64_t uv_now(uv_loop_t*);
 
 /*
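The new header comments describe libuv's per-tick timestamp caching. Below is a minimal, illustrative sketch of how an embedder would use the documented pair (not part of this commit; the callback name and timer values are made up, and 0.11-era timer callbacks still take an int status argument):

#include <assert.h>
#include <stdio.h>
#include "uv.h"

static void slow_cb(uv_timer_t* handle, int status) {
  uv_loop_t* loop = handle->loop;
  uint64_t before = uv_now(loop);   /* value cached at the start of this tick */

  /* ...imagine a callback that blocks the loop for a while here... */

  assert(uv_now(loop) == before);   /* still the cached timestamp */
  uv_update_time(loop);             /* refresh the cache explicitly */
  printf("loop time advanced %llu ms\n",
         (unsigned long long) (uv_now(loop) - before));
  uv_timer_stop(handle);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, slow_cb, 10, 0);   /* fire once after 10 ms */
  return uv_run(loop, UV_RUN_DEFAULT);
}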
29 deps/uv/src/unix/core.c vendored

@@ -162,7 +162,12 @@ void uv__make_close_pending(uv_handle_t* handle) {
 
 
 static void uv__finish_close(uv_handle_t* handle) {
-  assert(!uv__is_active(handle));
+  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
+   * for it to be active in the sense that uv__is_active() returns true.
+   * A good example is when the user calls uv_shutdown(), immediately followed
+   * by uv_close(). The handle is considered active at this point because the
+   * completion of the shutdown req is still pending.
+   */
   assert(handle->flags & UV_CLOSING);
   assert(!(handle->flags & UV_CLOSED));
   handle->flags |= UV_CLOSED;

@@ -220,7 +225,7 @@ static void uv__run_closing_handles(uv_loop_t* loop) {
 
 
 int uv_is_closing(const uv_handle_t* handle) {
-  return handle->flags & (UV_CLOSING | UV_CLOSED);
+  return uv__is_closing(handle);
 }
 
 

@@ -312,8 +317,21 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
     uv__io_poll(loop, timeout);
     uv__run_check(loop);
     uv__run_closing_handles(loop);
-    r = uv__loop_alive(loop);
 
+    if (mode == UV_RUN_ONCE) {
+      /* UV_RUN_ONCE implies forward progress: at least one callback must have
+       * been invoked when it returns. uv__io_poll() can return without doing
+       * I/O (meaning: no callbacks) when its timeout expires - which means we
+       * have pending timers that satisfy the forward progress constraint.
+       *
+       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+       * the check.
+       */
+      uv__update_time(loop);
+      uv__run_timers(loop);
+    }
+
+    r = uv__loop_alive(loop);
     UV_TICK_STOP(loop, mode);
 
     if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))

@@ -335,11 +353,6 @@ void uv_update_time(uv_loop_t* loop) {
 }
 
 
-uint64_t uv_now(uv_loop_t* loop) {
-  return loop->time;
-}
-
-
 int uv_is_active(const uv_handle_t* handle) {
   return uv__is_active(handle);
 }
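The UV_RUN_ONCE comment above encodes a guarantee that embedders can observe directly. A rough sketch of the behaviour, under assumed names (it mirrors the new timer_run_once test added further down):

#include <assert.h>
#include "uv.h"

static int fired;

static void once_cb(uv_timer_t* handle, int status) {
  fired++;
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, once_cb, 50, 0);  /* due 50 ms from now */

  /* A single UV_RUN_ONCE iteration may block in the poller until the timer
   * is due; after this change it also runs the expired timer before
   * returning, so at least one callback has been made. */
  uv_run(loop, UV_RUN_ONCE);
  assert(fired == 1);
  return 0;
}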
44 deps/uv/src/unix/darwin.c vendored

@@ -38,8 +38,8 @@
 #include <unistd.h>  /* sysconf */
 
 /* Forward declarations */
-void uv__cf_loop_runner(void* arg);
-void uv__cf_loop_cb(void* arg);
+static void uv__cf_loop_runner(void* arg);
+static void uv__cf_loop_cb(void* arg);
 
 typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
 struct uv__cf_loop_signal_s {

@@ -84,9 +84,8 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
   uv__cf_loop_signal_t* s;
 
   assert(loop->cf_loop != NULL);
-  CFRunLoopStop(loop->cf_loop);
+  uv__cf_loop_signal(loop, NULL, NULL);
   uv_thread_join(&loop->cf_thread);
-  loop->cf_loop = NULL;
 
   uv_sem_destroy(&loop->cf_sem);
   uv_mutex_destroy(&loop->cf_mutex);

@@ -103,7 +102,7 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
 }
 
 
-void uv__cf_loop_runner(void* arg) {
+static void uv__cf_loop_runner(void* arg) {
   uv_loop_t* loop;
 
   loop = arg;

@@ -125,7 +124,7 @@ void uv__cf_loop_runner(void* arg) {
 }
 
 
-void uv__cf_loop_cb(void* arg) {
+static void uv__cf_loop_cb(void* arg) {
   uv_loop_t* loop;
   QUEUE* item;
   QUEUE split_head;

@@ -145,7 +144,12 @@ void uv__cf_loop_cb(void* arg) {
     item = QUEUE_HEAD(&split_head);
 
     s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
-    s->cb(s->arg);
+
+    /* This was a termination signal */
+    if (s->cb == NULL)
+      CFRunLoopStop(loop->cf_loop);
+    else
+      s->cb(s->arg);
 
     QUEUE_REMOVE(item);
     free(s);

@@ -253,19 +257,21 @@ void uv_loadavg(double avg[3]) {
 
 
 uv_err_t uv_resident_set_memory(size_t* rss) {
-  struct task_basic_info t_info;
-  mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
-
-  int r = task_info(mach_task_self(),
-                    TASK_BASIC_INFO,
-                    (task_info_t)&t_info,
-                    &t_info_count);
-
-  if (r != KERN_SUCCESS) {
-    return uv__new_sys_error(errno);
-  }
-
-  *rss = t_info.resident_size;
+  mach_msg_type_number_t count;
+  task_basic_info_data_t info;
+  kern_return_t err;
+
+  count = TASK_BASIC_INFO_COUNT;
+  err = task_info(mach_task_self(),
+                  TASK_BASIC_INFO,
+                  (task_info_t) &info,
+                  &count);
+  (void) &err;
+
+  /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
+   * KERN_SUCCESS implies a libuv bug.
+   */
+  assert(err == KERN_SUCCESS);
+  *rss = info.resident_size;
 
   return uv_ok_;
 }
1 deps/uv/src/unix/error.c vendored

@@ -79,6 +79,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
     case EMSGSIZE: return UV_EMSGSIZE;
     case ENAMETOOLONG: return UV_ENAMETOOLONG;
     case EINVAL: return UV_EINVAL;
+    case ENETDOWN: return UV_ENETDOWN;
     case ENETUNREACH: return UV_ENETUNREACH;
    case ECONNABORTED: return UV_ECONNABORTED;
     case ELOOP: return UV_ELOOP;
7 deps/uv/src/unix/fs.c vendored

@@ -436,11 +436,14 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
    * non-blocking mode and not all data could be written. If a non-zero
    * number of bytes have been sent, we don't consider it an error.
    */
-  len = 0;
 
 #if defined(__FreeBSD__)
+  len = 0;
   r = sendfile(in_fd, out_fd, req->off, req->len, NULL, &len, 0);
 #else
+  /* The darwin sendfile takes len as an input for the length to send,
+   * so make sure to initialize it with the caller's value. */
+  len = req->len;
   r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
 #endif
 

@@ -497,7 +500,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
   return r;
 }
 
-static inline void uv__to_stat(struct stat* src, uv_stat_t* dst) {
+static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
   dst->st_dev = src->st_dev;
   dst->st_mode = src->st_mode;
   dst->st_nlink = src->st_nlink;
111 deps/uv/src/unix/linux-core.c vendored

@@ -382,12 +382,12 @@ uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
     return uv__new_sys_error(ENOMEM);
 
   if (read_models(numcpus, ci)) {
-    SAVE_ERRNO(free(ci));
+    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
     return uv__new_sys_error(errno);
   }
 
   if (read_times(numcpus, ci)) {
-    SAVE_ERRNO(free(ci));
+    SAVE_ERRNO(uv_free_cpu_info(ci, numcpus));
     return uv__new_sys_error(errno);
   }
 

@@ -414,76 +414,89 @@ static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
 
 /* Also reads the CPU frequency on x86. The other architectures only have
  * a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
  */
 static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
-#if defined(__i386__) || defined(__x86_64__)
   static const char model_marker[] = "model name\t: ";
   static const char speed_marker[] = "cpu MHz\t\t: ";
-#elif defined(__arm__)
-  static const char model_marker[] = "Processor\t: ";
-  static const char speed_marker[] = "";
-#elif defined(__mips__)
-  static const char model_marker[] = "cpu model\t\t: ";
-  static const char speed_marker[] = "";
-#else
-# warning uv_cpu_info() is not supported on this architecture.
-  static const char model_marker[] = "";
-  static const char speed_marker[] = "";
-#endif
-  static const char bogus_model[] = "unknown";
+  const char* inferred_model;
   unsigned int model_idx;
   unsigned int speed_idx;
   char buf[1024];
   char* model;
   FILE* fp;
-  char* inferred_model;
 
-  fp = fopen("/proc/cpuinfo", "r");
-  if (fp == NULL)
-    return -1;
+  /* Most are unused on non-ARM and non-x86 architectures. */
+  (void) &model_marker;
+  (void) &speed_marker;
+  (void) &speed_idx;
+  (void) &model;
+  (void) &buf;
+  (void) &fp;
 
   model_idx = 0;
   speed_idx = 0;
 
+#if defined(__arm__) || defined(__i386__) || defined(__x86_64__)
+  fp = fopen("/proc/cpuinfo", "r");
+  if (fp == NULL)
+    return -1;
+
   while (fgets(buf, sizeof(buf), fp)) {
-    if (model_marker[0] != '\0' &&
-        model_idx < numcpus &&
-        strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0)
-    {
-      model = buf + sizeof(model_marker) - 1;
-      model = strndup(model, strlen(model) - 1);  /* strip newline */
-      ci[model_idx++].model = model;
-      continue;
+    if (model_idx < numcpus) {
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -1;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
     }
-
-    if (speed_marker[0] != '\0' &&
-        speed_idx < numcpus &&
-        strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0)
-    {
-      ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
-      continue;
+#if defined(__arm__)
+    /* Fallback for pre-3.8 kernels. */
+    if (model_idx < numcpus) {
+      static const char model_marker[] = "Processor\t: ";
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -1;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
     }
+#else  /* !__arm__ */
+    if (speed_idx < numcpus) {
+      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+        continue;
+      }
+    }
+#endif  /* __arm__ */
   }
 
   fclose(fp);
+#endif  /* __arm__ || __i386__ || __x86_64__ */
 
-  /* Now we want to make sure that all the models contain *something*:
-   * it's not safe to leave them as null.
+  /* Now we want to make sure that all the models contain *something* because
+   * it's not safe to leave them as null. Copy the last entry unless there
+   * isn't one, in that case we simply put "unknown" into everything.
    */
-  if (model_idx == 0) {
-    /* No models at all: fake up the first one. */
-    ci[0].model = strndup(bogus_model, sizeof(bogus_model) - 1);
-    model_idx = 1;
-  }
-
-  /* Not enough models, but we do have at least one. So we'll just
-   * copy the rest down: it might be better to indicate somehow that
-   * the remaining ones have been guessed.
-   */
-  inferred_model = ci[model_idx - 1].model;
+  inferred_model = "unknown";
+  if (model_idx > 0)
+    inferred_model = ci[model_idx - 1].model;
 
   while (model_idx < numcpus) {
-    ci[model_idx].model = strndup(inferred_model, strlen(inferred_model));
-    model_idx++;
+    model = strndup(inferred_model, strlen(inferred_model));
+    if (model == NULL)
+      return -1;
+    ci[model_idx++].model = model;
   }
 
   return 0;
4 deps/uv/src/unix/signal.c vendored

@@ -116,7 +116,7 @@ static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
 }
 
 
-inline static uv_signal_t* uv__signal_first_handle(int signum) {
+static uv_signal_t* uv__signal_first_handle(int signum) {
   /* This function must be called with the signal lock held. */
   uv_signal_t lookup;
   uv_signal_t* handle;

@@ -160,7 +160,7 @@ static void uv__signal_handler(int signum) {
   } while (r == -1 && errno == EINTR);
 
   assert(r == sizeof msg ||
-         (r == -1 && errno != EAGAIN && errno != EWOULDBLOCK));
+         (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
 
   if (r != -1)
     handle->caught_signals++;
68 deps/uv/src/unix/stream.c vendored

@@ -60,6 +60,7 @@ static void uv__stream_connect(uv_stream_t*);
 static void uv__write(uv_stream_t* stream);
 static void uv__read(uv_stream_t* stream);
 static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static size_t uv__write_req_size(uv_write_t* req);
 
 
 /* Used by the accept() EMFILE party trick. */

@@ -399,6 +400,7 @@ void uv__stream_destroy(uv_stream_t* stream) {
 
     if (req->bufs != req->bufsml)
       free(req->bufs);
+    req->bufs = NULL;
 
     if (req->cb) {
       uv__set_artificial_error(req->handle->loop, UV_ECANCELED);

@@ -413,6 +415,13 @@ void uv__stream_destroy(uv_stream_t* stream) {
     req = QUEUE_DATA(q, uv_write_t, queue);
     uv__req_unregister(stream->loop, req);
 
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        free(req->bufs);
+      req->bufs = NULL;
+    }
+
     if (req->cb) {
       uv__set_sys_error(stream->loop, req->error);
       req->cb(req, req->error ? -1 : 0);

@@ -420,6 +429,11 @@ void uv__stream_destroy(uv_stream_t* stream) {
   }
 
   if (stream->shutdown_req) {
+    /* The UV_ECANCELED error code is a lie, the shutdown(2) syscall is a
+     * fait accompli at this point. Maybe we should revisit this in v0.11.
+     * A possible reason for leaving it unchanged is that it informs the
+     * callee that the handle has been destroyed.
+     */
     uv__req_unregister(stream->loop, stream->shutdown_req);
     uv__set_artificial_error(stream->loop, UV_ECANCELED);
     stream->shutdown_req->cb(stream->shutdown_req, -1);

@@ -601,8 +615,6 @@ static void uv__drain(uv_stream_t* stream) {
   uv_shutdown_t* req;
 
   assert(QUEUE_EMPTY(&stream->write_queue));
-  assert(stream->write_queue_size == 0);
-
   uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
 
   /* Shutdown? */

@@ -635,6 +647,7 @@ static void uv__drain(uv_stream_t* stream) {
 static size_t uv__write_req_size(uv_write_t* req) {
   size_t size;
 
+  assert(req->bufs != NULL);
   size = uv__buf_count(req->bufs + req->write_index,
                        req->bufcnt - req->write_index);
   assert(req->handle->write_queue_size >= size);

@@ -648,10 +661,18 @@ static void uv__write_req_finish(uv_write_t* req) {
 
   /* Pop the req off tcp->write_queue. */
   QUEUE_REMOVE(&req->queue);
-  if (req->bufs != req->bufsml) {
-    free(req->bufs);
+
+  /* Only free when there was no error. On error, we touch up write_queue_size
+   * right before making the callback. The reason we don't do that right away
+   * is that a write_queue_size > 0 is our only way to signal to the user that
+   * he should stop writing - which he should if we got an error. Something to
+   * revisit in future revisions of the libuv API.
+   */
+  if (req->error == 0) {
+    if (req->bufs != req->bufsml)
+      free(req->bufs);
+    req->bufs = NULL;
   }
-  req->bufs = NULL;
 
   /* Add it to the write_completed_queue where it will have its
    * callback called in the near future.

@@ -687,10 +708,8 @@ start:
 
   assert(uv__stream_fd(stream) >= 0);
 
-  if (QUEUE_EMPTY(&stream->write_queue)) {
-    assert(stream->write_queue_size == 0);
+  if (QUEUE_EMPTY(&stream->write_queue))
     return;
-  }
 
   q = QUEUE_HEAD(&stream->write_queue);
   req = QUEUE_DATA(q, uv_write_t, queue);

@@ -761,8 +780,10 @@ start:
     if (errno != EAGAIN && errno != EWOULDBLOCK) {
       /* Error */
       req->error = errno;
-      stream->write_queue_size -= uv__write_req_size(req);
       uv__write_req_finish(req);
+      uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
+      if (!uv__io_active(&stream->io_watcher, UV__POLLIN))
+        uv__handle_stop(stream);
       return;
     } else if (stream->flags & UV_STREAM_BLOCKING) {
       /* If this is a blocking stream, try again. */

@@ -838,6 +859,13 @@ static void uv__write_callbacks(uv_stream_t* stream) {
     QUEUE_REMOVE(q);
     uv__req_unregister(stream->loop, req);
 
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        free(req->bufs);
+      req->bufs = NULL;
+    }
+
     /* NOTE: call callback AFTER freeing the request data. */
     if (req->cb) {
       uv__set_sys_error(stream->loop, req->error);

@@ -1119,6 +1147,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
 
   stream->connect_req = NULL;
   uv__req_unregister(stream->loop, req);
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
 
   if (req->cb) {
     uv__set_sys_error(stream->loop, error);

@@ -1158,6 +1187,12 @@ int uv_write2(uv_write_t* req,
     return uv__set_artificial_error(stream->loop, UV_EBADF);
   }
 
+  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+   * it means there are error-state requests in the write_completed_queue that
+   * will touch up write_queue_size later, see also uv__write_req_finish().
+   * We could check that write_queue is empty instead but that implies making
+   * a write() syscall when we know that the handle is in error mode.
+   */
   empty_queue = (stream->write_queue_size == 0);
 
   /* Initialize the req */

@@ -1266,9 +1301,20 @@ int uv_read2_start(uv_stream_t* stream, uv_alloc_cb alloc_cb,
 
 
 int uv_read_stop(uv_stream_t* stream) {
-  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
-  uv__handle_stop(stream);
+  /* Sanity check. We're going to stop the handle unless it's primed for
+   * writing but that means there should be some kind of write action in
+   * progress.
+   */
+  assert(!uv__io_active(&stream->io_watcher, UV__POLLOUT) ||
+         !QUEUE_EMPTY(&stream->write_completed_queue) ||
+         !QUEUE_EMPTY(&stream->write_queue) ||
+         stream->shutdown_req != NULL ||
+         stream->connect_req != NULL);
+
   stream->flags &= ~UV_STREAM_READING;
+  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLIN);
+  if (!uv__io_active(&stream->io_watcher, UV__POLLOUT))
+    uv__handle_stop(stream);
 
 #if defined(__APPLE__)
   /* Notify select() thread about state change */
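The uv_write2() comment above treats write_queue_size as the user-visible backpressure signal, which may now stay non-zero briefly even when the queue itself is empty. A hedged sketch of that usage from the embedder side (hypothetical helper names; in 0.11.x the write callback reports failure as a non-zero status):

#include <stdlib.h>
#include "uv.h"

static void write_cb(uv_write_t* req, int status) {
  /* A non-zero status, or a still non-zero write_queue_size, both mean the
   * caller should stop issuing writes for now. */
  free(req);
}

static void send_buf(uv_stream_t* stream, uv_buf_t buf) {
  uv_write_t* req = malloc(sizeof(*req));

  if (uv_write(req, stream, &buf, 1, write_cb) == 0 &&
      stream->write_queue_size > 0) {
    /* The data was queued instead of written synchronously: back off until
     * write_cb has run and the queue has drained. */
  }
}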
5 deps/uv/src/uv-common.c vendored

@@ -434,3 +434,8 @@ int uv_has_ref(const uv_handle_t* handle) {
 void uv_stop(uv_loop_t* loop) {
   loop->stop_flag = 1;
 }
+
+
+uint64_t uv_now(uv_loop_t* loop) {
+  return loop->time;
+}
3 deps/uv/src/uv-common.h vendored

@@ -150,6 +150,9 @@ void uv__fs_poll_close(uv_fs_poll_t* handle);
 #define uv__is_active(h) \
   (((h)->flags & UV__HANDLE_ACTIVE) != 0)
 
+#define uv__is_closing(h) \
+  (((h)->flags & (UV_CLOSING | UV_CLOSED)) != 0)
+
 #define uv__handle_start(h) \
   do { \
     assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \
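uv__is_closing() is the internal counterpart of the public uv_is_closing() rewired earlier in unix/core.c: it reports true from the moment uv_close() is requested until the close callback has run. A small illustrative guard (the callback name is made up for this sketch):

#include "uv.h"

static void close_all_cb(uv_handle_t* handle, void* arg) {
  if (!uv_is_closing(handle))   /* skip handles already being torn down */
    uv_close(handle, NULL);
}

/* Usage: uv_walk(loop, close_all_cb, NULL); then run the loop to completion. */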
2 deps/uv/src/version.c vendored

@@ -31,7 +31,7 @@
 
 #define UV_VERSION_MAJOR 0
 #define UV_VERSION_MINOR 11
-#define UV_VERSION_PATCH 3
+#define UV_VERSION_PATCH 4
 #define UV_VERSION_IS_RELEASE 1
 
 
22 deps/uv/src/win/core.c vendored

@@ -282,15 +282,11 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
     uv_update_time(loop);
     uv_process_timers(loop);
 
-    /* Call idle callbacks if nothing to do. */
-    if (loop->pending_reqs_tail == NULL &&
-        loop->endgame_handles == NULL) {
-      uv_idle_invoke(loop);
-    }
-
     uv_process_reqs(loop);
     uv_process_endgames(loop);
 
+    uv_idle_invoke(loop);
+
     uv_prepare_invoke(loop);
 
     (*poll)(loop, loop->idle_handles == NULL &&

@@ -302,6 +298,20 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
                   !(mode & UV_RUN_NOWAIT));
 
     uv_check_invoke(loop);
+
+    if (mode == UV_RUN_ONCE) {
+      /* UV_RUN_ONCE implies forward progress: at least one callback must have
+       * been invoked when it returns. uv__io_poll() can return without doing
+       * I/O (meaning: no callbacks) when its timeout expires - which means we
+       * have pending timers that satisfy the forward progress constraint.
+       *
+       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+       * the check.
+       */
+      uv_update_time(loop);
+      uv_process_timers(loop);
+    }
+
     r = uv__loop_alive(loop);
     if (mode & (UV_RUN_ONCE | UV_RUN_NOWAIT))
       break;
45 deps/uv/src/win/process.c vendored

@@ -49,7 +49,22 @@ static HANDLE uv_global_job_handle_;
 static uv_once_t uv_global_job_handle_init_guard_ = UV_ONCE_INIT;
 
 
-static void uv__init_global_job_handle() {
+static void uv__init_global_job_handle(void) {
+  /* Create a job object and set it up to kill all contained processes when
+   * it's closed. Since this handle is made non-inheritable and we're not
+   * giving it to anyone, we're the only process holding a reference to it.
+   * That means that if this process exits it is closed and all the processes
+   * it contains are killed. All processes created with uv_spawn that are not
+   * spawned with the UV_PROCESS_DETACHED flag are assigned to this job.
+   *
+   * We're setting the JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag so only the
+   * processes that we explicitly add are affected, and *their* subprocesses
+   * are not. This ensures that our child processes are not limited in their
+   * ability to use job control on Windows versions that don't deal with
+   * nested jobs (prior to Windows 8 / Server 2012). It also lets our child
+   * processes create detached processes without explicitly breaking away
+   * from job control (which uv_spawn doesn't, either).
+   */
   SECURITY_ATTRIBUTES attr;
   JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
 

@@ -920,7 +935,18 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
   }
 
   process_flags = CREATE_UNICODE_ENVIRONMENT;
+
   if (options.flags & UV_PROCESS_DETACHED) {
+    /* Note that we're not setting the CREATE_BREAKAWAY_FROM_JOB flag. That
+     * means that libuv might not let you create a fully daemonized process
+     * when run under job control. However the type of job control that libuv
+     * itself creates doesn't trickle down to subprocesses so they can still
+     * daemonize.
+     *
+     * A reason to not do this is that CREATE_BREAKAWAY_FROM_JOB makes the
+     * CreateProcess call fail if we're under job control that doesn't allow
+     * breakaway.
+     */
     process_flags |= DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP;
   }
 

@@ -943,8 +969,21 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
   if (!(options.flags & UV_PROCESS_DETACHED)) {
     uv_once(&uv_global_job_handle_init_guard_, uv__init_global_job_handle);
 
-    if (!AssignProcessToJobObject(uv_global_job_handle_, info.hProcess))
-      uv_fatal_error(GetLastError(), "AssignProcessToJobObject");
+    if (!AssignProcessToJobObject(uv_global_job_handle_, info.hProcess)) {
+      /* AssignProcessToJobObject might fail if this process is under job
+       * control and the job doesn't have the
+       * JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag set, on a Windows version
+       * that doesn't support nested jobs.
+       *
+       * When that happens we just swallow the error and continue without
+       * establishing a kill-child-on-parent-exit relationship, otherwise
+       * there would be no way for libuv applications run under job control
+       * to spawn processes at all.
+       */
+      DWORD err = GetLastError();
+      if (err != ERROR_ACCESS_DENIED)
+        uv_fatal_error(err, "AssignProcessToJobObject");
+    }
   }
 
   /* Set IPC pid to all IPC pipes. */
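The job-object comments above describe when a spawned child is tied to the parent's lifetime. A rough embedder-side sketch under the 0.11-era uv_spawn() signature (options passed by value; the program name is arbitrary and only for illustration):

#include <string.h>
#include "uv.h"

int main(void) {
  uv_process_t child;
  uv_process_options_t options;
  char* args[] = { "some-child", NULL };

  memset(&options, 0, sizeof(options));
  options.file = "some-child";
  options.args = args;
  options.flags = UV_PROCESS_DETACHED;    /* opt out of the kill-on-exit job */

  if (uv_spawn(uv_default_loop(), &child, options) == 0)
    uv_unref((uv_handle_t*) &child);      /* don't keep the loop alive for it */

  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}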
4 deps/uv/src/win/stream.c vendored

@@ -56,7 +56,7 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
 int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
     uv_read_cb read_cb) {
   if (handle->flags & UV_HANDLE_READING) {
-    uv__set_sys_error(handle->loop, UV_EALREADY);
+    uv__set_artificial_error(handle->loop, UV_EALREADY);
     return -1;
   }
 

@@ -82,7 +82,7 @@ int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
 int uv_read2_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
     uv_read2_cb read_cb) {
   if (handle->flags & UV_HANDLE_READING) {
-    uv__set_sys_error(handle->loop, UV_EALREADY);
+    uv__set_artificial_error(handle->loop, UV_EALREADY);
     return -1;
   }
 
5 deps/uv/src/win/timer.c vendored

@@ -64,11 +64,6 @@ void uv__time_forward(uv_loop_t* loop, uint64_t msecs) {
 }
 
 
-uint64_t uv_now(uv_loop_t* loop) {
-  return loop->time;
-}
-
-
 static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
   if (a->due < b->due)
     return -1;
21 deps/uv/test/test-idle.c vendored

@@ -23,10 +23,12 @@
 #include "task.h"
 
 
-static uv_timer_t timer_handle;
 static uv_idle_t idle_handle;
+static uv_check_t check_handle;
+static uv_timer_t timer_handle;
 
 static int idle_cb_called = 0;
+static int check_cb_called = 0;
 static int timer_cb_called = 0;
 static int close_cb_called = 0;
 

@@ -41,6 +43,7 @@ static void timer_cb(uv_timer_t* handle, int status) {
   ASSERT(status == 0);
 
   uv_close((uv_handle_t*) &idle_handle, close_cb);
+  uv_close((uv_handle_t*) &check_handle, close_cb);
   uv_close((uv_handle_t*) &timer_handle, close_cb);
 
   timer_cb_called++;

@@ -57,6 +60,15 @@ static void idle_cb(uv_idle_t* handle, int status) {
 }
 
 
+static void check_cb(uv_check_t* handle, int status) {
+  ASSERT(handle == &check_handle);
+  ASSERT(status == 0);
+
+  check_cb_called++;
+  LOGF("check_cb %d\n", check_cb_called);
+}
+
+
 TEST_IMPL(idle_starvation) {
   int r;
 

@@ -65,6 +77,11 @@ TEST_IMPL(idle_starvation) {
   r = uv_idle_start(&idle_handle, idle_cb);
   ASSERT(r == 0);
 
+  r = uv_check_init(uv_default_loop(), &check_handle);
+  ASSERT(r == 0);
+  r = uv_check_start(&check_handle, check_cb);
+  ASSERT(r == 0);
+
   r = uv_timer_init(uv_default_loop(), &timer_handle);
   ASSERT(r == 0);
   r = uv_timer_start(&timer_handle, timer_cb, 50, 0);

@@ -75,7 +92,7 @@ TEST_IMPL(idle_starvation) {
 
   ASSERT(idle_cb_called > 0);
   ASSERT(timer_cb_called == 1);
-  ASSERT(close_cb_called == 2);
+  ASSERT(close_cb_called == 3);
 
   MAKE_VALGRIND_HAPPY();
   return 0;
2 deps/uv/test/test-list.h vendored

@@ -101,6 +101,7 @@ TEST_DECLARE (timer_start_twice)
 TEST_DECLARE (timer_order)
 TEST_DECLARE (timer_huge_timeout)
 TEST_DECLARE (timer_huge_repeat)
+TEST_DECLARE (timer_run_once)
 TEST_DECLARE (idle_starvation)
 TEST_DECLARE (loop_handles)
 TEST_DECLARE (get_loadavg)

@@ -350,6 +351,7 @@ TASK_LIST_START
   TEST_ENTRY (timer_order)
   TEST_ENTRY (timer_huge_timeout)
   TEST_ENTRY (timer_huge_repeat)
+  TEST_ENTRY (timer_run_once)
 
   TEST_ENTRY (idle_starvation)
 
2 deps/uv/test/test-osx-select.c vendored

@@ -62,7 +62,7 @@ TEST_IMPL(osx_select) {
 
   uv_read_start((uv_stream_t*) &tty, alloc_cb, read_cb);
 
-  // Emulate user-input
+  /* Emulate user-input */
   str = "got some input\n"
         "with a couple of lines\n"
         "feel pretty happy\n";
28 deps/uv/test/test-timer.c vendored

@@ -264,3 +264,31 @@ TEST_IMPL(timer_huge_repeat) {
   MAKE_VALGRIND_HAPPY();
   return 0;
 }
+
+
+static unsigned int timer_run_once_timer_cb_called;
+
+
+static void timer_run_once_timer_cb(uv_timer_t* handle, int status) {
+  timer_run_once_timer_cb_called++;
+}
+
+
+TEST_IMPL(timer_run_once) {
+  uv_timer_t timer_handle;
+
+  ASSERT(0 == uv_timer_init(uv_default_loop(), &timer_handle));
+  ASSERT(0 == uv_timer_start(&timer_handle, timer_run_once_timer_cb, 0, 0));
+  ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
+  ASSERT(1 == timer_run_once_timer_cb_called);
+
+  ASSERT(0 == uv_timer_start(&timer_handle, timer_run_once_timer_cb, 1, 0));
+  ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
+  ASSERT(2 == timer_run_once_timer_cb_called);
+
+  uv_close((uv_handle_t*) &timer_handle, NULL);
+  ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE));
+
+  MAKE_VALGRIND_HAPPY();
+  return 0;
+}