初始化

This commit is contained in:
zw
2025-06-13 11:34:26 +08:00
parent 08b851951c
commit 66ff3a7c74
492 changed files with 2 additions and 2 deletions

View File

@@ -0,0 +1,290 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#ifdef _WIN32
#include "win/internal.h"
#include "win/handle-inl.h"
#define uv__make_close_pending(h) uv__want_endgame((h)->loop, (h))
#else
#include "unix/internal.h"
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
/* One polling session for a uv_fs_poll_t handle. A fresh context is
 * allocated on every start(); it is torn down from timer_close_cb() once
 * its internal timer handle has been closed. The watched path is stored
 * inline at the end of the allocation.
 */
struct poll_ctx {
  uv_fs_poll_t* parent_handle;  /* handle this context belongs to */
  int busy_polling;             /* 0 = first stat pending, 1 = ok, <0 = last reported error */
  unsigned int interval;        /* poll interval in ms, always >= 1 */
  uint64_t start_time;          /* loop time when the current stat was started */
  uv_loop_t* loop;
  uv_fs_poll_cb poll_cb;        /* user callback */
  uv_timer_t timer_handle;      /* internal timer that drives the next stat */
  uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
  uv_stat_t statbuf;            /* result of the previous successful stat */
  struct poll_ctx* previous; /* context from previous start()..stop() period */
  char path[1]; /* variable length */
};

static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b);
static void poll_cb(uv_fs_t* req);
static void timer_cb(uv_timer_t* timer);
static void timer_close_cb(uv_handle_t* timer);

/* All-zero stat buffer handed to the user callback when a stat fails. */
static uv_stat_t zero_statbuf;
/* Initialize an fs-poll handle. Never fails; the stat machinery is only
 * set up later, in uv_fs_poll_start().
 */
int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
  handle->poll_ctx = NULL;
  return 0;
}
/* Start polling `path` every `interval` milliseconds. Allocates a new
 * poll_ctx that owns a copy of the path, an internal timer and the stat
 * request, then issues the first stat immediately. Starting an already
 * active handle is a no-op that returns 0.
 */
int uv_fs_poll_start(uv_fs_poll_t* handle,
                     uv_fs_poll_cb cb,
                     const char* path,
                     unsigned int interval) {
  struct poll_ctx* ctx;
  uv_loop_t* loop;
  size_t len;
  int err;

  if (uv_is_active((uv_handle_t*)handle))
    return 0;

  loop = handle->loop;
  len = strlen(path);
  /* sizeof(*ctx) already includes one byte for path[1], so `+ len` leaves
   * room for the terminating nul copied below.
   */
  ctx = uv__calloc(1, sizeof(*ctx) + len);

  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->loop = loop;
  ctx->poll_cb = cb;
  /* Clamp to >= 1: poll_cb uses the interval as a modulus divisor. */
  ctx->interval = interval ? interval : 1;
  ctx->start_time = uv_now(loop);
  ctx->parent_handle = handle;
  memcpy(ctx->path, path, len + 1);

  err = uv_timer_init(loop, &ctx->timer_handle);
  if (err < 0)
    goto error;

  /* The timer is an implementation detail: hide it from uv_walk() users
   * and don't let it keep the loop alive on its own.
   */
  ctx->timer_handle.flags |= UV_HANDLE_INTERNAL;
  uv__handle_unref(&ctx->timer_handle);

  err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
  if (err < 0)
    goto error;

  /* Keep any context from a previous start()..stop() cycle chained until
   * its timer_close_cb has run; see timer_close_cb().
   */
  if (handle->poll_ctx != NULL)
    ctx->previous = handle->poll_ctx;
  handle->poll_ctx = ctx;
  uv__handle_start(handle);

  return 0;

error:
  uv__free(ctx);
  return err;
}
/* Stop polling. The context is not freed here: either the timer's close
 * callback or an in-flight stat's poll_cb performs the actual cleanup.
 * Stopping an inactive handle is a no-op.
 */
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
  struct poll_ctx* ctx;

  if (!uv_is_active((uv_handle_t*)handle))
    return 0;

  ctx = handle->poll_ctx;
  assert(ctx != NULL);
  assert(ctx->parent_handle == handle);

  /* Close the timer if it's active. If it's inactive, there's a stat request
   * in progress and poll_cb will take care of the cleanup.
   */
  if (uv_is_active((uv_handle_t*)&ctx->timer_handle))
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);

  uv__handle_stop(handle);

  return 0;
}
/* Copy the currently watched path into `buffer`. On entry *size is the
 * buffer capacity; on success it becomes the path length (excluding the
 * nul, which is written as well). Returns UV_EINVAL for bad arguments or
 * an inactive handle (then *size is set to 0), or UV_ENOBUFS with *size
 * set to the required capacity when the buffer is too small.
 */
int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
  struct poll_ctx* pc;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (!uv_is_active((uv_handle_t*)handle)) {
    *size = 0;
    return UV_EINVAL;
  }

  pc = handle->poll_ctx;
  assert(pc != NULL);

  len = strlen(pc->path);
  if (len >= *size) {
    *size = len + 1;  /* Tell the caller how much room is needed. */
    return UV_ENOBUFS;
  }

  memcpy(buffer, pc->path, len);
  buffer[len] = '\0';
  *size = len;

  return 0;
}
/* Close hook invoked from uv_close(). If no context is left the handle can
 * be marked close-pending right away; otherwise timer_close_cb() does so
 * once the last context has been torn down.
 */
void uv__fs_poll_close(uv_fs_poll_t* handle) {
  uv_fs_poll_stop(handle);

  if (handle->poll_ctx == NULL)
    uv__make_close_pending((uv_handle_t*)handle);
}
/* Poll timer expired: record the start time and kick off the next stat.
 * Starting the stat should never fail here, hence the abort().
 */
static void timer_cb(uv_timer_t* timer) {
  struct poll_ctx* ctx;

  ctx = container_of(timer, struct poll_ctx, timer_handle);
  assert(ctx->parent_handle != NULL);
  assert(ctx->parent_handle->poll_ctx == ctx);
  ctx->start_time = uv_now(ctx->loop);

  if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
    abort();
}
/* Completion callback of the stat request. Reports changes or errors to the
 * user callback, then either re-arms the timer or — when the handle was
 * stopped/closed while the stat was in flight — starts context teardown.
 */
static void poll_cb(uv_fs_t* req) {
  uv_stat_t* statbuf;
  struct poll_ctx* ctx;
  uint64_t interval;
  uv_fs_poll_t* handle;

  ctx = container_of(req, struct poll_ctx, fs_req);
  handle = ctx->parent_handle;

  if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle))
    goto out;

  if (req->result != 0) {
    /* Report an error only when it differs from the last reported result;
     * busy_polling caches that result between polls.
     */
    if (ctx->busy_polling != req->result) {
      ctx->poll_cb(ctx->parent_handle,
                   req->result,
                   &ctx->statbuf,
                   &zero_statbuf);
      ctx->busy_polling = req->result;
    }
    goto out;
  }

  statbuf = &req->statbuf;

  /* busy_polling == 0 means this is the very first stat: just record it.
   * busy_polling < 0 means we're recovering from an error: always report.
   * Otherwise report only when the stat result actually changed.
   */
  if (ctx->busy_polling != 0)
    if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
      ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);

  ctx->statbuf = *statbuf;
  ctx->busy_polling = 1;

out:
  uv_fs_req_cleanup(req);

  if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle)) {
    /* Handle was stopped or closed mid-flight; dispose of the context via
     * the timer close callback.
     */
    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
    return;
  }

  /* Reschedule timer, subtract the delay from doing the stat(). */
  interval = ctx->interval;
  interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;

  if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
    abort();
}
/* Final stage of context teardown, run once the internal timer handle is
 * fully closed. Unlinks the context from the handle's chain of live
 * contexts (current context first, older ones linked via `previous`) and
 * frees it. If this was the last context of a closing handle, the handle
 * itself is marked close-pending.
 */
static void timer_close_cb(uv_handle_t* timer) {
  struct poll_ctx* ctx;
  struct poll_ctx* it;
  struct poll_ctx* last;
  uv_fs_poll_t* handle;

  ctx = container_of(timer, struct poll_ctx, timer_handle);
  handle = ctx->parent_handle;
  if (ctx == handle->poll_ctx) {
    /* Common case: we are the current context. */
    handle->poll_ctx = ctx->previous;
    if (handle->poll_ctx == NULL && uv__is_closing(handle))
      uv__make_close_pending((uv_handle_t*)handle);
  } else {
    /* Walk the previous-context chain to find our predecessor and splice
     * ourselves out.
     */
    for (last = handle->poll_ctx, it = last->previous;
         it != ctx;
         last = it, it = it->previous) {
      assert(last->previous != NULL);
    }
    last->previous = ctx->previous;
  }
  uv__free(ctx);
}
/* Return non-zero when the two stat results agree in every field that
 * uv_fs_poll treats as significant for change detection.
 */
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
  if (a->st_ctim.tv_nsec != b->st_ctim.tv_nsec)
    return 0;
  if (a->st_mtim.tv_nsec != b->st_mtim.tv_nsec)
    return 0;
  if (a->st_birthtim.tv_nsec != b->st_birthtim.tv_nsec)
    return 0;
  if (a->st_ctim.tv_sec != b->st_ctim.tv_sec)
    return 0;
  if (a->st_mtim.tv_sec != b->st_mtim.tv_sec)
    return 0;
  if (a->st_birthtim.tv_sec != b->st_birthtim.tv_sec)
    return 0;
  if (a->st_size != b->st_size)
    return 0;
  if (a->st_mode != b->st_mode)
    return 0;
  if (a->st_uid != b->st_uid)
    return 0;
  if (a->st_gid != b->st_gid)
    return 0;
  if (a->st_ino != b->st_ino)
    return 0;
  if (a->st_dev != b->st_dev)
    return 0;
  if (a->st_flags != b->st_flags)
    return 0;
  if (a->st_gen != b->st_gen)
    return 0;
  return 1;
}
#if defined(_WIN32)
#include "win/internal.h"
#include "win/handle-inl.h"
/* Windows-only endgame step: completes the close of an fs-poll handle after
 * uv__make_close_pending() queued it. NOTE(review): these includes repeat
 * the ones at the top of the file; the guards make that harmless.
 */
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  uv__handle_close(handle);
}
#endif /* _WIN32 */

View File

@@ -0,0 +1,245 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SRC_HEAP_H_
#define UV_SRC_HEAP_H_
#include <stddef.h> /* NULL */
#if defined(__GNUC__)
# define HEAP_EXPORT(declaration) __attribute__((unused)) static declaration
#else
# define HEAP_EXPORT(declaration) static declaration
#endif
/* Intrusive tree node: embed one of these in your own struct and map back
 * to the enclosing object with container_of-style pointer arithmetic.
 */
struct heap_node {
  struct heap_node* left;
  struct heap_node* right;
  struct heap_node* parent;
};

/* A binary min heap. The usual properties hold: the root is the lowest
 * element in the set, the height of the tree is at most log2(nodes) and
 * it's always a complete binary tree.
 *
 * The heap functions try hard to detect corrupted tree nodes at the cost
 * of a minor reduction in performance. Compile with -DNDEBUG to disable.
 */
struct heap {
  struct heap_node* min;  /* Root; the smallest element, NULL when empty. */
  unsigned int nelts;     /* Number of nodes currently in the heap. */
};

/* Return non-zero if a < b. */
typedef int (*heap_compare_fn)(const struct heap_node* a,
                               const struct heap_node* b);

/* Public functions. */
HEAP_EXPORT(void heap_init(struct heap* heap));
HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap));
HEAP_EXPORT(void heap_insert(struct heap* heap,
                             struct heap_node* newnode,
                             heap_compare_fn less_than));
HEAP_EXPORT(void heap_remove(struct heap* heap,
                             struct heap_node* node,
                             heap_compare_fn less_than));
HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than));
/* Implementation follows. */
/* Make `heap` an empty heap. */
HEAP_EXPORT(void heap_init(struct heap* heap)) {
  heap->min = NULL;
  heap->nelts = 0;
}
/* Return the smallest element without removing it; NULL when empty. O(1). */
HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)) {
  return heap->min;
}
/* Swap parent with child. Child moves closer to the root, parent moves away.
 * Works by exchanging the two nodes' link triples wholesale, then patching
 * every affected pointer: the demoted node, its new sibling, its new
 * children, and the downlink from the grandparent (or heap->min).
 */
static void heap_node_swap(struct heap* heap,
                           struct heap_node* parent,
                           struct heap_node* child) {
  struct heap_node* sibling;
  struct heap_node t;

  /* After this three-way swap, `child` holds the parent's old links and
   * vice versa; the fixups below repair the self-references this creates.
   */
  t = *parent;
  *parent = *child;
  *child = t;

  parent->parent = child;
  if (child->left == child) {
    /* The promoted node was the left child; re-point that slot at the
     * demoted node and keep the right subtree as the sibling.
     */
    child->left = parent;
    sibling = child->right;
  } else {
    child->right = parent;
    sibling = child->left;
  }
  if (sibling != NULL)
    sibling->parent = child;

  /* The demoted node inherited the promoted node's children; fix their
   * parent pointers.
   */
  if (parent->left != NULL)
    parent->left->parent = parent;
  if (parent->right != NULL)
    parent->right->parent = parent;

  /* Repair the downlink from above, or the heap root itself. */
  if (child->parent == NULL)
    heap->min = child;
  else if (child->parent->left == parent)
    child->parent->left = child;
  else
    child->parent->right = child;
}
/* Insert `newnode`, keeping the tree complete, then sift it up until the
 * min-heap property holds again. O(log n).
 */
HEAP_EXPORT(void heap_insert(struct heap* heap,
                             struct heap_node* newnode,
                             heap_compare_fn less_than)) {
  struct heap_node** parent;
  struct heap_node** child;
  unsigned int path;
  unsigned int n;
  unsigned int k;

  newnode->left = NULL;
  newnode->right = NULL;
  newnode->parent = NULL;

  /* Calculate the path from the root to the insertion point. This is a min
   * heap so we always insert at the left-most free node of the bottom row.
   * The path is the binary representation of nelts+1 minus its leading 1
   * bit, read LSB-first below: 0 = go left, 1 = go right.
   */
  path = 0;
  for (k = 0, n = 1 + heap->nelts; n >= 2; k += 1, n /= 2)
    path = (path << 1) | (n & 1);

  /* Now traverse the heap using the path we calculated in the previous step. */
  parent = child = &heap->min;
  while (k > 0) {
    parent = child;
    if (path & 1)
      child = &(*child)->right;
    else
      child = &(*child)->left;
    path >>= 1;
    k -= 1;
  }

  /* Insert the new node. */
  newnode->parent = *parent;
  *child = newnode;
  heap->nelts += 1;

  /* Walk up the tree and check at each node if the heap property holds.
   * It's a min heap so parent < child must be true.
   */
  while (newnode->parent != NULL && less_than(newnode, newnode->parent))
    heap_node_swap(heap, newnode->parent, newnode);
}
/* Remove `node` from the heap: unlink the bottom-row right-most node and,
 * unless it was `node` itself, graft it into `node`'s place, then sift it
 * down and up to restore the heap property. O(log n). Passing a node that
 * is not in this heap is undefined behavior.
 */
HEAP_EXPORT(void heap_remove(struct heap* heap,
                             struct heap_node* node,
                             heap_compare_fn less_than)) {
  struct heap_node* smallest;
  struct heap_node** max;
  struct heap_node* child;
  unsigned int path;
  unsigned int k;
  unsigned int n;

  if (heap->nelts == 0)
    return;

  /* Calculate the path from the min (the root) to the max, the left-most node
   * of the bottom row.
   */
  path = 0;
  for (k = 0, n = heap->nelts; n >= 2; k += 1, n /= 2)
    path = (path << 1) | (n & 1);

  /* Now traverse the heap using the path we calculated in the previous step. */
  max = &heap->min;
  while (k > 0) {
    if (path & 1)
      max = &(*max)->right;
    else
      max = &(*max)->left;
    path >>= 1;
    k -= 1;
  }

  heap->nelts -= 1;

  /* Unlink the max node. */
  child = *max;
  *max = NULL;

  if (child == node) {
    /* We're removing either the max or the last node in the tree. */
    if (child == heap->min) {
      heap->min = NULL;
    }
    return;
  }

  /* Replace the to be deleted node with the max node. */
  child->left = node->left;
  child->right = node->right;
  child->parent = node->parent;

  if (child->left != NULL) {
    child->left->parent = child;
  }

  if (child->right != NULL) {
    child->right->parent = child;
  }

  /* Fix the downlink that pointed at the removed node. */
  if (node->parent == NULL) {
    heap->min = child;
  } else if (node->parent->left == node) {
    node->parent->left = child;
  } else {
    node->parent->right = child;
  }

  /* Walk down the subtree and check at each node if the heap property holds.
   * It's a min heap so parent < child must be true. If the parent is bigger,
   * swap it with the smallest child.
   */
  for (;;) {
    smallest = child;
    if (child->left != NULL && less_than(child->left, smallest))
      smallest = child->left;
    if (child->right != NULL && less_than(child->right, smallest))
      smallest = child->right;
    if (smallest == child)
      break;
    heap_node_swap(heap, child, smallest);
  }

  /* Walk up the subtree and check that each parent is less than the node
   * this is required, because `max` node is not guaranteed to be the
   * actual maximum in tree
   */
  while (child->parent != NULL && less_than(child, child->parent))
    heap_node_swap(heap, child->parent, child);
}
/* Remove the smallest element (the root). Safe on an empty heap because
 * heap_remove() returns early when nelts == 0.
 */
HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) {
  heap_remove(heap, heap->min, less_than);
}
#undef HEAP_EXPORT
#endif /* UV_SRC_HEAP_H_ */

View File

@@ -0,0 +1,560 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Derived from https://github.com/bnoordhuis/punycode
* but updated to support IDNA 2008.
*/
#include "uv.h"
#include "uv-common.h"
#include "idna.h"
#include <assert.h>
#include <string.h>
#include <limits.h> /* UINT_MAX */
/* Decode one WTF-8 encoded code point starting at **input. On return,
 * *input points at the LAST byte consumed; the caller advances past it.
 * Returns the code point, or -1 on a malformed sequence. Note: unlike
 * strict UTF-8, this mask-based decode accepts unpaired surrogates
 * (U+D800..U+DFFF), which is exactly what WTF-8 requires.
 */
static int32_t uv__wtf8_decode1(const char** input) {
  uint32_t cp;
  uint8_t lead;
  uint8_t cont;

  lead = **input;
  if (lead <= 0x7F)
    return lead;  /* ASCII; pointer stays on the byte itself. */

  /* 0x80..0xBF can only continue a sequence and 0xC0/0xC1 would start an
   * overlong two-byte encoding: all invalid as lead bytes.
   */
  if (lead < 0xC2)
    return -1;

  cp = lead;

  cont = *++*input;
  if ((cont & 0xC0) != 0x80)
    return -1;
  cp = (cp << 6) | (cont & 0x3F);
  if (lead <= 0xDF)
    return 0x7FF & cp;  /* Two-byte character. */

  cont = *++*input;
  if ((cont & 0xC0) != 0x80)
    return -1;
  cp = (cp << 6) | (cont & 0x3F);
  if (lead <= 0xEF)
    return 0xFFFF & cp;  /* Three-byte character. */

  cont = *++*input;
  if ((cont & 0xC0) != 0x80)
    return -1;
  cp = (cp << 6) | (cont & 0x3F);
  if (lead <= 0xF4) {
    cp &= 0x1FFFFF;
    if (cp <= 0x10FFFF)
      return cp;  /* Four-byte character. */
  }

  /* Lead byte 0xF5..0xFF, or a code point above U+10FFFF. */
  return -1;
}
/* Slow path of uv__utf8_decode1: decode a multi-byte UTF-8 sequence whose
 * lead byte `a` has already been consumed; *p points at the first
 * continuation byte, pe is end-of-input. Returns the code point, or
 * (unsigned)-1 on malformed, overlong, surrogate or out-of-range input.
 *
 * The switch dispatches on the number of bytes remaining so truncated
 * input falls through to `case 0`; for two- and three-byte sequences,
 * synthetic continuation bytes are fabricated so that one shared
 * validation/assembly path below handles all lengths.
 */
static unsigned uv__utf8_decode1_slow(const char** p,
                                      const char* pe,
                                      unsigned a) {
  unsigned b;
  unsigned c;
  unsigned d;
  unsigned min;

  if (a > 0xF7)
    return -1;  /* 0xF8..0xFF can never start a sequence. */

  switch (pe - *p) {
  default:
    if (a > 0xEF) {
      /* Four-byte sequence: three real continuation bytes. */
      min = 0x10000;
      a = a & 7;
      b = (unsigned char) *(*p)++;
      c = (unsigned char) *(*p)++;
      d = (unsigned char) *(*p)++;
      break;
    }
    /* Fall through. */
  case 2:
    if (a > 0xDF) {
      /* Three-byte sequence: lead bits move into the fabricated `b`. */
      min = 0x800;
      b = 0x80 | (a & 15);
      c = (unsigned char) *(*p)++;
      d = (unsigned char) *(*p)++;
      a = 0;
      break;
    }
    /* Fall through. */
  case 1:
    if (a > 0xBF) {
      /* Two-byte sequence. */
      min = 0x80;
      b = 0x80;
      c = 0x80 | (a & 31);
      d = (unsigned char) *(*p)++;
      a = 0;
      break;
    }
    /* Fall through. */
  case 0:
    return -1; /* Invalid continuation byte. */
  }

  /* All three (real or synthetic) continuation bytes must be 10xxxxxx. */
  if (0x80 != (0xC0 & (b ^ c ^ d)))
    return -1; /* Invalid sequence. */

  b &= 63;
  c &= 63;
  d &= 63;
  a = (a << 18) | (b << 12) | (c << 6) | d;

  if (a < min)
    return -1; /* Overlong sequence. */

  if (a > 0x10FFFF)
    return -1; /* Four-byte sequence > U+10FFFF. */

  if (a >= 0xD800 && a <= 0xDFFF)
    return -1; /* Surrogate pair. */

  return a;
}
/* Decode one UTF-8 code point from *p (which must be < pe) and advance *p
 * past the bytes consumed. Returns the code point, or (unsigned)-1 on
 * invalid input (see uv__utf8_decode1_slow for the rejection rules).
 */
unsigned uv__utf8_decode1(const char** p, const char* pe) {
  unsigned a;

  assert(*p < pe);

  a = (unsigned char) *(*p)++;

  if (a < 128)
    return a;  /* ASCII, common case. */

  return uv__utf8_decode1_slow(p, pe, a);
}
/* Punycode-encode (RFC 3492) one label [s, se) into *d, advancing *d but
 * never writing past `de` — output beyond the buffer is silently dropped;
 * the caller checks total space afterwards. Returns the ASCII character
 * count when the label needs no encoding, 0 after successful encoding, or
 * a negative libuv error code. Callers only test for rc < 0.
 */
static int uv__idna_toascii_label(const char* s, const char* se,
                                  char** d, char* de) {
  static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
  const char* ss;
  unsigned c;
  unsigned h;
  unsigned k;
  unsigned n;
  unsigned m;
  unsigned q;
  unsigned t;
  unsigned x;
  unsigned y;
  unsigned bias;
  unsigned delta;
  unsigned todo;
  int first;

  h = 0;     /* Count of ASCII ("basic") code points. */
  ss = s;
  todo = 0;  /* Count of non-ASCII code points still to encode. */

  /* Note: after this loop we've visited all UTF-8 characters and know
   * they're legal so we no longer need to check for decode errors.
   */
  while (s < se) {
    c = uv__utf8_decode1(&s, se);

    if (c == UINT_MAX)
      return UV_EINVAL;

    if (c < 128)
      h++;
    else
      todo++;
  }

  /* Only write "xn--" when there are non-ASCII characters. */
  if (todo > 0) {
    if (*d < de) *(*d)++ = 'x';
    if (*d < de) *(*d)++ = 'n';
    if (*d < de) *(*d)++ = '-';
    if (*d < de) *(*d)++ = '-';
  }

  /* Write ASCII characters. */
  x = 0;
  s = ss;
  while (s < se) {
    c = uv__utf8_decode1(&s, se);
    assert(c != UINT_MAX);

    if (c > 127)
      continue;

    if (*d < de)
      *(*d)++ = c;

    if (++x == h)
      break;  /* Visited all ASCII characters. */
  }

  if (todo == 0)
    return h;

  /* Only write separator when we've written ASCII characters first. */
  if (h > 0)
    if (*d < de)
      *(*d)++ = '-';

  n = 128;   /* Smallest code point not yet handled. */
  bias = 72;
  delta = 0;
  first = 1;

  while (todo > 0) {
    /* Find the smallest unhandled code point; m starts at UINT_MAX. */
    m = -1;
    s = ss;

    while (s < se) {
      c = uv__utf8_decode1(&s, se);
      assert(c != UINT_MAX);

      if (c >= n)
        if (c < m)
          m = c;
    }

    /* Advance delta as if every position had been visited for each code
     * point value in [n, m).
     */
    x = m - n;
    y = h + 1;

    if (x > ~delta / y)
      return UV_E2BIG;  /* Overflow. */

    delta += x * y;
    n = m;

    s = ss;
    while (s < se) {
      c = uv__utf8_decode1(&s, se);
      assert(c != UINT_MAX);

      if (c < n)
        if (++delta == 0)
          return UV_E2BIG;  /* Overflow. */

      if (c != n)
        continue;

      /* Emit delta as a variable-length integer in base-36 digits. */
      for (k = 36, q = delta; /* empty */; k += 36) {
        t = 1;

        if (k > bias)
          t = k - bias;

        if (t > 26)
          t = 26;

        if (q < t)
          break;

        /* TODO(bnoordhuis) Since 1 <= t <= 26 and therefore
         * 10 <= y <= 35, we can optimize the long division
         * into a table-based reciprocal multiplication.
         */
        x = q - t;
        y = 36 - t;  /* 10 <= y <= 35 since 1 <= t <= 26. */
        q = x / y;
        t = t + x % y;  /* 1 <= t <= 35 because of y. */

        if (*d < de)
          *(*d)++ = alphabet[t];
      }

      if (*d < de)
        *(*d)++ = alphabet[q];

      /* Bias adaptation, cf. RFC 3492 section 6.1. */
      delta /= 2;

      if (first) {
        delta /= 350;
        first = 0;
      }

      /* No overflow check is needed because |delta| was just
       * divided by 2 and |delta+delta >= delta + delta/h|.
       */
      h++;
      delta += delta / h;

      for (bias = 0; delta > 35 * 26 / 2; bias += 36)
        delta /= 35;

      bias += 36 * delta / (delta + 38);
      delta = 0;
      todo--;
    }

    delta++;
    n++;
  }

  return 0;
}
/* Convert the UTF-8 domain name [s, se) to its IDNA/Punycode ASCII form,
 * label by label, writing into [d, de). The output is always
 * nul-terminated on success. Returns the number of bytes written including
 * the trailing nul, or a negative error code (UV_EINVAL on empty/invalid
 * input or insufficient space).
 */
ssize_t uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
  const char* si;
  const char* st;
  unsigned c;
  char* ds;
  int rc;

  if (s == se)
    return UV_EINVAL;

  ds = d;

  si = s;
  while (si < se) {
    st = si;
    c = uv__utf8_decode1(&si, se);
    if (c == UINT_MAX)
      return UV_EINVAL;

    /* Cascaded ifs: anything that is not one of the four recognized label
     * separators is ordinary label content.
     */
    if (c != '.')
      if (c != 0x3002)  /* Ideographic full stop. */
        if (c != 0xFF0E)  /* Fullwidth full stop. */
          if (c != 0xFF61)  /* Halfwidth ideographic full stop. */
            continue;

    rc = uv__idna_toascii_label(s, st, &d, de);
    if (rc < 0)
      return rc;
    if (d < de)
      *d++ = '.';
    s = si;
  }

  /* Encode the trailing label, if any (name did not end in a dot). */
  if (s < se) {
    rc = uv__idna_toascii_label(s, se, &d, de);
    if (rc < 0)
      return rc;
  }

  if (d >= de)
    return UV_EINVAL;

  *d++ = '\0';
  return d - ds;  /* Number of bytes written. */
}
/* Return the number of UTF-16 code units needed for the nul-terminated
 * WTF-8 string, counting the terminating nul as one unit, or -1 when the
 * input is not valid WTF-8. Supplementary-plane code points count as two
 * units (a surrogate pair).
 */
ssize_t uv_wtf8_length_as_utf16(const char* source_ptr) {
  size_t units;
  int32_t cp;

  units = 0;
  for (;;) {
    cp = uv__wtf8_decode1(&source_ptr);
    if (cp < 0)
      return -1;
    units += (cp > 0xFFFF) ? 2 : 1;
    if (*source_ptr++ == '\0')
      break;
  }
  return units;
}
/* Decode nul-terminated WTF-8 into UTF-16, writing exactly w_target_len
 * elements including the terminating nul. The caller must have sized
 * w_target with uv_wtf8_length_as_utf16() first; this function asserts,
 * rather than checks, that the input is valid.
 */
void uv_wtf8_to_utf16(const char* source_ptr,
                      uint16_t* w_target,
                      size_t w_target_len) {
  int32_t code_point;

  do {
    code_point = uv__wtf8_decode1(&source_ptr);
    /* uv_wtf8_length_as_utf16 should have been called and checked first. */
    assert(code_point >= 0);
    if (code_point > 0xFFFF) {
      /* Bug fix: U+10FFFF is the highest valid code point and is accepted
       * by uv__wtf8_decode1 (`code_point <= 0x10FFFF`); the previous
       * strict `<` comparison made debug builds abort on it.
       */
      assert(code_point <= 0x10FFFF);
      /* Encode as a surrogate pair. */
      *w_target++ = (((code_point - 0x10000) >> 10) + 0xD800);
      *w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00;
      w_target_len -= 2;
    } else {
      *w_target++ = code_point;
      w_target_len -= 1;
    }
  } while (*source_ptr++);

  (void)w_target_len;
  assert(w_target_len == 0);  /* Buffer consumed exactly. */
}
/* Return the code point at w_source_ptr[0]. When it is a high (lead)
 * surrogate followed by a valid low (trail) surrogate — and at least two
 * elements are available (w_source_len != 1; negative means
 * nul-terminated) — the pair is combined into a supplementary-plane code
 * point. An unpaired surrogate is returned as-is (WTF-8 semantics).
 */
static int32_t uv__get_surrogate_value(const uint16_t* w_source_ptr,
                                       ssize_t w_source_len) {
  uint16_t lead;
  uint16_t trail;

  lead = w_source_ptr[0];
  if (lead < 0xD800 || lead > 0xDBFF || w_source_len == 1)
    return lead;

  trail = w_source_ptr[1];
  if (trail < 0xDC00 || trail > 0xDFFF)
    return lead;

  return 0x10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00);
}
/* Return the number of WTF-8 bytes needed for the UTF-16 input. A negative
 * w_source_len means the input is nul-terminated (the nul itself is not
 * counted); a non-negative value is an exact element count.
 */
size_t uv_utf16_length_as_wtf8(const uint16_t* w_source_ptr,
                               ssize_t w_source_len) {
  size_t target_len;
  int32_t code_point;

  target_len = 0;
  while (w_source_len) {
    code_point = uv__get_surrogate_value(w_source_ptr, w_source_len);
    /* Can be invalid UTF-8 but must be valid WTF-8. */
    assert(code_point >= 0);
    if (w_source_len < 0 && code_point == 0)
      break;  /* Terminator of a nul-terminated input. */
    if (code_point < 0x80)
      target_len += 1;
    else if (code_point < 0x800)
      target_len += 2;
    else if (code_point < 0x10000)
      target_len += 3;
    else {
      target_len += 4;
      /* A supplementary-plane code point consumed a surrogate pair, i.e.
       * one extra input element.
       */
      w_source_ptr++;
      if (w_source_len > 0)
        w_source_len--;
    }
    w_source_ptr++;
    if (w_source_len > 0)
      w_source_len--;
  }

  return target_len;
}
/* Convert UTF-16 to WTF-8.
 *
 * Protocol: with *target_ptr == NULL (or target_ptr == NULL), the required
 * length is computed first and, when target_ptr is given, a buffer of that
 * size plus one nul byte is allocated and returned through it. With a
 * caller-supplied buffer, *target_len_ptr must hold its usable length
 * excluding the nul slot. A negative w_source_len means the input is
 * nul-terminated. On a too-small buffer, UV_ENOBUFS is returned and
 * *target_len_ptr is set to the total required length; note a multi-byte
 * sequence may then be truncated mid-character at the buffer's end.
 */
int uv_utf16_to_wtf8(const uint16_t* w_source_ptr,
                     ssize_t w_source_len,
                     char** target_ptr,
                     size_t* target_len_ptr) {
  size_t target_len;
  char* target;
  char* target_end;
  int32_t code_point;

  /* If *target_ptr is provided, then *target_len_ptr must be its length
   * (excluding space for NUL), otherwise we will compute the target_len_ptr
   * length and may return a new allocation in *target_ptr if target_ptr is
   * provided. */
  if (target_ptr == NULL || *target_ptr == NULL) {
    target_len = uv_utf16_length_as_wtf8(w_source_ptr, w_source_len);
    if (target_len_ptr != NULL)
      *target_len_ptr = target_len;
  } else {
    target_len = *target_len_ptr;
  }

  /* Length-only query. */
  if (target_ptr == NULL)
    return 0;

  if (*target_ptr == NULL) {
    /* +1 reserves space for the terminating nul byte. */
    target = uv__malloc(target_len + 1);
    if (target == NULL) {
      return UV_ENOMEM;
    }
    *target_ptr = target;
  } else {
    target = *target_ptr;
  }

  target_end = target + target_len;

  while (target != target_end && w_source_len) {
    code_point = uv__get_surrogate_value(w_source_ptr, w_source_len);
    /* Can be invalid UTF-8 but must be valid WTF-8. */
    assert(code_point >= 0);
    if (w_source_len < 0 && code_point == 0) {
      w_source_len = 0;
      break;
    }
    if (code_point < 0x80) {
      *target++ = code_point;
    } else if (code_point < 0x800) {
      *target++ = 0xC0 | (code_point >> 6);
      if (target == target_end)
        break;
      *target++ = 0x80 | (code_point & 0x3F);
    } else if (code_point < 0x10000) {
      *target++ = 0xE0 | (code_point >> 12);
      if (target == target_end)
        break;
      *target++ = 0x80 | ((code_point >> 6) & 0x3F);
      if (target == target_end)
        break;
      *target++ = 0x80 | (code_point & 0x3F);
    } else {
      *target++ = 0xF0 | (code_point >> 18);
      if (target == target_end)
        break;
      *target++ = 0x80 | ((code_point >> 12) & 0x3F);
      if (target == target_end)
        break;
      *target++ = 0x80 | ((code_point >> 6) & 0x3F);
      if (target == target_end)
        break;
      *target++ = 0x80 | (code_point & 0x3F);
      /* uv__get_surrogate_value consumed 2 input characters */
      w_source_ptr++;
      if (w_source_len > 0)
        w_source_len--;
    }
    /* Bytes written so far; used below when reporting truncation. */
    target_len = target - *target_ptr;
    w_source_ptr++;
    if (w_source_len > 0)
      w_source_len--;
  }

  if (target != target_end && target_len_ptr != NULL)
    /* Did not fill all of the provided buffer, so update the target_len_ptr
     * output with the space used. */
    *target_len_ptr = target - *target_ptr;

  /* Check if input fit into target exactly. */
  if (w_source_len < 0 && target == target_end && w_source_ptr[0] == 0)
    w_source_len = 0;

  *target++ = '\0';

  /* Characters remained after filling the buffer, compute the remaining length now. */
  if (w_source_len) {
    if (target_len_ptr != NULL)
      *target_len_ptr = target_len + uv_utf16_length_as_wtf8(w_source_ptr, w_source_len);
    return UV_ENOBUFS;
  }

  return 0;
}

View File

@@ -0,0 +1,31 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SRC_IDNA_H_
#define UV_SRC_IDNA_H_

/* Decode a single codepoint. Returns the codepoint, or (unsigned) -1
 * (UINT_MAX — the value the implementation actually compares against) on
 * error. |p| is updated on success _and_ error, i.e., bad multi-byte
 * sequences are skipped in their entirety, not just the first bad byte.
 */
unsigned uv__utf8_decode1(const char** p, const char* pe);

/* Convert a UTF-8 domain name to IDNA 2008 / Punycode. A return value >= 0
 * is the number of bytes written to |d|, including the trailing nul byte.
 * A return value < 0 is a libuv error code. |s| and |d| can not overlap.
 */
ssize_t uv__idna_toascii(const char* s, const char* se, char* d, char* de);

#endif /* UV_SRC_IDNA_H_ */

View File

@@ -0,0 +1,298 @@
/*
* Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
* Copyright (c) 1996-1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include "uv.h"
#include "uv-common.h"
#define UV__INET_ADDRSTRLEN 16
#define UV__INET6_ADDRSTRLEN 46
static int inet_ntop4(const unsigned char *src, char *dst, size_t size);
static int inet_ntop6(const unsigned char *src, char *dst, size_t size);
static int inet_pton4(const char *src, unsigned char *dst);
static int inet_pton6(const char *src, unsigned char *dst);
/* Render a binary address (`src`) as text into `dst` for AF_INET or
 * AF_INET6. Returns 0, UV_ENOSPC (from the helpers) when `size` is too
 * small, or UV_EAFNOSUPPORT for other address families.
 */
int uv_inet_ntop(int af, const void* src, char* dst, size_t size) {
  if (af == AF_INET)
    return inet_ntop4(src, dst, size);

  if (af == AF_INET6)
    return inet_ntop6(src, dst, size);

  return UV_EAFNOSUPPORT;
}
/* Format the 4 bytes at `src` as dotted-quad text into `dst`.
 * Returns 0, or UV_ENOSPC when `size` cannot hold the result.
 */
static int inet_ntop4(const unsigned char *src, char *dst, size_t size) {
  char scratch[UV__INET_ADDRSTRLEN];
  int n;

  n = snprintf(scratch, sizeof(scratch), "%u.%u.%u.%u",
               src[0], src[1], src[2], src[3]);
  if (n <= 0 || (size_t) n >= size)
    return UV_ENOSPC;

  uv__strscpy(dst, scratch, size);

  return 0;
}
/* Render a 128-bit IPv6 address (16 bytes at `src`) as text into `dst`.
 * The longest run of two or more zero words is compressed to "::" and an
 * IPv4-mapped/-compatible tail is printed in dotted-quad form. Returns 0,
 * or UV_ENOSPC when `size` is too small.
 */
static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
  /*
   * Note that int32_t and int16_t need only be "at least" large enough
   * to contain a value of the specified size. On some systems, like
   * Crays, there is no such thing as an integer variable with 16 bits.
   * Keep this in mind if you think this function should have been coded
   * to use pointer overlays. All the world's not a VAX.
   */
  char tmp[UV__INET6_ADDRSTRLEN], *tp;
  struct { int base, len; } best, cur;  /* Longest / current zero-word run. */
  unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)];
  int i;

  /*
   * Preprocess:
   * Copy the input (bytewise) array into a wordwise array.
   * Find the longest run of 0x00's in src[] for :: shorthanding.
   */
  memset(words, '\0', sizeof words);
  for (i = 0; i < (int) sizeof(struct in6_addr); i++)
    words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
  best.base = -1;
  best.len = 0;
  cur.base = -1;
  cur.len = 0;
  for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
    if (words[i] == 0) {
      if (cur.base == -1)
        cur.base = i, cur.len = 1;
      else
        cur.len++;
    } else {
      if (cur.base != -1) {
        if (best.base == -1 || cur.len > best.len)
          best = cur;
        cur.base = -1;
      }
    }
  }
  if (cur.base != -1) {
    if (best.base == -1 || cur.len > best.len)
      best = cur;
  }
  /* A single zero word is not worth compressing to "::". */
  if (best.base != -1 && best.len < 2)
    best.base = -1;

  /*
   * Format the result.
   */
  tp = tmp;
  for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
    /* Are we inside the best run of 0x00's? */
    if (best.base != -1 && i >= best.base &&
        i < (best.base + best.len)) {
      if (i == best.base)
        *tp++ = ':';
      continue;
    }
    /* Are we following an initial run of 0x00s or any real hex? */
    if (i != 0)
      *tp++ = ':';
    /* Is this address an encapsulated IPv4? */
    if (i == 6 && best.base == 0 && (best.len == 6 ||
        (best.len == 7 && words[7] != 0x0001) ||
        (best.len == 5 && words[5] == 0xffff))) {
      int err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp));
      if (err)
        return err;
      tp += strlen(tp);
      break;
    }
    tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]);
  }
  /* Was it a trailing run of 0x00's? */
  if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
    *tp++ = ':';
  *tp++ = '\0';
  if ((size_t) (tp - tmp) > size)
    return UV_ENOSPC;
  uv__strscpy(dst, tmp, size);
  return 0;
}
/* Parse textual address `src` into binary `dst` (4 bytes for AF_INET, 16
 * for AF_INET6). For IPv6, anything from '%' onward (a zone/scope id such
 * as "%eth0") is stripped before parsing. Returns 0, UV_EINVAL on bad
 * arguments or malformed input, or UV_EAFNOSUPPORT.
 */
int uv_inet_pton(int af, const char* src, void* dst) {
  if (src == NULL || dst == NULL)
    return UV_EINVAL;

  switch (af) {
  case AF_INET:
    return (inet_pton4(src, dst));
  case AF_INET6: {
    int len;
    char tmp[UV__INET6_ADDRSTRLEN], *s, *p;
    s = (char*) src;
    p = strchr(src, '%');
    if (p != NULL) {
      /* Copy the part before the zone id into tmp and parse that. */
      s = tmp;
      len = p - src;
      if (len > UV__INET6_ADDRSTRLEN-1)
        return UV_EINVAL;
      memcpy(s, src, len);
      s[len] = '\0';
    }
    return inet_pton6(s, dst);
  }
  default:
    return UV_EAFNOSUPPORT;
  }
  /* NOTREACHED */
}
/* Parse a dotted-quad IPv4 string into `dst` (4 bytes). Strict form only:
 * exactly four decimal octets in 0..255, no leading zeros, no trailing
 * characters. Returns 0 or UV_EINVAL.
 */
static int inet_pton4(const char *src, unsigned char *dst) {
  static const char digits[] = "0123456789";
  int saw_digit, octets, ch;
  unsigned char tmp[sizeof(struct in_addr)], *tp;

  saw_digit = 0;
  octets = 0;
  *(tp = tmp) = 0;
  while ((ch = *src++) != '\0') {
    const char *pch;

    if ((pch = strchr(digits, ch)) != NULL) {
      unsigned int nw = *tp * 10 + (pch - digits);

      /* Reject leading zeros ("00", "01"): a second digit while the
       * accumulated octet is still 0.
       */
      if (saw_digit && *tp == 0)
        return UV_EINVAL;

      if (nw > 255)
        return UV_EINVAL;

      *tp = nw;
      if (!saw_digit) {
        if (++octets > 4)
          return UV_EINVAL;
        saw_digit = 1;
      }
    } else if (ch == '.' && saw_digit) {
      if (octets == 4)
        return UV_EINVAL;
      *++tp = 0;
      saw_digit = 0;
    } else
      return UV_EINVAL;
  }
  if (octets < 4)
    return UV_EINVAL;
  memcpy(dst, tmp, sizeof(struct in_addr));
  return 0;
}
/* Parse an IPv6 address, optionally ending in an embedded dotted-quad IPv4
 * tail, into `dst` (16 bytes). "::" zero compression is allowed once; its
 * position is remembered in `colonp` and the bytes after it are shifted to
 * the end of the buffer when parsing completes. Returns 0 or UV_EINVAL.
 */
static int inet_pton6(const char *src, unsigned char *dst) {
  static const char xdigits_l[] = "0123456789abcdef",
                    xdigits_u[] = "0123456789ABCDEF";
  unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
  const char *xdigits, *curtok;
  int ch, seen_xdigits;
  unsigned int val;

  memset((tp = tmp), '\0', sizeof tmp);
  endp = tp + sizeof tmp;
  colonp = NULL;
  /* Leading :: requires some special handling. */
  if (*src == ':')
    if (*++src != ':')
      return UV_EINVAL;
  curtok = src;
  seen_xdigits = 0;
  val = 0;
  while ((ch = *src++) != '\0') {
    const char *pch;

    /* Accept lower- and upper-case hex digits. */
    if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
      pch = strchr((xdigits = xdigits_u), ch);
    if (pch != NULL) {
      val <<= 4;
      val |= (pch - xdigits);
      if (++seen_xdigits > 4)
        return UV_EINVAL;  /* A group has at most 4 hex digits. */
      continue;
    }
    if (ch == ':') {
      curtok = src;
      if (!seen_xdigits) {
        /* Only one "::" may appear. */
        if (colonp)
          return UV_EINVAL;
        colonp = tp;
        continue;
      } else if (*src == '\0') {
        return UV_EINVAL;  /* Trailing single ':'. */
      }
      if (tp + sizeof(uint16_t) > endp)
        return UV_EINVAL;
      /* Commit the accumulated 16-bit group, big-endian. */
      *tp++ = (unsigned char) (val >> 8) & 0xff;
      *tp++ = (unsigned char) val & 0xff;
      seen_xdigits = 0;
      val = 0;
      continue;
    }
    if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
      int err = inet_pton4(curtok, tp);
      if (err == 0) {
        tp += sizeof(struct in_addr);
        seen_xdigits = 0;
        break;  /*%< '\\0' was seen by inet_pton4(). */
      }
    }
    return UV_EINVAL;
  }
  if (seen_xdigits) {
    if (tp + sizeof(uint16_t) > endp)
      return UV_EINVAL;
    *tp++ = (unsigned char) (val >> 8) & 0xff;
    *tp++ = (unsigned char) val & 0xff;
  }
  if (colonp != NULL) {
    /*
     * Since some memmove()'s erroneously fail to handle
     * overlapping regions, we'll do the shift by hand.
     */
    const int n = tp - colonp;
    int i;

    /* "::" must stand for at least one group of zeros. */
    if (tp == endp)
      return UV_EINVAL;
    for (i = 1; i <= n; i++) {
      endp[- i] = colonp[n - i];
      colonp[n - i] = 0;
    }
    tp = endp;
  }
  if (tp != endp)
    return UV_EINVAL;
  memcpy(dst, tmp, sizeof tmp);
  return 0;
}

View File

@@ -0,0 +1,90 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef QUEUE_H_
#define QUEUE_H_

#include <stddef.h>

/* Intrusive circular doubly-linked list. The node type, struct uv__queue
 * (a next/prev pointer pair), is declared elsewhere (uv.h); an empty list
 * is a sentinel node that links to itself. */

/* Recover the enclosing |type| object from a pointer to its embedded
 * queue |field| (the container_of idiom). */
#define uv__queue_data(pointer, type, field) \
  ((type*) ((char*) (pointer) - offsetof(type, field)))

/* Iterate |q| over every node of list |h|. Removing the current node
 * while iterating is not safe with this macro. */
#define uv__queue_foreach(q, h) \
  for ((q) = (h)->next; (q) != (h); (q) = (q)->next)

/* Turn |q| into an empty list (self-linked sentinel). */
static inline void uv__queue_init(struct uv__queue* q) {
  q->next = q;
  q->prev = q;
}

/* Nonzero when |q| contains no elements. */
static inline int uv__queue_empty(const struct uv__queue* q) {
  return q == q->next;
}

/* First element of |q|; the sentinel itself when the list is empty. */
static inline struct uv__queue* uv__queue_head(const struct uv__queue* q) {
  return q->next;
}

/* Successor of node |q|. */
static inline struct uv__queue* uv__queue_next(const struct uv__queue* q) {
  return q->next;
}

/* Append every element of list |n| to the tail of list |h|. |n|'s own
 * links are left stale; callers re-init |n| before reusing it. */
static inline void uv__queue_add(struct uv__queue* h, struct uv__queue* n) {
  h->prev->next = n->next;
  n->next->prev = h->prev;
  h->prev = n->prev;
  h->prev->next = h;
}

/* Move the elements of |h| from |q| (inclusive) through the tail into
 * list |n|, overwriting |n|'s previous contents. */
static inline void uv__queue_split(struct uv__queue* h,
                                   struct uv__queue* q,
                                   struct uv__queue* n) {
  n->prev = h->prev;
  n->prev->next = n;
  n->next = q;
  h->prev = q->prev;
  h->prev->next = h;
  q->prev = n;
}

/* Transfer every element of |h| into |n|, leaving |h| empty. */
static inline void uv__queue_move(struct uv__queue* h, struct uv__queue* n) {
  if (uv__queue_empty(h))
    uv__queue_init(n);
  else
    uv__queue_split(h, h->next, n);
}

/* Insert node |q| at the front of list |h|. */
static inline void uv__queue_insert_head(struct uv__queue* h,
                                         struct uv__queue* q) {
  q->next = h->next;
  q->prev = h;
  q->next->prev = q;
  h->next = q;
}

/* Insert node |q| at the back of list |h|. */
static inline void uv__queue_insert_tail(struct uv__queue* h,
                                         struct uv__queue* q) {
  q->next = h;
  q->prev = h->prev;
  q->prev->next = q;
  h->prev = q;
}

/* Unlink node |q| from its list. |q|'s own pointers are left stale. */
static inline void uv__queue_remove(struct uv__queue* q) {
  q->prev->next = q->next;
  q->next->prev = q->prev;
}

#endif /* QUEUE_H_ */

View File

@@ -0,0 +1,123 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#ifdef _WIN32
# include "win/internal.h"
#else
# include "unix/internal.h"
#endif
/* Fill |buf| with |buflen| bytes of entropy from the preferred source for
 * the build platform, with fallbacks where the primary source may be
 * unavailable at runtime. Returns 0 or a negative UV_ error code. */
static int uv__random(void* buf, size_t buflen) {
  int rc;

#if defined(__PASE__)
  rc = uv__random_readpath("/dev/urandom", buf, buflen);
#elif defined(_AIX) || defined(__QNX__)
  rc = uv__random_readpath("/dev/random", buf, buflen);
#elif defined(__APPLE__) || defined(__OpenBSD__) || \
      (defined(__ANDROID_API__) && __ANDROID_API__ >= 28)
  /* getentropy() first; fall back to /dev/urandom when the libc stub
   * reports the syscall as unimplemented. */
  rc = uv__random_getentropy(buf, buflen);
  if (rc == UV_ENOSYS)
    rc = uv__random_devurandom(buf, buflen);
#elif defined(__NetBSD__)
  rc = uv__random_sysctl(buf, buflen);
#elif defined(__FreeBSD__) || defined(__linux__)
  rc = uv__random_getrandom(buf, buflen);
  if (rc == UV_ENOSYS)
    rc = uv__random_devurandom(buf, buflen);
# if defined(__linux__)
  /* On these errors (e.g. /dev/urandom inaccessible), try the sysctl
   * interface as a last resort. */
  switch (rc) {
    case UV_EACCES:
    case UV_EIO:
    case UV_ELOOP:
    case UV_EMFILE:
    case UV_ENFILE:
    case UV_ENOENT:
    case UV_EPERM:
      rc = uv__random_sysctl(buf, buflen);
      break;
  }
# endif
#elif defined(_WIN32)
  uv__once_init();
  rc = uv__random_rtlgenrandom(buf, buflen);
#else
  rc = uv__random_devurandom(buf, buflen);
#endif

  return rc;
}
/* Threadpool side: generate the random bytes and stash the result code
 * for uv__random_done() to report on the loop thread. */
static void uv__random_work(struct uv__work* w) {
  uv_random_t* request = container_of(w, uv_random_t, work_req);

  request->status = uv__random(request->buf, request->buflen);
}
/* Loop side: unregister the request and invoke the user callback. A
 * threadpool status (e.g. cancellation) takes precedence over the work
 * result. */
static void uv__random_done(struct uv__work* w, int status) {
  uv_random_t* request;

  request = container_of(w, uv_random_t, work_req);
  uv__req_unregister(request->loop);

  if (status == 0)
    status = request->status;

  request->cb(request, status, request->buf, request->buflen);
}
/* Fill |buf| with |buflen| random bytes. With a callback the work runs on
 * the threadpool and completes asynchronously; without one it runs
 * synchronously and the result is returned directly. |flags| must be 0. */
int uv_random(uv_loop_t* loop,
              uv_random_t* req,
              void *buf,
              size_t buflen,
              unsigned flags,
              uv_random_cb cb) {
  /* Reject oversized buffers and unknown flags up front. */
  if (buflen > 0x7FFFFFFFu)
    return UV_E2BIG;

  if (flags != 0)
    return UV_EINVAL;

  /* Synchronous mode: no callback means fill the buffer right here. */
  if (cb == NULL)
    return uv__random(buf, buflen);

  uv__req_init(loop, req, UV_RANDOM);
  req->loop = loop;
  req->cb = cb;
  req->buf = buf;
  req->buflen = buflen;
  req->status = 0;

  uv__work_submit(loop, &req->work_req, UV__WORK_CPU,
                  uv__random_work, uv__random_done);

  return 0;
}

View File

@@ -0,0 +1,38 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "strscpy.h"
#include <limits.h> /* SSIZE_MAX */
/* Copy at most |n-1| bytes of NUL-terminated |s| into |d|, always
 * NUL-terminating unless n == 0. Returns the number of bytes copied (not
 * counting the terminator) or UV_E2BIG when |s| did not fit. */
ssize_t uv__strscpy(char* d, const char* s, size_t n) {
  size_t copied;

  for (copied = 0; copied < n; copied++) {
    d[copied] = s[copied];
    if (d[copied] == '\0')
      return copied > SSIZE_MAX ? UV_E2BIG : (ssize_t) copied;
  }

  if (copied == 0)  /* n == 0: nothing written, nothing to terminate */
    return 0;

  d[copied - 1] = '\0';  /* truncated: terminate in place */
  return UV_E2BIG;
}

View File

@@ -0,0 +1,39 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_STRSCPY_H_
#define UV_STRSCPY_H_

/* Include uv.h for its definitions of size_t and ssize_t.
 * size_t can be obtained directly from <stddef.h> but ssize_t requires
 * some hoop jumping on Windows that I didn't want to duplicate here.
 */
#include "uv.h"

/* Copies up to |n-1| bytes from |s| to |d| and always zero-terminates
 * the result, except when |n==0|. Returns the number of bytes copied
 * (excluding the terminating NUL) or UV_E2BIG if |d| is too small.
 *
 * See https://www.kernel.org/doc/htmldocs/kernel-api/API-strscpy.html
 */
ssize_t uv__strscpy(char* d, const char* s, size_t n);

#endif /* UV_STRSCPY_H_ */

View File

@@ -0,0 +1,52 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdlib.h>
#include "strtok.h"
/* Reentrant strtok() replacement. Splits |str| in place on any byte found
 * in |sep|, remembering the resume position in |*itr| between calls. Pass
 * |str| on the first call and NULL afterwards. Returns the next (possibly
 * empty) token, or NULL once the input is exhausted. */
char* uv__strtok(char* str, const char* sep, char** itr) {
  char* cursor;
  char* token_start;
  const char* delim;

  token_start = (str != NULL) ? str : *itr;
  if (token_start == NULL)
    return NULL;

  for (cursor = token_start; *cursor != '\0'; cursor++) {
    for (delim = sep; *delim != '\0'; delim++) {
      if (*cursor == *delim) {
        /* Terminate the token and park |*itr| just past the separator. */
        *cursor = '\0';
        *itr = cursor + 1;
        return token_start;
      }
    }
  }

  /* No separator left: this is the final token. */
  *itr = NULL;
  return token_start;
}

View File

@@ -0,0 +1,27 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_STRTOK_H_
#define UV_STRTOK_H_

/* Reentrant strtok() replacement: splits |str| in place on any byte in
 * |sep|, keeping the resume position in |*itr| between calls. Pass |str|
 * on the first call and NULL afterwards; returns the next (possibly empty)
 * token, or NULL once the input is exhausted. */
char* uv__strtok(char* str, const char* sep, char** itr);

#endif /* UV_STRTOK_H_ */

View File

@@ -0,0 +1,175 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include <stdlib.h>
#ifndef _WIN32
#include <pthread.h>
#endif
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* Initialize a |count|-thread reusable barrier built from a mutex and a
 * condition variable (used where pthread barriers are missing/broken). */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int rc;
#ifdef _WIN32
  /* On Windows the state lives directly in uv_barrier_t. */
  uv_barrier_t* b;
  b = barrier;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;
#else
  /* On Unix the state is a separately allocated struct _uv_barrier that
   * the public handle points at. */
  struct _uv_barrier* b;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;
#endif

  /* in: threads that have arrived this round; out: threads still leaving
   * the previous round; threshold: arrivals needed to release. */
  b->in = 0;
  b->out = 0;
  b->threshold = count;

  rc = uv_mutex_init(&b->mutex);
  if (rc != 0)
    goto error2;

  /* TODO(vjnash): remove these uv_cond_t casts in v2. */
  rc = uv_cond_init((uv_cond_t*) &b->cond);
  if (rc != 0)
    goto error;

#ifndef _WIN32
  barrier->b = b;
#endif
  return 0;

error:
  uv_mutex_destroy(&b->mutex);
error2:
#ifndef _WIN32
  uv__free(b);
#endif
  return rc;
}
/* Block until |threshold| threads have arrived. Returns nonzero in exactly
 * one of the released threads (the "serial thread"), zero in the rest. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int last;
#ifdef _WIN32
  uv_barrier_t* b;
  b = barrier;
#else
  struct _uv_barrier* b;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
#endif

  uv_mutex_lock(&b->mutex);
  /* Don't join a new round while threads from the previous round are
   * still draining (b->out > 0). */
  while (b->out != 0)
    uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);

  if (++b->in == b->threshold) {
    /* Last arrival: flip the barrier into its "draining" state and wake
     * every waiter. */
    b->in = 0;
    b->out = b->threshold;

    uv_cond_broadcast((uv_cond_t*) &b->cond);
  } else {
    /* Wait until the round completes, i.e. b->in resets to zero. */
    do
      uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
    while (b->in != 0);
  }

  /* The final thread to leave observes out == 0 and reports "last"
   * (mirrors PTHREAD_BARRIER_SERIAL_THREAD), also waking any thread
   * blocked at the drain loop above. */
  last = (--b->out == 0);
  if (last)
    uv_cond_broadcast((uv_cond_t*) &b->cond);

  uv_mutex_unlock(&b->mutex);
  return last;
}
/* Tear down the barrier. Waits for threads still leaving a completed
 * round; destroying while threads are arriving is a caller bug. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
#ifdef _WIN32
  uv_barrier_t* b;
  b = barrier;
#else
  struct _uv_barrier* b;
  b = barrier->b;
#endif

  uv_mutex_lock(&b->mutex);
  /* No thread may be waiting at the barrier when it is destroyed. */
  assert(b->in == 0);
  /* Let threads released by the final round finish leaving. */
  while (b->out != 0)
    uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);

  /* Release-build counterpart of the assert above. */
  if (b->in != 0)
    abort();

  uv_mutex_unlock(&b->mutex);
  uv_mutex_destroy(&b->mutex);
  uv_cond_destroy((uv_cond_t*) &b->cond);

#ifndef _WIN32
  uv__free(barrier->b);
  barrier->b = NULL;
#endif
}
#else
/* Native path: delegate directly to the pthread barrier implementation. */
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int rc = pthread_barrier_init(barrier, NULL, count);
  return UV__ERR(rc);
}
/* Native path: any result other than 0 or the serial-thread marker is a
 * programming error and aborts. Returns nonzero for the serial thread. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int rc = pthread_barrier_wait(barrier);

  if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();

  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
/* Native path: destruction failure indicates misuse; abort rather than
 * continue with a corrupt barrier. */
void uv_barrier_destroy(uv_barrier_t* barrier) {
  int rc = pthread_barrier_destroy(barrier);
  if (rc != 0)
    abort();
}
#endif

View File

@@ -0,0 +1,419 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv-common.h"
#if !defined(_WIN32)
# include "unix/internal.h"
#endif
#include <stdlib.h>
#define MAX_THREADPOOL_SIZE 1024
static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int idle_threads;
static unsigned int slow_io_work_running;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static struct uv__queue exit_message;
static struct uv__queue wq;
static struct uv__queue run_slow_work_message;
static struct uv__queue slow_io_pending_wq;
/* At most half the pool (rounded up) may run slow I/O work concurrently,
 * so CPU-bound work can never be fully starved by slow I/O. */
static unsigned int slow_work_thread_threshold(void) {
  return (nthreads + 1) / 2;
}
/* Sentinel work callback installed on cancelled requests; it must never
 * actually execute, so running it is treated as fatal. */
static void uv__cancelled(struct uv__work* w) {
  abort();
}
/* Thread-pool worker main loop: block on |cond| until work is posted, run
 * requests one at a time, and exit upon dequeuing |exit_message|.
 *
 * To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  struct uv__queue* q;
  int is_slow_work;

  uv_thread_setname("libuv-worker");
  /* Tell init_threads() this worker started; |arg| (the startup semaphore)
   * must not be used afterwards. */
  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  uv_mutex_lock(&mutex);
  for (;;) {
    /* `mutex` should always be locked at this point. */

    /* Keep waiting while either no work is present or only slow I/O
       and we're at the threshold for that. */
    while (uv__queue_empty(&wq) ||
           (uv__queue_head(&wq) == &run_slow_work_message &&
            uv__queue_next(&run_slow_work_message) == &wq &&
            slow_io_work_running >= slow_work_thread_threshold())) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = uv__queue_head(&wq);
    if (q == &exit_message) {
      /* Leave the exit message queued and wake a sibling so every worker
       * eventually sees it. */
      uv_cond_signal(&cond);
      uv_mutex_unlock(&mutex);
      break;
    }

    uv__queue_remove(q);
    uv__queue_init(q);  /* Signal uv_cancel() that the work req is executing. */

    is_slow_work = 0;
    if (q == &run_slow_work_message) {
      /* If we're at the slow I/O threshold, re-schedule until after all
         other work in the queue is done. */
      if (slow_io_work_running >= slow_work_thread_threshold()) {
        uv__queue_insert_tail(&wq, q);
        continue;
      }

      /* If we encountered a request to run slow I/O work but there is none
         to run, that means it's cancelled => Start over. */
      if (uv__queue_empty(&slow_io_pending_wq))
        continue;

      is_slow_work = 1;
      slow_io_work_running++;

      q = uv__queue_head(&slow_io_pending_wq);
      uv__queue_remove(q);
      uv__queue_init(q);

      /* If there is more slow I/O work, schedule it to be run as well. */
      if (!uv__queue_empty(&slow_io_pending_wq)) {
        uv__queue_insert_tail(&wq, &run_slow_work_message);
        if (idle_threads > 0)
          uv_cond_signal(&cond);
      }
    }

    uv_mutex_unlock(&mutex);

    w = uv__queue_data(q, struct uv__work, wq);
    w->work(w);

    /* Hand the finished request back to its loop. Note: only the loop's
     * wq_mutex is held here, never together with the global |mutex|. */
    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    uv__queue_insert_tail(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);

    /* Lock `mutex` since that is expected at the start of the next
     * iteration. */
    uv_mutex_lock(&mutex);
    if (is_slow_work) {
      /* `slow_io_work_running` is protected by `mutex`. */
      slow_io_work_running--;
    }
  }
}
/* Enqueue |q| and wake an idle worker. Slow I/O requests go to a side
 * queue; only a single run_slow_work_message marker represents them in the
 * main queue at any time. Caller must NOT hold |mutex|. */
static void post(struct uv__queue* q, enum uv__work_kind kind) {
  uv_mutex_lock(&mutex);
  if (kind == UV__WORK_SLOW_IO) {
    /* Insert into a separate queue. */
    uv__queue_insert_tail(&slow_io_pending_wq, q);
    if (!uv__queue_empty(&run_slow_work_message)) {
      /* Running slow I/O tasks is already scheduled => Nothing to do here.
         The worker that runs said other task will schedule this one as well. */
      uv_mutex_unlock(&mutex);
      return;
    }
    q = &run_slow_work_message;
  }

  uv__queue_insert_tail(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}
#ifdef __MVS__
/* TODO(itodorov) - zos: revisit when Woz compiler is available. */
__attribute__((destructor))
#endif
/* Shut down the thread pool: post the exit message, join every worker and
 * release all pool resources. No-op when the pool never started. */
void uv__threadpool_cleanup(void) {
  unsigned int i;

  if (nthreads == 0)
    return;

#ifndef __MVS__
  /* TODO(gabylb) - zos: revisit when Woz compiler is available. */
  post(&exit_message, UV__WORK_CPU);
#endif

  /* Workers leave the exit message queued for each other; join them all. */
  for (i = 0; i < nthreads; i++)
    if (uv_thread_join(threads + i))
      abort();

  if (threads != default_threads)
    uv__free(threads);

  uv_mutex_destroy(&mutex);
  uv_cond_destroy(&cond);

  threads = NULL;
  nthreads = 0;
}
/* Start the worker threads. The pool size defaults to 4, can be overridden
 * with the UV_THREADPOOL_SIZE environment variable, and is clamped to
 * [1, MAX_THREADPOOL_SIZE]. Falls back to the static thread array when a
 * larger one cannot be allocated; aborts on unrecoverable init failures. */
static void init_threads(void) {
  uv_thread_options_t config;
  unsigned int i;
  const char* val;
  uv_sem_t sem;
  long requested;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL) {
    /* Bug fix: atoi() turned a negative value into a huge unsigned count
     * that the clamp below silently promoted to MAX_THREADPOOL_SIZE
     * threads. Parse with strtol() and clamp negatives to zero so they
     * fall through to the one-thread minimum instead. */
    requested = strtol(val, NULL, 10);
    if (requested < 0)
      requested = 0;
    if (requested > MAX_THREADPOOL_SIZE)
      requested = MAX_THREADPOOL_SIZE;
    nthreads = (unsigned int) requested;
  }
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  /* Prefer the static array; also fall back to it (with the default size)
   * when allocating a bigger one fails. */
  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  uv__queue_init(&wq);
  uv__queue_init(&slow_io_pending_wq);
  uv__queue_init(&run_slow_work_message);

  /* The semaphore guarantees every worker has started (and posted once in
   * worker()) before this function returns. */
  if (uv_sem_init(&sem, 0))
    abort();

  config.flags = UV_THREAD_HAS_STACK_SIZE;
  config.stack_size = 8u << 20;  /* 8 MB */

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create_ex(threads + i, &config, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}
#ifndef _WIN32
/* pthread_atfork() child handler: re-arm |once| so the child process
 * rebuilds its own thread pool on first use (worker threads do not
 * survive fork()). */
static void reset_once(void) {
  uv_once_t child_once = UV_ONCE_INIT;
  memcpy(&once, &child_once, sizeof(child_once));
}
#endif
/* One-time pool bootstrap, invoked through uv_once(&once, ...). */
static void init_once(void) {
#ifndef _WIN32
  /* Re-initialize the threadpool after fork.
   * Note that this discards the global mutex and condition as well
   * as the work queue.
   */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
#endif
  init_threads();
}
/* Submit |w| to the thread pool: |work| runs on a worker thread, |done|
 * later on |loop|'s thread. Starts the pool lazily on first use. */
void uv__work_submit(uv_loop_t* loop,
                     struct uv__work* w,
                     enum uv__work_kind kind,
                     void (*work)(struct uv__work* w),
                     void (*done)(struct uv__work* w, int status)) {
  uv_once(&once, init_once);

  w->done = done;
  w->work = work;
  w->loop = loop;

  post(&w->wq, kind);
}
/* TODO(bnoordhuis) teach libuv how to cancel file operations
 * that go through io_uring instead of the thread pool.
 */
/* Cancel |w| if no worker has picked it up yet; the request is then
 * re-posted to |loop|'s done queue so its completion callback runs with
 * UV_ECANCELED. Returns UV_EBUSY when it is already running or done. */
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_once(&once, init_once);  /* Ensure |mutex| is initialized. */

  /* Lock order: global pool mutex first, then the loop's wq mutex. */
  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  /* Still cancellable only while queued: linked into a queue AND its work
   * fn not yet cleared/claimed by a worker (see worker()). */
  cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
  if (cancelled)
    uv__queue_remove(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return UV_EBUSY;

  /* Mark with the sentinel so uv__work_done() reports UV_ECANCELED. */
  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  uv__queue_insert_tail(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}
/* Loop-thread async callback: drain the loop's finished-work queue and run
 * each request's done callback (with UV_ECANCELED for cancelled work). */
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  struct uv__queue* q;
  struct uv__queue wq;
  int err;
  int nevents;

  loop = container_of(handle, uv_loop_t, wq_async);
  /* Snapshot the queue under the lock, then process without it so done
   * callbacks are free to submit more work. */
  uv_mutex_lock(&loop->wq_mutex);
  uv__queue_move(&loop->wq, &wq);
  uv_mutex_unlock(&loop->wq_mutex);

  nevents = 0;
  while (!uv__queue_empty(&wq)) {
    q = uv__queue_head(&wq);
    uv__queue_remove(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
    w->done(w, err);
    nevents++;
  }

  /* This check accomplishes 2 things:
   * 1. Even if the queue was empty, the call to uv__work_done() should count
   *    as an event. Which will have been added by the event loop when
   *    calling this callback.
   * 2. Prevents accidental wrap around in case nevents == 0 events == 0.
   */
  if (nevents > 1) {
    /* Subtract 1 to counter the call to uv__work_done(). */
    uv__metrics_inc_events(loop, nevents - 1);
    if (uv__get_internal_fields(loop)->current_timeout == 0)
      uv__metrics_inc_events_waiting(loop, nevents - 1);
  }
}
/* Threadpool-side trampoline: recover the public request from the
 * embedded uv__work and run its user work callback. */
static void uv__queue_work(struct uv__work* w) {
  uv_work_t* request;

  request = container_of(w, uv_work_t, work_req);
  request->work_cb(request);
}
/* Loop-side completion: unregister the request and invoke the optional
 * after-work callback with the final status. */
static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* request = container_of(w, uv_work_t, work_req);

  uv__req_unregister(request->loop);

  if (request->after_work_cb != NULL)
    request->after_work_cb(request, err);
}
/* Queue |work_cb| to run on the thread pool, with |after_work_cb| invoked
 * on the loop thread afterwards (it may be NULL). */
int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  /* The work callback is mandatory; the after-work callback is not. */
  if (work_cb == NULL)
    return UV_EINVAL;

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->after_work_cb = after_work_cb;
  req->work_cb = work_cb;

  uv__work_submit(loop, &req->work_req, UV__WORK_CPU,
                  uv__queue_work, uv__queue_done);
  return 0;
}
/* Attempt to cancel a threadpool-backed request. Per uv__work_cancel(),
 * only requests still queued can be cancelled (UV_EBUSY otherwise);
 * request types that do not use the pool yield UV_EINVAL. */
int uv_cancel(uv_req_t* req) {
  struct uv__work* wreq;
  uv_loop_t* loop;

  /* Map the public request type to its embedded uv__work and owning loop. */
  switch (req->type) {
  case UV_FS:
    loop = ((uv_fs_t*) req)->loop;
    wreq = &((uv_fs_t*) req)->work_req;
    break;
  case UV_GETADDRINFO:
    loop = ((uv_getaddrinfo_t*) req)->loop;
    wreq = &((uv_getaddrinfo_t*) req)->work_req;
    break;
  case UV_GETNAMEINFO:
    loop = ((uv_getnameinfo_t*) req)->loop;
    wreq = &((uv_getnameinfo_t*) req)->work_req;
    break;
  case UV_RANDOM:
    loop = ((uv_random_t*) req)->loop;
    wreq = &((uv_random_t*) req)->work_req;
    break;
  case UV_WORK:
    loop = ((uv_work_t*) req)->loop;
    wreq = &((uv_work_t*) req)->work_req;
    break;
  default:
    return UV_EINVAL;
  }

  return uv__work_cancel(loop, req, wreq);
}

View File

@@ -0,0 +1,200 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include "heap-inl.h"
#include <assert.h>
#include <limits.h>
/* Accessor for the loop's timer min-heap. On Windows loop->timer_heap is
 * itself a pointer; on Unix the heap is embedded in the loop struct, so
 * take its address. */
static struct heap *timer_heap(const uv_loop_t* loop) {
#ifdef _WIN32
  return (struct heap*) loop->timer_heap;
#else
  return (struct heap*) &loop->timer_heap;
#endif
}
/* Heap ordering predicate: earlier deadline first; ties broken by
 * start_id (allocated from loop->timer_counter in uv_timer_start()) so
 * timers with the same deadline fire in start order. */
static int timer_less_than(const struct heap_node* ha,
                           const struct heap_node* hb) {
  const uv_timer_t* lhs;
  const uv_timer_t* rhs;

  lhs = container_of(ha, uv_timer_t, node.heap);
  rhs = container_of(hb, uv_timer_t, node.heap);

  if (lhs->timeout != rhs->timeout)
    return lhs->timeout < rhs->timeout;

  return lhs->start_id < rhs->start_id;
}
/* Register the timer handle with |loop|. It starts out inactive, with no
 * callback, deadline or repeat interval configured. */
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_TIMER);

  handle->repeat = 0;
  handle->timeout = 0;
  handle->timer_cb = NULL;
  uv__queue_init(&handle->node.queue);

  return 0;
}
/* (Re)arm the timer: fire |cb| after |timeout| and then, when |repeat| is
 * nonzero, every |repeat| thereafter. Restarting an active timer simply
 * reschedules it. */
int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   uint64_t timeout,
                   uint64_t repeat) {
  uint64_t clamped_timeout;

  if (uv__is_closing(handle) || cb == NULL)
    return UV_EINVAL;

  uv_timer_stop(handle);

  /* Convert to an absolute deadline; on uint64 overflow, clamp to the
   * maximum representable deadline. */
  clamped_timeout = handle->loop->time + timeout;
  if (clamped_timeout < timeout)
    clamped_timeout = (uint64_t) -1;

  handle->timer_cb = cb;
  handle->timeout = clamped_timeout;
  handle->repeat = repeat;

  /* start_id is the second index to be compared in timer_less_than() */
  handle->start_id = handle->loop->timer_counter++;

  heap_insert(timer_heap(handle->loop),
              (struct heap_node*) &handle->node.heap,
              timer_less_than);
  uv__handle_start(handle);

  return 0;
}
/* Deactivate the timer. Idempotent: safe on a timer that is pending in
 * the heap, parked on uv__run_timers()'s ready queue, or already stopped. */
int uv_timer_stop(uv_timer_t* handle) {
  if (uv__is_active(handle)) {
    /* Pending: remove it from the loop's timer heap. */
    heap_remove(timer_heap(handle->loop),
                (struct heap_node*) &handle->node.heap,
                timer_less_than);
    uv__handle_stop(handle);
  } else {
    /* Possibly on the ready queue; unlinking a self-linked (idle) node
     * is harmless. */
    uv__queue_remove(&handle->node.queue);
  }

  /* Leave the queue node self-linked so a later remove is a no-op. */
  uv__queue_init(&handle->node.queue);

  return 0;
}
/* Restart the timer using its repeat interval as the timeout. A timer
 * that was never started is an error; a non-repeating timer is a no-op. */
int uv_timer_again(uv_timer_t* handle) {
  if (handle->timer_cb == NULL)
    return UV_EINVAL;

  if (handle->repeat != 0) {
    uv_timer_stop(handle);
    uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
  }

  return 0;
}
/* Update the repeat interval; uv_timer_again() and the automatic rearm in
 * uv__run_timers() pick up the new value. Does not reschedule a pending
 * timeout. */
void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
  handle->repeat = repeat;
}
/* Read back the repeat interval set by uv_timer_start()/set_repeat(). */
uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
  return handle->repeat;
}
/* Time remaining until the timer's deadline, relative to the loop's
 * cached clock; an already-expired timer reports zero instead of
 * underflowing. */
uint64_t uv_timer_get_due_in(const uv_timer_t* handle) {
  const uint64_t now = handle->loop->time;

  return (now >= handle->timeout) ? 0 : (handle->timeout - now);
}
/* Poll timeout in ms derived from the nearest timer: -1 (block forever)
 * when no timers exist, 0 when one is already due, otherwise the delta
 * capped at INT_MAX. */
int uv__next_timeout(const uv_loop_t* loop) {
  const struct heap_node* min_node;
  const uv_timer_t* timer;
  uint64_t delta;

  min_node = heap_min(timer_heap(loop));
  if (min_node == NULL)
    return -1; /* block indefinitely */

  timer = container_of(min_node, uv_timer_t, node.heap);
  if (timer->timeout <= loop->time)
    return 0; /* already expired: do not block at all */

  delta = timer->timeout - loop->time;
  return delta > INT_MAX ? INT_MAX : (int) delta;
}
/* Run all timers whose deadline is at or before the loop's current time. */
void uv__run_timers(uv_loop_t* loop) {
  struct heap_node* heap_node;
  uv_timer_t* handle;
  struct uv__queue* queue_node;
  struct uv__queue ready_queue;

  uv__queue_init(&ready_queue);

  /* Phase 1: pop every expired timer off the heap into a local ready
   * queue before invoking any callback, so timers started or stopped
   * from inside a callback cannot disturb the traversal. */
  for (;;) {
    heap_node = heap_min(timer_heap(loop));
    if (heap_node == NULL)
      break;

    handle = container_of(heap_node, uv_timer_t, node.heap);
    if (handle->timeout > loop->time)
      break;

    uv_timer_stop(handle);
    uv__queue_insert_tail(&ready_queue, &handle->node.queue);
  }

  /* Phase 2: rearm repeating timers (via uv_timer_again) and run the
   * callbacks in deadline order. */
  while (!uv__queue_empty(&ready_queue)) {
    queue_node = uv__queue_head(&ready_queue);
    uv__queue_remove(queue_node);
    uv__queue_init(queue_node);
    handle = container_of(queue_node, uv_timer_t, node.queue);

    uv_timer_again(handle);
    handle->timer_cb(handle);
  }
}
/* Handle-close hook: ensure the timer is no longer scheduled. */
void uv__timer_close(uv_timer_t* handle) {
  uv_timer_stop(handle);
}

View File

@@ -0,0 +1,89 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <unistd.h>
#include <procinfo.h>
#include <ctype.h>
extern char* original_exepath;
extern uv_mutex_t process_title_mutex;
extern uv_once_t process_title_mutex_once;
extern void init_process_title_mutex_once(void);
/* High-resolution monotonic clock for AIX, in nanoseconds.
 * The clock type argument is ignored: AIX exposes a single timebase. */
uint64_t uv__hrtime(uv_clocktype_t type) {
  const uint64_t nanos_per_sec = 1000000000;
  timebasestruct_t tb;

  /* Read the raw timebase, then normalize it to seconds/nanoseconds. */
  read_wall_time(&tb, TIMEBASE_SZ);
  time_base_to_time(&tb, TIMEBASE_SZ);

  return (uint64_t) tb.tb_high * nanos_per_sec + tb.tb_low;
}
/*
* We could use a static buffer for the path manipulations that we need outside
* of the function, but this function could be called by multiple consumers and
* we don't want to potentially create a race condition in the use of snprintf.
* There is no direct way of getting the exe path in AIX - either through /procfs
* or through some libc APIs. The below approach is to parse the argv[0]'s pattern
* and use it in conjunction with PATH environment variable to craft one.
*/
int uv_exepath(char* buffer, size_t* size) {
  int res;
  char args[UV__PATH_MAX];
  size_t cached_len;
  struct procsinfo pi;
  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;
  /* original_exepath is cached under the process-title mutex; take it before
   * reading. */
  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);
  if (original_exepath != NULL) {
    cached_len = strlen(original_exepath);
    /* Reserve one byte for the NUL terminator, then clamp to the cached
     * length; on return *size is the number of bytes copied (excluding NUL). */
    *size -= 1;
    if (*size > cached_len)
      *size = cached_len;
    memcpy(buffer, original_exepath, *size);
    buffer[*size] = '\0';
    uv_mutex_unlock(&process_title_mutex);
    return 0;
  }
  uv_mutex_unlock(&process_title_mutex);
  /* No cached path: ask the kernel for this process's argv via getargs(). */
  pi.pi_pid = getpid();
  res = getargs(&pi, sizeof(pi), args, sizeof(args));
  if (res < 0)
    return UV_EINVAL;
  /* Combine argv[0] with the PATH environment variable to craft the path. */
  return uv__search_path(args, buffer, size);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,418 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* This file contains both the uv__async internal infrastructure and the
* user-facing uv_async_t functions.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h> /* snprintf() */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sched.h> /* sched_yield() */
#ifdef __linux__
#include <sys/eventfd.h>
#endif
#if UV__KQUEUE_EVFILT_USER
static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT;
/* Assume EVFILT_USER works until the probe below proves otherwise. */
static int kqueue_evfilt_user_support = 1;
/* One-shot probe (run under kqueue_runtime_detection_guard): register a user
 * event and trigger it on a throwaway kqueue; if the triggered event does not
 * come back as expected, mark EVFILT_USER unsupported so the pipe fallback is
 * used for async wakeups instead. */
static void uv__kqueue_runtime_detection(void) {
  int kq;
  struct kevent ev[2];
  struct timespec timeout = {0, 0};
  /* Perform the runtime detection to ensure that kqueue with
   * EVFILT_USER actually works. */
  kq = kqueue();
  /* ev[0] registers the user event; ev[1] triggers it immediately. */
  EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         EV_ADD | EV_CLEAR, 0, 0, 0);
  EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
         0, NOTE_TRIGGER, 0, 0);
  /* Submit both changes and expect exactly one matching event back. */
  if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
      ev[0].filter != EVFILT_USER ||
      ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
      ev[0].flags & EV_ERROR)
    /* If we wind up here, we can assume that EVFILT_USER is defined but
     * broken on the current system. */
    kqueue_evfilt_user_support = 0;
  uv__close(kq);
}
#endif
static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);
/* Initialize an async handle and register it with the loop.
 * Must be called from the loop thread. Returns 0 or a UV_* error code. */
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  int err;
  /* Lazily create the loop's shared wakeup fd/watcher; no-op if it exists. */
  err = uv__async_start(loop);
  if (err)
    return err;
  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
  handle->async_cb = async_cb;
  handle->pending = 0;
  handle->u.fd = 0; /* This will be used as a busy flag. */
  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
  uv__handle_start(handle);
  return 0;
}
/* Wake the handle's loop from any thread. Thread-safe; multiple concurrent
 * calls may coalesce into a single callback invocation. */
int uv_async_send(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;
  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;
  /* Do a cheap read first. */
  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
    return 0;
  /* Set the loop to busy. The busy counter must be raised BEFORE the pending
   * exchange so uv__async_spin() cannot observe pending==1 with busy==0 while
   * we are still inside this critical section. */
  atomic_fetch_add(busy, 1);
  /* Wake up the other thread's event loop. Only the caller that flips
   * pending 0 -> 1 performs the (comparatively expensive) wakeup. */
  if (atomic_exchange(pending, 1) == 0)
    uv__async_send(handle->loop);
  /* Set the loop to not-busy. */
  atomic_fetch_add(busy, -1);
  return 0;
}
/* Wait for the busy flag to clear before closing.
 * Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
  _Atomic int* pending;
  _Atomic int* busy;
  int i;
  pending = (_Atomic int*) &handle->pending;
  busy = (_Atomic int*) &handle->u.fd;
  /* Set the pending flag first, so no new events will be added by other
   * threads after this function returns. */
  atomic_store(pending, 1);
  for (;;) {
    /* 997 is not completely chosen at random. It's a prime number, acyclic by
     * nature, and should therefore hopefully dampen sympathetic resonance.
     */
    for (i = 0; i < 997; i++) {
      if (atomic_load(busy) == 0)
        return;
      /* Other thread is busy with this handle, spin until it's done. */
      uv__cpu_relax();
    }
    /* Yield the CPU. We may have preempted the other thread while it's
     * inside the critical section and if it's running on the same CPU
     * as us, we'll just burn CPU cycles until the end of our time slice.
     */
    sched_yield();
  }
}
/* Close hook for async handles. Spins until no uv_async_send() is mid-flight
 * on this handle, then unlinks it from the loop's list. */
void uv__async_close(uv_async_t* handle) {
  uv__async_spin(handle);
  uv__queue_remove(&handle->queue);
  uv__handle_stop(handle);
}
/* I/O callback for the loop's shared async wakeup fd: drain the fd, then run
 * the callback of every handle whose pending flag is set. */
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  char buf[1024];
  ssize_t r;
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  _Atomic int *pending;
  assert(w == &loop->async_io_watcher);
#if UV__KQUEUE_EVFILT_USER
  /* With working EVFILT_USER there is no fd payload to drain; skip the loop. */
  for (;!kqueue_evfilt_user_support;) {
#else
  for (;;) {
#endif
    /* Drain the eventfd/pipe until it would block. */
    r = read(w->fd, buf, sizeof(buf));
    if (r == sizeof(buf))
      continue;
    if (r != -1)
      break;
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      break;
    if (errno == EINTR)
      continue;
    abort();
  }
  /* Move the handle list aside so callbacks can safely close/init handles
   * while we iterate; each visited handle is re-inserted first. */
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);
    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);
    /* Atomically fetch and clear pending flag */
    pending = (_Atomic int*) &h->pending;
    if (atomic_exchange(pending, 0) == 0)
      continue;
    if (h->async_cb == NULL)
      continue;
    h->async_cb(h);
  }
}
/* Deliver the cross-thread wakeup: write one byte to the pipe, or on Linux a
 * u64 to the eventfd, or on kqueue platforms trigger the EVFILT_USER event. */
static void uv__async_send(uv_loop_t* loop) {
  const void* buf;
  ssize_t len;
  int fd;
  int r;
  buf = "";
  len = 1;
  fd = loop->async_wfd;
#if defined(__linux__)
  if (fd == -1) {
    /* eventfd in use (async_wfd == -1): it requires an 8-byte counter write. */
    static const uint64_t val = 1;
    buf = &val;
    len = sizeof(val);
    fd = loop->async_io_watcher.fd; /* eventfd */
  }
#elif UV__KQUEUE_EVFILT_USER
  struct kevent ev;
  if (kqueue_evfilt_user_support) {
    fd = loop->async_io_watcher.fd; /* magic number for EVFILT_USER */
    EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
    r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (r == 0)
      return;
    abort();
  }
#endif
  /* Retry writes interrupted by signals. */
  do
    r = write(fd, buf, len);
  while (r == -1 && errno == EINTR);
  if (r == len)
    return;
  /* A full pipe (EAGAIN) is fine: the loop is already due to wake up. */
  if (r == -1)
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return;
  abort();
}
/* Create the loop's shared wakeup mechanism (eventfd, EVFILT_USER placeholder
 * fd, or pipe) and start watching its read end. Idempotent: returns 0
 * immediately if already started. */
static int uv__async_start(uv_loop_t* loop) {
  int pipefd[2];
  int err;
#if UV__KQUEUE_EVFILT_USER
  struct kevent ev;
#endif
  if (loop->async_io_watcher.fd != -1)
    return 0;
#ifdef __linux__
  /* A single eventfd serves as both read and write end (async_wfd stays -1). */
  err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (err < 0)
    return UV__ERR(errno);
  pipefd[0] = err;
  pipefd[1] = -1;
#elif UV__KQUEUE_EVFILT_USER
  uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection);
  if (kqueue_evfilt_user_support) {
    /* In order not to break the generic pattern of I/O polling, a valid
     * file descriptor is required to take up a room in loop->watchers,
     * thus we create one for that, but this fd will not be actually used,
     * it's just a placeholder and magic number which is going to be closed
     * during the cleanup, as other FDs. */
    err = uv__open_cloexec("/", O_RDONLY);
    if (err < 0)
      return err;
    pipefd[0] = err;
    pipefd[1] = -1;
    /* When using EVFILT_USER event to wake up the kqueue, this event must be
     * registered beforehand. Otherwise, calling kevent() to issue an
     * unregistered EVFILT_USER event will get an ENOENT.
     * Since uv__async_send() may happen before uv__io_poll() with multi-threads,
     * we can't defer this registration of EVFILT_USER event as we did for other
     * events, but must perform it right away. */
    EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
    err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
    if (err < 0)
      return UV__ERR(errno);
  } else {
    err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
    if (err < 0)
      return err;
  }
#else
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err < 0)
    return err;
#endif
  err = uv__io_init_start(loop, &loop->async_io_watcher, uv__async_io,
                          pipefd[0], POLLIN);
  if (err < 0) {
    /* Watcher setup failed: release whichever fds we created above. */
    uv__close(pipefd[0]);
    if (pipefd[1] != -1)
      uv__close(pipefd[1]);
    return err;
  }
  loop->async_wfd = pipefd[1];
#if UV__KQUEUE_EVFILT_USER
  /* Prevent the EVFILT_USER event from being added to kqueue redundantly
   * and mistakenly later in uv__io_poll(). */
  if (kqueue_evfilt_user_support)
    loop->async_io_watcher.events = loop->async_io_watcher.pevents;
#endif
  return 0;
}
/* Tear down the loop's async wakeup machinery during loop close. */
void uv__async_stop(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  if (loop->async_io_watcher.fd == -1)
    return;
  /* Make sure no other thread is accessing the async handle fd after the loop
   * cleanup.
   */
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);
    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);
    /* Blocks until any in-flight uv_async_send() on h has finished. */
    uv__async_spin(h);
  }
  /* Close the write end first (unless it is the same fd, e.g. eventfd). */
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }
  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
}
/* Re-create the async wakeup fds in the child after fork(); the inherited
 * ones belong to the parent's kernel state. Returns 0 or a UV_* error. */
int uv__async_fork(uv_loop_t* loop) {
  struct uv__queue queue;
  struct uv__queue* q;
  uv_async_t* h;
  if (loop->async_io_watcher.fd == -1) /* never started */
    return 0;
  uv__queue_move(&loop->async_handles, &queue);
  while (!uv__queue_empty(&queue)) {
    q = uv__queue_head(&queue);
    h = uv__queue_data(q, uv_async_t, queue);
    uv__queue_remove(q);
    uv__queue_insert_tail(&loop->async_handles, q);
    /* The state of any thread that set pending is now likely corrupt in this
     * child because the user called fork, so just clear these flags and move
     * on. Calling most libc functions after `fork` is declared to be undefined
     * behavior anyways, unless async-signal-safe, for multithreaded programs
     * like libuv, and nothing interesting in pthreads is async-signal-safe.
     */
    h->pending = 0;
    /* This is the busy flag, and we just abruptly lost all other threads. */
    h->u.fd = 0;
  }
  /* Recreate these, since they still exist, but belong to the wrong pid now. */
  if (loop->async_wfd != -1) {
    if (loop->async_wfd != loop->async_io_watcher.fd)
      uv__close(loop->async_wfd);
    loop->async_wfd = -1;
  }
  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
  uv__close(loop->async_io_watcher.fd);
  loop->async_io_watcher.fd = -1;
  return uv__async_start(loop);
}
/* Hint to the CPU that we are in a spin-wait loop: reduces power draw and
 * contention with a hyper-thread sibling. Compiles to nothing on
 * architectures without a suitable instruction. */
static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
  /* Apple ppc: plain compiler barrier only. */
  __asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
  /* Drop SMT thread priority, then restore it. */
  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}

View File

@@ -0,0 +1,164 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stddef.h>
#include <ifaddrs.h>
#include <net/if.h>
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
#include <net/if_dl.h>
#endif
#if defined(__HAIKU__)
#define IFF_RUNNING IFF_LINK
#endif
/* Decide whether a getifaddrs() entry should be skipped.
 * exclude_type is UV__EXCLUDE_IFADDR (want protocol addresses) or
 * UV__EXCLUDE_IFPHYS (want link-layer addresses). Returns non-zero to skip. */
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
  /* Skip interfaces that are down or not running. */
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
  /*
   * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
   * equals `AF_LINK`. Otherwise, the result depends on the operating
   * system with `AF_LINK` or `PF_INET`.
   */
  if (exclude_type == UV__EXCLUDE_IFPHYS)
    return (ent->ifa_addr->sa_family != AF_LINK);
#endif
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__) || \
    defined(__HAIKU__)
  /*
   * On BSD getifaddrs returns information related to the raw underlying
   * devices. We're not interested in this information.
   */
  if (ent->ifa_addr->sa_family == AF_LINK)
    return 1;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
  if (ent->ifa_addr->sa_family != PF_INET &&
      ent->ifa_addr->sa_family != PF_INET6)
    return 1;
#endif
  return 0;
}
/* TODO(bnoordhuis) share with linux.c */
/* Enumerate up/running network interfaces.
 * On success *addresses is ONE heap allocation holding the address array
 * followed by all the name strings; free with uv_free_interface_addresses().
 * Returns 0, or a UV_* error code (getifaddrs failure / UV_ENOMEM). */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs* addrs;
  struct ifaddrs* ent;
  size_t namelen;
  char* name;
  *count = 0;
  *addresses = NULL;
  if (getifaddrs(&addrs) != 0)
    return UV__ERR(errno);
  /* Count the number of interfaces and total name-string bytes needed. */
  namelen = 0;
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;
    namelen += strlen(ent->ifa_name) + 1;
    (*count)++;
  }
  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }
  /* Make sure the memory is initialized to zero using calloc() */
  *addresses = uv__calloc(1, *count * sizeof(**addresses) + namelen);
  if (*addresses == NULL) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }
  /* Name strings are packed directly after the address array. */
  name = (char*) &(*addresses)[*count];
  address = *addresses;
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;
    namelen = strlen(ent->ifa_name) + 1;
    address->name = memcpy(name, ent->ifa_name, namelen);
    name += namelen;
    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }
    if (ent->ifa_netmask == NULL) {
      memset(&address->netmask, 0, sizeof(address->netmask));
    } else if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }
    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
    address++;
  }
#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
  /* Fill in physical addresses for each interface: a second pass matches
   * AF_LINK entries to the already-collected entries by name. */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    int i;
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
      continue;
    address = *addresses;
    for (i = 0; i < *count; i++) {
      if (strcmp(address->name, ent->ifa_name) == 0) {
        struct sockaddr_dl* sa_addr;
        sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
      }
      address++;
    }
  }
#endif
  freeifaddrs(addrs);
  return 0;
}
/* TODO(bnoordhuis) share with linux.c */
/* Release the single allocation returned by uv_interface_addresses().
 * `count` is unused: the entries and their name strings share one buffer. */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  (void) count;
  uv__free(addresses);
}

View File

@@ -0,0 +1,99 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/types.h>
#include <unistd.h>
static uv_mutex_t process_title_mutex;
static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static char* process_title;
/* uv_once() initializer: create the mutex that guards process_title. */
static void init_process_title_mutex_once(void) {
  if (uv_mutex_init(&process_title_mutex))
    abort();
}
/* Library teardown hook. The uv_once() call guarantees the mutex exists
 * before it is destroyed, even if no title API was ever used. */
void uv__process_title_cleanup(void) {
  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_destroy(&process_title_mutex);
}
/* Capture argv[0] so uv_get_process_title() has an initial value.
 * Returns argv unchanged. */
char** uv_setup_args(int argc, char** argv) {
  if (argc > 0)
    process_title = uv__strdup(argv[0]);
  else
    process_title = NULL;
  return argv;
}
/* Replace the cached title and push it to the OS via setproctitle().
 * Returns 0 or UV_ENOMEM. */
int uv_set_process_title(const char* title) {
  char* new_title;
  /* Copy before taking the lock: keeps allocation outside the critical
   * section and leaves the old title intact on OOM. */
  new_title = uv__strdup(title);
  if (new_title == NULL)
    return UV_ENOMEM;
  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);
  uv__free(process_title);
  process_title = new_title;
  setproctitle("%s", title);
  uv_mutex_unlock(&process_title_mutex);
  return 0;
}
/* Copy the cached process title into `buffer` (NUL-terminated), or the empty
 * string if no title was ever set. Returns 0, UV_EINVAL on bad arguments, or
 * UV_ENOBUFS if `size` is too small. */
int uv_get_process_title(char* buffer, size_t size) {
  size_t len;
  if (buffer == NULL || size == 0)
    return UV_EINVAL;
  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);
  if (process_title != NULL) {
    len = strlen(process_title) + 1; /* include the terminating NUL */
    if (size < len) {
      uv_mutex_unlock(&process_title_mutex);
      return UV_ENOBUFS;
    }
    /* memcpy copies the NUL terminator too, so no extra write is needed.
     * (The previous code wrote buffer[len] = '\0' after unlocking, which is
     * one byte past the caller's buffer whenever size == len.) */
    memcpy(buffer, process_title, len);
  } else {
    buffer[0] = '\0';
  }
  uv_mutex_unlock(&process_title_mutex);
  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,93 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/sysinfo.h>
#include <unistd.h>
/* System uptime in seconds, via sysinfo(2). Returns 0 or a UV_* error. */
int uv_uptime(double* uptime) {
  struct sysinfo info;
  if (sysinfo(&info) < 0)
    return UV__ERR(errno);
  *uptime = info.uptime;
  return 0;
}
/* Resident set size in bytes, parsed from /proc/self/stat.
 * Returns 0, a uv__slurp() error, UV_EINVAL on parse failure, or a
 * UV__ERR(errno) if sysinfo() fails. */
int uv_resident_set_memory(size_t* rss) {
  char buf[1024];
  const char* s;
  long val;
  int rc;
  int i;
  struct sysinfo si;
  /* rss: 24th element */
  rc = uv__slurp("/proc/self/stat", buf, sizeof(buf));
  if (rc < 0)
    return rc;
  /* find the last ')' -- the comm field may itself contain spaces and
   * parentheses, so field counting must start after it. */
  s = strrchr(buf, ')');
  if (s == NULL)
    goto err;
  /* Skip 22 space-separated fields to land just before field 24 (rss). */
  for (i = 1; i <= 22; i++) {
    s = strchr(s + 1, ' ');
    if (s == NULL)
      goto err;
  }
  errno = 0;
  val = strtol(s, NULL, 10);
  if (val < 0 || errno != 0)
    goto err;
  do
    rc = sysinfo(&si);
  while (rc == -1 && errno == EINTR);
  if (rc == -1)
    return UV__ERR(errno);
  /* NOTE(review): on Linux the stat rss field counts pages; this multiplies
   * by sysinfo's mem_unit instead of the page size -- confirm the two agree
   * on this platform. */
  *rss = val * si.mem_unit;
  return 0;
err:
  return UV_EINVAL;
}
/* CPU information is not implemented on this platform.
 * FIXME: read /proc/stat? */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  *count = 0;
  *cpu_infos = NULL;
  return UV_ENOSYS;
}
uint64_t uv_get_constrained_memory(void) {
  return 0; /* Memory constraints are unknown. */
}
/* No tighter bound available on this platform: report free memory. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}

View File

@@ -0,0 +1,176 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <dlfcn.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <TargetConditionals.h>
#if !TARGET_OS_IPHONE
#include "darwin-stub.h"
#endif
/* Set the process title shown by Activity Monitor / ps on Darwin.
 * On iOS only the thread name is set. On macOS, LaunchServices private
 * symbols are resolved at runtime via dlopen/dlsym so libuv needs no
 * link-time framework dependency. Returns 0 or a UV_* error. */
int uv__set_process_title(const char* title) {
#if TARGET_OS_IPHONE
  return uv__thread_setname(title);
#else
  CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
                                            const char*,
                                            CFStringEncoding);
  CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef);
  void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef);
  void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef);
  CFTypeRef (*pLSGetCurrentApplicationASN)(void);
  OSStatus (*pLSSetApplicationInformationItem)(int,
                                               CFTypeRef,
                                               CFStringRef,
                                               CFStringRef,
                                               CFDictionaryRef*);
  void* application_services_handle;
  void* core_foundation_handle;
  CFBundleRef launch_services_bundle;
  CFStringRef* display_name_key;
  CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef);
  CFBundleRef (*pCFBundleGetMainBundle)(void);
  CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef);
  void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t,
                                                                void*);
  CFTypeRef asn;
  int err;
  err = UV_ENOENT;
  application_services_handle = dlopen("/System/Library/Frameworks/"
                                       "ApplicationServices.framework/"
                                       "Versions/A/ApplicationServices",
                                       RTLD_LAZY | RTLD_LOCAL);
  core_foundation_handle = dlopen("/System/Library/Frameworks/"
                                  "CoreFoundation.framework/"
                                  "Versions/A/CoreFoundation",
                                  RTLD_LAZY | RTLD_LOCAL);
  if (application_services_handle == NULL || core_foundation_handle == NULL)
    goto out;
  *(void **)(&pCFStringCreateWithCString) =
      dlsym(core_foundation_handle, "CFStringCreateWithCString");
  *(void **)(&pCFBundleGetBundleWithIdentifier) =
      dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier");
  *(void **)(&pCFBundleGetDataPointerForName) =
      dlsym(core_foundation_handle, "CFBundleGetDataPointerForName");
  *(void **)(&pCFBundleGetFunctionPointerForName) =
      dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName");
  if (pCFStringCreateWithCString == NULL ||
      pCFBundleGetBundleWithIdentifier == NULL ||
      pCFBundleGetDataPointerForName == NULL ||
      pCFBundleGetFunctionPointerForName == NULL) {
    goto out;
  }
/* Convenience: build a CFString from a C string literal. */
#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
  launch_services_bundle =
      pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices"));
  if (launch_services_bundle == NULL)
    goto out;
  *(void **)(&pLSGetCurrentApplicationASN) =
      pCFBundleGetFunctionPointerForName(launch_services_bundle,
                                         S("_LSGetCurrentApplicationASN"));
  if (pLSGetCurrentApplicationASN == NULL)
    goto out;
  *(void **)(&pLSSetApplicationInformationItem) =
      pCFBundleGetFunctionPointerForName(launch_services_bundle,
                                         S("_LSSetApplicationInformationItem"));
  if (pLSSetApplicationInformationItem == NULL)
    goto out;
  display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle,
                                                    S("_kLSDisplayNameKey"));
  if (display_name_key == NULL || *display_name_key == NULL)
    goto out;
  *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle,
                                                  "CFBundleGetInfoDictionary");
  *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle,
                                              "CFBundleGetMainBundle");
  if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL)
    goto out;
  *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName(
      launch_services_bundle,
      S("_LSApplicationCheckIn"));
  if (pLSApplicationCheckIn == NULL)
    goto out;
  *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) =
      pCFBundleGetFunctionPointerForName(
          launch_services_bundle,
          S("_LSSetApplicationLaunchServicesServerConnectionStatus"));
  if (pLSSetApplicationLaunchServicesServerConnectionStatus == NULL)
    goto out;
  pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL);
  /* Check into process manager?! */
  pLSApplicationCheckIn(-2,
                        pCFBundleGetInfoDictionary(pCFBundleGetMainBundle()));
  asn = pLSGetCurrentApplicationASN();
  err = UV_EBUSY;
  if (asn == NULL)
    goto out;
  err = UV_EINVAL;
  if (pLSSetApplicationInformationItem(-2, /* Magic value. */
                                       asn,
                                       *display_name_key,
                                       S(title),
                                       NULL) != noErr) {
    goto out;
  }
  uv__thread_setname(title); /* Don't care if it fails. */
  err = 0;
out:
  /* goto-based cleanup: framework handles are released on every path. */
  if (core_foundation_handle != NULL)
    dlclose(core_foundation_handle);
  if (application_services_handle != NULL)
    dlclose(application_services_handle);
  return err;
#endif /* !TARGET_OS_IPHONE */
}

View File

@@ -0,0 +1,97 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_DARWIN_STUB_H_
#define UV_DARWIN_STUB_H_
#include <stdint.h>
struct CFArrayCallBacks;
struct CFRunLoopSourceContext;
struct FSEventStreamContext;
typedef double CFAbsoluteTime;
typedef double CFTimeInterval;
typedef int FSEventStreamEventFlags;
typedef int OSStatus;
typedef long CFIndex;
typedef struct CFArrayCallBacks CFArrayCallBacks;
typedef struct CFRunLoopSourceContext CFRunLoopSourceContext;
typedef struct FSEventStreamContext FSEventStreamContext;
typedef uint32_t FSEventStreamCreateFlags;
typedef uint64_t FSEventStreamEventId;
typedef unsigned CFStringEncoding;
typedef void* CFAllocatorRef;
typedef void* CFArrayRef;
typedef void* CFBundleRef;
typedef void* CFDictionaryRef;
typedef void* CFRunLoopRef;
typedef void* CFRunLoopSourceRef;
typedef void* CFStringRef;
typedef void* CFTypeRef;
typedef void* FSEventStreamRef;
typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
void*,
size_t,
void*,
const FSEventStreamEventFlags*,
const FSEventStreamEventId*);
/* Stand-in for CoreFoundation's CFRunLoopSourceContext; only the fields libuv
 * touches are named. NOTE(review): pad[7] assumes the layout of the unused
 * middle fields -- confirm against the CoreFoundation headers when updating. */
struct CFRunLoopSourceContext {
  CFIndex version;
  void* info;               /* user data passed to perform() */
  void* pad[7];             /* unused retain/release/copy/equal/hash/schedule/cancel slots */
  void (*perform)(void*);   /* callback invoked when the source fires */
};
/* Stand-in for FSEvents' FSEventStreamContext; only version and info are
 * used. NOTE(review): pad[3] assumes the layout of the retain/release/
 * copyDescription fields -- confirm against the FSEvents headers. */
struct FSEventStreamContext {
  CFIndex version;
  void* info;      /* user data passed to the stream callback */
  void* pad[3];    /* unused retain/release/copyDescription slots */
};
static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
static const OSStatus noErr = 0;
static const FSEventStreamEventId kFSEventStreamEventIdSinceNow = -1;
static const int kFSEventStreamCreateFlagNoDefer = 2;
static const int kFSEventStreamCreateFlagFileEvents = 16;
static const int kFSEventStreamEventFlagEventIdsWrapped = 8;
static const int kFSEventStreamEventFlagHistoryDone = 16;
static const int kFSEventStreamEventFlagItemChangeOwner = 0x4000;
static const int kFSEventStreamEventFlagItemCreated = 0x100;
static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x2000;
static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x400;
static const int kFSEventStreamEventFlagItemIsDir = 0x20000;
static const int kFSEventStreamEventFlagItemModified = 0x1000;
static const int kFSEventStreamEventFlagItemRemoved = 0x200;
static const int kFSEventStreamEventFlagItemRenamed = 0x800;
static const int kFSEventStreamEventFlagItemXattrMod = 0x8000;
static const int kFSEventStreamEventFlagKernelDropped = 4;
static const int kFSEventStreamEventFlagMount = 64;
static const int kFSEventStreamEventFlagRootChanged = 32;
static const int kFSEventStreamEventFlagUnmount = 128;
static const int kFSEventStreamEventFlagUserDropped = 2;
#endif /* UV_DARWIN_STUB_H_ */

View File

@@ -0,0 +1,17 @@
#ifndef UV_DARWIN_SYSCALLS_H_
#define UV_DARWIN_SYSCALLS_H_
#include <sys/types.h>
#include <sys/socket.h>
/* https://github.com/apple/darwin-xnu/blob/master/bsd/sys/socket.h */
/* Multi-message header for Darwin's private recvmsg_x/sendmsg_x syscalls;
 * mirrors the kernel's layout (see the darwin-xnu socket.h link above). */
struct mmsghdr {
  struct msghdr msg_hdr;  /* the usual per-message header */
  size_t msg_len;         /* bytes transferred for this message */
};
ssize_t recvmsg_x(int s, const struct mmsghdr* msgp, u_int cnt, int flags);
ssize_t sendmsg_x(int s, const struct mmsghdr* msgp, u_int cnt, int flags);
#endif /* UV_DARWIN_SYSCALLS_H_ */

View File

@@ -0,0 +1,237 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdint.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach-o/dyld.h> /* _NSGetExecutablePath */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
static uv_once_t once = UV_ONCE_INIT;
static mach_timebase_info_data_t timebase;
/* Per-loop platform setup: initialize the kqueue backend.  The FSEvents/
 * CoreFoundation state (cf_state) is created lazily elsewhere, so it is
 * only cleared here. */
int uv__platform_loop_init(uv_loop_t* loop) {
  loop->cf_state = NULL;

  if (uv__kqueue_init(loop))
    return UV__ERR(errno);

  return 0;
}
/* Per-loop platform teardown: release the lazily-created FSEvents state. */
void uv__platform_loop_delete(uv_loop_t* loop) {
  uv__fsevents_loop_delete(loop);
}
/* One-time initialization of the mach timebase used to scale clock ticks to
 * nanoseconds.  mach_timebase_info() failing means the process environment
 * is fundamentally broken, so abort rather than continue with a bad clock. */
static void uv__hrtime_init_once(void) {
  if (KERN_SUCCESS != mach_timebase_info(&timebase))
    abort();
}
/* High-resolution monotonic clock in nanoseconds.  mach_continuous_time()
 * keeps advancing while the machine sleeps; `type' is ignored on Darwin.
 * NOTE(review): ticks * numer could in principle overflow uint64_t after
 * extreme uptimes -- presumed acceptable since numer/denom are small. */
uint64_t uv__hrtime(uv_clocktype_t type) {
  uv_once(&once, uv__hrtime_init_once);
  return mach_continuous_time() * timebase.numer / timebase.denom;
}
/* Copy the absolute, symlink-resolved path of the running executable into
 * `buffer'.  On entry *size is the buffer capacity; on success it is set to
 * the number of bytes written, excluding the terminating NUL. */
int uv_exepath(char* buffer, size_t* size) {
  /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */
  char abspath[PATH_MAX * 2 + 1];
  char exepath[PATH_MAX + 1];
  uint32_t exepath_size;
  size_t abspath_size;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  exepath_size = sizeof(exepath);
  if (_NSGetExecutablePath(exepath, &exepath_size))
    return UV_EIO;

  if (realpath(exepath, abspath) != abspath)
    return UV__ERR(errno);

  abspath_size = strlen(abspath);
  if (abspath_size == 0)
    return UV_EIO;

  /* Truncate to fit, always reserving one byte for the NUL terminator. */
  *size -= 1;
  if (*size > abspath_size)
    *size = abspath_size;

  memcpy(buffer, abspath, *size);
  buffer[*size] = '\0';

  return 0;
}
/* Free physical memory in bytes (free page count * page size), or 0 when
 * the mach host statistics call fails. */
uint64_t uv_get_free_memory(void) {
  vm_statistics_data_t info;
  mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);

  if (host_statistics(mach_host_self(), HOST_VM_INFO,
                      (host_info_t)&info, &count) != KERN_SUCCESS) {
    return 0;
  }

  return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
}
/* Total physical memory in bytes via sysctl(CTL_HW, HW_MEMSIZE), or 0 on
 * failure. */
uint64_t uv_get_total_memory(void) {
  uint64_t info;
  int which[] = {CTL_HW, HW_MEMSIZE};
  size_t size = sizeof(info);

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return 0;

  return (uint64_t) info;
}
/* Darwin exposes no queryable cgroup/ulimit-style memory cap, so report
 * "unknown" per the uv_get_constrained_memory() contract. */
uint64_t uv_get_constrained_memory(void) {
  uint64_t unknown = 0;  /* Memory constraints are unknown. */
  return unknown;
}
/* With no constraint information available, "available" degenerates to the
 * kernel's free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* 1/5/15-minute load averages via sysctl(CTL_VM, VM_LOADAVG).  On failure
 * avg[] is left untouched. */
void uv_loadavg(double avg[3]) {
  struct loadavg info;
  size_t size = sizeof(info);
  int which[] = {CTL_VM, VM_LOADAVG};

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;

  /* ldavg[] holds fixed-point values scaled by fscale. */
  avg[0] = (double) info.ldavg[0] / info.fscale;
  avg[1] = (double) info.ldavg[1] / info.fscale;
  avg[2] = (double) info.ldavg[2] / info.fscale;
}
/* Resident set size of the current task, in bytes.  Always returns 0. */
int uv_resident_set_memory(size_t* rss) {
  mach_msg_type_number_t count;
  task_basic_info_data_t info;
  kern_return_t err;

  count = TASK_BASIC_INFO_COUNT;
  err = task_info(mach_task_self(),
                  TASK_BASIC_INFO,
                  (task_info_t) &info,
                  &count);
  (void) &err;  /* Keep `err' "used" when NDEBUG compiles the assert away. */

  /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
   * KERN_SUCCESS implies a libuv bug.
   */
  assert(err == KERN_SUCCESS);
  *rss = info.resident_size;

  return 0;
}
/* System uptime in seconds, computed as now - kernel boot time. */
int uv_uptime(double* uptime) {
  time_t now;
  struct timeval info;
  size_t size = sizeof(info);
  static int which[] = {CTL_KERN, KERN_BOOTTIME};

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return UV__ERR(errno);

  now = time(NULL);
  *uptime = now - info.tv_sec;

  return 0;
}
/* Fill *cpu_infos with one entry per logical CPU (model, speed, usage ticks).
 * The array is heap-allocated; the caller releases it via uv_free_cpu_info().
 * Returns 0 on success or a UV_* error. */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  /* `multiplier' converts scheduler ticks into milliseconds. */
  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
               multiplier = ((uint64_t)1000L / ticks);
  char model[512];
  uint64_t cpuspeed;
  size_t size;
  unsigned int i;
  natural_t numcpus;
  mach_msg_type_number_t msg_type;
  processor_cpu_load_info_data_t *info;
  uv_cpu_info_t* cpu_info;

  /* Prefer the CPU brand string; fall back to the hardware model name. */
  size = sizeof(model);
  if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
      sysctlbyname("hw.model", &model, &size, NULL, 0)) {
    return UV__ERR(errno);
  }

  cpuspeed = 0;
  size = sizeof(cpuspeed);
  sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0);
  if (cpuspeed == 0)
    /* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable
     * from Apple, but we can hard-code it here to a plausible value. */
    cpuspeed = 2400000000U;

  if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
                          (processor_info_array_t*)&info,
                          &msg_type) != KERN_SUCCESS) {
    return UV_EINVAL;  /* FIXME(bnoordhuis) Translate error. */
  }

  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    /* Release the kernel-allocated load info before bailing out. */
    vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
    return UV_ENOMEM;
  }

  *count = numcpus;

  for (i = 0; i < numcpus; i++) {
    cpu_info = &(*cpu_infos)[i];

    /* cpu_ticks indices follow mach's CPU_STATE_* ordering:
     * USER=0, SYSTEM=1, IDLE=2, NICE=3 (see mach/machine.h). */
    cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
    cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
    cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
    cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
    cpu_info->cpu_times.irq = 0;  /* Not reported by this interface. */

    cpu_info->model = uv__strdup(model);
    cpu_info->speed = (int)(cpuspeed / 1000000);  /* Hz -> MHz */
  }
  vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);

  return 0;
}

View File

@@ -0,0 +1,80 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <dlfcn.h>
#include <errno.h>
#include <string.h>
#include <locale.h>
static int uv__dlerror(uv_lib_t* lib);
/* Open a shared library with lazy symbol binding.  Returns 0 on success or
 * -1 on failure; the error text is then available via uv_dlerror(lib). */
int uv_dlopen(const char* filename, uv_lib_t* lib) {
  dlerror(); /* Reset error status. */
  lib->errmsg = NULL;
  lib->handle = dlopen(filename, RTLD_LAZY);
  return lib->handle ? 0 : uv__dlerror(lib);
}
/* Close the library handle and free the cached error message.  Idempotent:
 * safe to call again after a previous close (handle/errmsg are NULLed). */
void uv_dlclose(uv_lib_t* lib) {
  uv__free(lib->errmsg);
  lib->errmsg = NULL;

  if (lib->handle) {
    /* Ignore errors. No good way to signal them without leaking memory. */
    dlclose(lib->handle);
    lib->handle = NULL;
  }
}
/* Look up `name' in the library, storing the address in *ptr.  Returns 0 on
 * success, -1 (with message cached for uv_dlerror()) on failure. */
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
  /* Clear any stale error state first; dlsym() may legitimately return
   * NULL for a symbol whose value is NULL. */
  dlerror();

  *ptr = dlsym(lib->handle, name);
  if (*ptr != NULL)
    return 0;

  return uv__dlerror(lib);
}
const char* uv_dlerror(const uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
/* Capture dlerror() into a heap copy on lib->errmsg, freeing any previous
 * message.  Returns -1 when an error was pending, 0 otherwise.
 * NOTE(review): if uv__strdup() fails, errmsg is NULL and uv_dlerror() will
 * report "no error" despite the failure -- presumed-acceptable OOM corner. */
static int uv__dlerror(uv_lib_t* lib) {
  const char* errmsg;

  uv__free(lib->errmsg);

  errmsg = dlerror();
  if (errmsg) {
    lib->errmsg = uv__strdup(errmsg);
    return -1;
  }
  else {
    lib->errmsg = NULL;
    return 0;
  }
}

View File

@@ -0,0 +1,290 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <paths.h>
#if defined(__DragonFly__)
# include <sys/event.h>
# include <sys/kinfo.h>
#else
# include <sys/user.h>
#endif
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h> /* VM_LOADAVG */
#include <time.h>
#include <stdlib.h>
#include <unistd.h> /* sysconf */
#include <fcntl.h>
#ifndef CPUSTATES
# define CPUSTATES 5U
#endif
#ifndef CP_USER
# define CP_USER 0
# define CP_NICE 1
# define CP_SYS 2
# define CP_IDLE 3
# define CP_INTR 4
#endif
/* Per-loop platform setup: only the kqueue backend on the BSDs. */
int uv__platform_loop_init(uv_loop_t* loop) {
  return uv__kqueue_init(loop);
}
/* No per-loop platform state to release on this platform. */
void uv__platform_loop_delete(uv_loop_t* loop) {
}
/* Copy the absolute path of the running executable into `buffer' using
 * sysctl(KERN_PROC_PATHNAME).  On entry *size is the buffer capacity; on
 * success it becomes the number of bytes written, excluding the NUL. */
int uv_exepath(char* buffer, size_t* size) {
  char abspath[PATH_MAX * 2 + 1];
  int mib[4];
  size_t abspath_size;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC;
  mib[2] = KERN_PROC_PATHNAME;
  mib[3] = -1;  /* -1 == the calling process (see sysctl(3)). */

  abspath_size = sizeof abspath;
  if (sysctl(mib, ARRAY_SIZE(mib), abspath, &abspath_size, NULL, 0))
    return UV__ERR(errno);

  assert(abspath_size > 0);
  abspath_size -= 1;  /* Reported length includes the NUL terminator. */
  *size -= 1;

  /* Truncate to fit, always reserving one byte for the NUL terminator. */
  if (*size > abspath_size)
    *size = abspath_size;

  memcpy(buffer, abspath, *size);
  buffer[*size] = '\0';

  return 0;
}
/* Free physical memory in bytes (free page count * page size), or 0 when
 * the sysctl lookup fails. */
uint64_t uv_get_free_memory(void) {
  int freecount;
  size_t size = sizeof(freecount);

  if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
    return 0;

  return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
}
/* Total physical memory in bytes via sysctl(CTL_HW, HW_PHYSMEM), or 0 on
 * failure. */
uint64_t uv_get_total_memory(void) {
  unsigned long info;
  int which[] = {CTL_HW, HW_PHYSMEM};
  size_t size = sizeof(info);

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return 0;

  return (uint64_t) info;
}
/* The BSDs expose no queryable per-process memory cap; report "unknown"
 * per the uv_get_constrained_memory() contract. */
uint64_t uv_get_constrained_memory(void) {
  static const uint64_t kUnknown = 0;  /* Memory constraints are unknown. */
  return kUnknown;
}
/* With no constraint information available, "available" degenerates to the
 * kernel's free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* 1/5/15-minute load averages via sysctl(CTL_VM, VM_LOADAVG).  On failure
 * avg[] is left untouched. */
void uv_loadavg(double avg[3]) {
  struct loadavg info;
  size_t size = sizeof(info);
  int which[] = {CTL_VM, VM_LOADAVG};

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;

  /* ldavg[] holds fixed-point values scaled by fscale. */
  avg[0] = (double) info.ldavg[0] / info.fscale;
  avg[1] = (double) info.ldavg[1] / info.fscale;
  avg[2] = (double) info.ldavg[2] / info.fscale;
}
/* Resident set size of the current process in bytes, read from the kernel's
 * kinfo_proc record for our PID. */
int uv_resident_set_memory(size_t* rss) {
  struct kinfo_proc kinfo;
  size_t page_size;
  size_t kinfo_size;
  int mib[4];

  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC;
  mib[2] = KERN_PROC_PID;
  mib[3] = getpid();

  kinfo_size = sizeof(kinfo);

  if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &kinfo_size, NULL, 0))
    return UV__ERR(errno);

  page_size = getpagesize();

#ifdef __DragonFly__
  /* DragonFly names the RSS (in pages) field differently. */
  *rss = kinfo.kp_vm_rssize * page_size;
#else
  *rss = kinfo.ki_rssize * page_size;
#endif

  return 0;
}
/* System uptime in seconds.  CLOCK_MONOTONIC counts from boot on the BSDs,
 * so tv_sec is the uptime directly (sub-second part discarded). */
int uv_uptime(double* uptime) {
  int r;
  struct timespec sp;
  r = clock_gettime(CLOCK_MONOTONIC, &sp);
  if (r)
    return UV__ERR(errno);

  *uptime = sp.tv_sec;
  return 0;
}
/* Fill *cpu_infos with one entry per logical CPU (model, speed, usage ticks).
 * The array is heap-allocated; the caller releases it via uv_free_cpu_info().
 * Returns 0 on success or a UV_* error.
 *
 * Fix: the hw.clockrate failure path returned `-errno' directly instead of
 * going through UV__ERR() like every other error path in this file; use
 * UV__ERR(errno) for consistency. */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  /* `multiplier' converts scheduler ticks into milliseconds. */
  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
               multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
               cur = 0;
  uv_cpu_info_t* cpu_info;
  const char* maxcpus_key;
  const char* cptimes_key;
  const char* model_key;
  char model[512];
  long* cp_times;
  int numcpus;
  size_t size;
  int i;

#if defined(__DragonFly__)
  /* This is not quite correct but DragonFlyBSD doesn't seem to have anything
   * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
   * not per CPU). At least this stops uv_cpu_info() from failing completely.
   */
  maxcpus_key = "hw.ncpu";
  cptimes_key = "kern.cp_time";
#else
  maxcpus_key = "kern.smp.maxcpus";
  cptimes_key = "kern.cp_times";
#endif

#if defined(__arm__) || defined(__aarch64__)
  /* The key hw.model and hw.clockrate are not available on FreeBSD ARM. */
  model_key = "hw.machine";
  cpuspeed = 0;
#else
  model_key = "hw.model";

  size = sizeof(cpuspeed);
  if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0))
    return UV__ERR(errno);
#endif

  size = sizeof(model);
  if (sysctlbyname(model_key, &model, &size, NULL, 0))
    return UV__ERR(errno);

  size = sizeof(numcpus);
  if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
    return UV__ERR(errno);

  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
  if (!(*cpu_infos))
    return UV_ENOMEM;

  *count = numcpus;

  /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
   * ncpu.
   */
  size = sizeof(maxcpus);
  if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) {
    uv__free(*cpu_infos);
    return UV__ERR(errno);
  }

  size = maxcpus * CPUSTATES * sizeof(long);

  cp_times = uv__malloc(size);
  if (cp_times == NULL) {
    uv__free(*cpu_infos);
    return UV_ENOMEM;
  }

  if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) {
    uv__free(cp_times);
    uv__free(*cpu_infos);
    return UV__ERR(errno);
  }

  for (i = 0; i < numcpus; i++) {
    cpu_info = &(*cpu_infos)[i];

    /* `cur' walks the flat cp_times array in CPUSTATES-sized strides. */
    cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
    cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
    cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
    cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
    cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;

    cpu_info->model = uv__strdup(model);
    cpu_info->speed = cpuspeed;  /* hw.clockrate reports MHz. */

    cur += CPUSTATES;
  }

  uv__free(cp_times);
  return 0;
}
/* Thin wrapper over copy_file_range(2), a kernel-side file-to-file copy.
 * Only FreeBSD >= 13 provides it; otherwise fail with ENOSYS so callers can
 * fall back to an ordinary read/write loop. */
ssize_t
uv__fs_copy_file_range(int fd_in,
                       off_t* off_in,
                       int fd_out,
                       off_t* off_out,
                       size_t len,
                       unsigned int flags)
{
#if __FreeBSD__ >= 13 && !defined(__DragonFly__)
  return copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
#else
  return errno = ENOSYS, -1;
#endif
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,906 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#if TARGET_OS_IPHONE || MAC_OS_X_VERSION_MAX_ALLOWED < 1070
/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */
/* macOS prior to 10.7 doesn't provide the full FSEvents API so use kqueue */
/* No-op stubs: FSEvents is unavailable in this configuration (see the #if
 * above), so fs_event watching is handled by the kqueue fallback instead. */
int uv__fsevents_init(uv_fs_event_t* handle) {
  return 0;
}

int uv__fsevents_close(uv_fs_event_t* handle) {
  return 0;
}

void uv__fsevents_loop_delete(uv_loop_t* loop) {
}
#else /* TARGET_OS_IPHONE */
#include "darwin-stub.h"
#include <dlfcn.h>
#include <assert.h>
#include <stdlib.h>
#include <pthread.h>
/* Flag groups used to classify raw FSEvents flags:
 *  - kFSEventsModified: metadata/content changes, reported as UV_CHANGE
 *  - kFSEventsRenamed:  create/remove/rename, reported as UV_RENAME
 *  - kFSEventsSystem:   stream housekeeping, filtered out entirely */
static const int kFSEventsModified =
    kFSEventStreamEventFlagItemChangeOwner |
    kFSEventStreamEventFlagItemFinderInfoMod |
    kFSEventStreamEventFlagItemInodeMetaMod |
    kFSEventStreamEventFlagItemModified |
    kFSEventStreamEventFlagItemXattrMod;

static const int kFSEventsRenamed =
    kFSEventStreamEventFlagItemCreated |
    kFSEventStreamEventFlagItemRemoved |
    kFSEventStreamEventFlagItemRenamed;

static const int kFSEventsSystem =
    kFSEventStreamEventFlagUserDropped |
    kFSEventStreamEventFlagKernelDropped |
    kFSEventStreamEventFlagEventIdsWrapped |
    kFSEventStreamEventFlagHistoryDone |
    kFSEventStreamEventFlagMount |
    kFSEventStreamEventFlagUnmount |
    kFSEventStreamEventFlagRootChanged;

typedef struct uv__fsevents_event_s uv__fsevents_event_t;
typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
typedef struct uv__cf_loop_state_s uv__cf_loop_state_t;

/* Message types sent from the UV loop thread to the CF thread. */
enum uv__cf_loop_signal_type_e {
  kUVCFLoopSignalRegular,
  kUVCFLoopSignalClosing  /* Sender waits on fsevent_sem for completion. */
};
typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;

/* One queued cross-thread message; handle may be NULL for a bare wakeup. */
struct uv__cf_loop_signal_s {
  struct uv__queue member;
  uv_fs_event_t* handle;
  uv__cf_loop_signal_type_t type;
};

/* One filesystem event queued for delivery on the UV loop thread. */
struct uv__fsevents_event_s {
  struct uv__queue member;
  int events;    /* UV_RENAME or UV_CHANGE */
  char path[1];  /* variable length, NUL-terminated */
};

/* Per-loop CoreFoundation thread state, stored in loop->cf_state. */
struct uv__cf_loop_state_s {
  CFRunLoopRef loop;
  CFRunLoopSourceRef signal_source;
  int fsevent_need_reschedule;
  FSEventStreamRef fsevent_stream;
  uv_sem_t fsevent_sem;
  uv_mutex_t fsevent_mutex;
  struct uv__queue fsevent_handles;   /* links uv_fs_event_t.cf_member */
  unsigned int fsevent_handle_count;
};
/* Forward declarations */
static void uv__cf_loop_cb(void* arg);
static void* uv__cf_loop_runner(void* arg);
static int uv__cf_loop_signal(uv_loop_t* loop,
uv_fs_event_t* handle,
uv__cf_loop_signal_type_t type);
/* Lazy-loaded by uv__fsevents_global_init(). */
static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef,
const void**,
CFIndex,
const CFArrayCallBacks*);
static void (*pCFRelease)(CFTypeRef);
static void (*pCFRunLoopAddSource)(CFRunLoopRef,
CFRunLoopSourceRef,
CFStringRef);
static CFRunLoopRef (*pCFRunLoopGetCurrent)(void);
static void (*pCFRunLoopRemoveSource)(CFRunLoopRef,
CFRunLoopSourceRef,
CFStringRef);
static void (*pCFRunLoopRun)(void);
static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef,
CFIndex,
CFRunLoopSourceContext*);
static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef);
static void (*pCFRunLoopStop)(CFRunLoopRef);
static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
CFAllocatorRef,
const char*);
static CFStringRef (*pkCFRunLoopDefaultMode);
static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamCallback,
FSEventStreamContext*,
CFArrayRef,
FSEventStreamEventId,
CFTimeInterval,
FSEventStreamCreateFlags);
static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
static void (*pFSEventStreamRelease)(FSEventStreamRef);
static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
CFRunLoopRef,
CFStringRef);
static int (*pFSEventStreamStart)(FSEventStreamRef);
static void (*pFSEventStreamStop)(FSEventStreamRef);
/* Drain (handle)->cf_events and run `block' for each queued event, then
 * report any stored error through the handle's callback.  cf_mutex guards
 * the queue shared with the CF thread; it is held only while detaching the
 * queue so user callbacks run unlocked. */
#define UV__FSEVENTS_PROCESS(handle, block)                                   \
    do {                                                                      \
      struct uv__queue events;                                                \
      struct uv__queue* q;                                                    \
      uv__fsevents_event_t* event;                                            \
      int err;                                                                \
      uv_mutex_lock(&(handle)->cf_mutex);                                     \
      /* Split-off all events and empty original queue */                     \
      uv__queue_move(&(handle)->cf_events, &events);                          \
      /* Get error (if any) and zero original one */                          \
      err = (handle)->cf_error;                                               \
      (handle)->cf_error = 0;                                                 \
      uv_mutex_unlock(&(handle)->cf_mutex);                                   \
      /* Loop through events, deallocating each after processing */           \
      while (!uv__queue_empty(&events)) {                                     \
        q = uv__queue_head(&events);                                          \
        event = uv__queue_data(q, uv__fsevents_event_t, member);              \
        uv__queue_remove(q);                                                  \
        /* NOTE: Checking uv__is_active() is required here, because handle    \
         * callback may close handle and invoking it after it will lead to    \
         * incorrect behaviour */                                             \
        if (!uv__is_closing((handle)) && uv__is_active((handle)))             \
          block                                                               \
        /* Free allocated data */                                             \
        uv__free(event);                                                      \
      }                                                                       \
      if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle)))   \
        (handle)->cb((handle), NULL, 0, err);                                 \
    } while (0)
/* Runs in UV loop's thread, when there're events to report to handle */
static void uv__fsevents_cb(uv_async_t* cb) {
  uv_fs_event_t* handle;

  handle = cb->data;

  /* An empty event path is reported as NULL to the user callback
   * (presumably meaning "the watched path itself" -- see the basename
   * handling in uv__fsevents_event_cb). */
  UV__FSEVENTS_PROCESS(handle, {
    handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
  });
}
/* Runs in CF thread, pushed event into handle's event list.  Either a
 * non-NULL event queue, a non-zero error, or both must be supplied; the UV
 * loop thread is then woken via uv_async_send() to drain them. */
static void uv__fsevents_push_event(uv_fs_event_t* handle,
                                    struct uv__queue* events,
                                    int err) {
  assert(events != NULL || err != 0);
  uv_mutex_lock(&handle->cf_mutex);

  /* Concatenate two queues */
  if (events != NULL)
    uv__queue_add(&handle->cf_events, events);

  /* Propagate error */
  if (err != 0)
    handle->cf_error = err;
  uv_mutex_unlock(&handle->cf_mutex);

  /* Wake the UV loop thread; uv__fsevents_cb() does the actual delivery. */
  uv_async_send(handle->cf_cb);
}
/* Runs in CF thread, when there're events in FSEventStream.  A single stream
 * serves every handle on the loop, so each raw event is matched against each
 * watched path, trimmed to a path relative to the watch root, classified as
 * UV_RENAME/UV_CHANGE, and queued on the matching handle. */
static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
                                  void* info,
                                  size_t numEvents,
                                  void* eventPaths,
                                  const FSEventStreamEventFlags eventFlags[],
                                  const FSEventStreamEventId eventIds[]) {
  size_t i;
  int len;
  char** paths;
  char* path;
  char* pos;
  uv_fs_event_t* handle;
  struct uv__queue* q;
  uv_loop_t* loop;
  uv__cf_loop_state_t* state;
  uv__fsevents_event_t* event;
  FSEventStreamEventFlags flags;
  struct uv__queue head;

  loop = info;
  state = loop->cf_state;
  assert(state != NULL);
  paths = eventPaths;

  /* For each handle */
  uv_mutex_lock(&state->fsevent_mutex);
  uv__queue_foreach(q, &state->fsevent_handles) {
    handle = uv__queue_data(q, uv_fs_event_t, cf_member);
    uv__queue_init(&head);

    /* Process and filter out events */
    for (i = 0; i < numEvents; i++) {
      flags = eventFlags[i];

      /* Ignore system events */
      if (flags & kFSEventsSystem)
        continue;

      path = paths[i];
      len = strlen(path);
      if (handle->realpath_len == 0)
        continue; /* This should be unreachable */

      /* Filter out paths that are outside handle's request */
      if (len < handle->realpath_len)
        continue;

      /* Make sure that realpath actually named a directory,
       * (unless watching root, which alone keeps a trailing slash on the realpath)
       * or that we matched the whole string */
      if (handle->realpath_len != len &&
          handle->realpath_len > 1 &&
          path[handle->realpath_len] != '/')
        continue;

      if (memcmp(path, handle->realpath, handle->realpath_len) != 0)
        continue;

      if (!(handle->realpath_len == 1 && handle->realpath[0] == '/')) {
        /* Remove common prefix, unless the watched folder is "/" */
        path += handle->realpath_len;
        len -= handle->realpath_len;

        if (len == 0) {
          /* Since we're using fsevents to watch the file itself,
           * realpath == path, and we now need to get the basename of the file back
           * (for commonality with other codepaths and platforms). */
          while (len < handle->realpath_len && path[-1] != '/') {
            path--;
            len++;
          }
          /* Created and Removed seem to be always set, but don't make sense */
          flags &= ~kFSEventsRenamed;
        } else {
          /* Skip forward slash */
          path++;
          len--;
        }
      }

      /* Do not emit events from subdirectories (without option set) */
      if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != '\0') {
        pos = strchr(path + 1, '/');
        if (pos != NULL)
          continue;
      }

      /* sizeof(*event) already includes path[1], covering the NUL byte. */
      event = uv__malloc(sizeof(*event) + len);
      if (event == NULL)
        break;

      memset(event, 0, sizeof(*event));
      memcpy(event->path, path, len + 1);
      event->events = UV_RENAME;

      /* Rename flags win over modification flags; a pure directory
       * modification without rename flags is reported as UV_CHANGE. */
      if (0 == (flags & kFSEventsRenamed)) {
        if (0 != (flags & kFSEventsModified) ||
            0 == (flags & kFSEventStreamEventFlagItemIsDir))
          event->events = UV_CHANGE;
      }

      uv__queue_insert_tail(&head, &event->member);
    }

    if (!uv__queue_empty(&head))
      uv__fsevents_push_event(handle, &head, 0);
  }
  uv_mutex_unlock(&state->fsevent_mutex);
}
/* Runs in CF thread.  Create an FSEventStream over `paths', schedule it on
 * the CF thread's run loop and start it, storing the stream in
 * state->fsevent_stream.  Returns 0 or UV_EMFILE on start failure. */
static int uv__fsevents_create_stream(uv__cf_loop_state_t* state,
                                      uv_loop_t* loop,
                                      CFArrayRef paths) {
  FSEventStreamContext ctx;
  FSEventStreamRef ref;
  CFAbsoluteTime latency;
  FSEventStreamCreateFlags flags;

  /* Initialize context */
  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;

  latency = 0.05;

  /* Explanation of selected flags:
   * 1. NoDefer - without this flag, events that are happening continuously
   *    (i.e. each event is happening after time interval less than `latency`,
   *    counted from previous event), will be deferred and passed to callback
   *    once they'll either fill whole OS buffer, or when this continuous stream
   *    will stop (i.e. there'll be delay between events, bigger than
   *    `latency`).
   *    Specifying this flag will invoke callback after `latency` time passed
   *    since event.
   * 2. FileEvents - fire callback for file changes too (by default it is firing
   *    it only for directory changes).
   */
  flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;

  /*
   * NOTE: It might sound like a good idea to remember last seen StreamEventId,
   * but in reality one dir might have last StreamEventId less than, the other,
   * that is being watched now. Which will cause FSEventStream API to report
   * changes to files from the past.
   */
  ref = pFSEventStreamCreate(NULL,
                             &uv__fsevents_event_cb,
                             &ctx,
                             paths,
                             kFSEventStreamEventIdSinceNow,
                             latency,
                             flags);
  assert(ref != NULL);

  pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode);

  if (!pFSEventStreamStart(ref)) {
    pFSEventStreamInvalidate(ref);
    pFSEventStreamRelease(ref);
    return UV_EMFILE;
  }

  state->fsevent_stream = ref;
  return 0;
}
/* Runs in CF thread.  Stop, invalidate and release the active FSEventStream,
 * if any.  Safe to call when no stream exists. */
static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
  if (state->fsevent_stream == NULL)
    return;

  /* Stop emitting events */
  pFSEventStreamStop(state->fsevent_stream);

  /* Release stream */
  pFSEventStreamInvalidate(state->fsevent_stream);
  pFSEventStreamRelease(state->fsevent_stream);
  state->fsevent_stream = NULL;
}
/* Runs in CF thread, when there're new fsevent handles to add to stream.
 * Rebuilds the single shared FSEventStream from the current handle list:
 * tear down the old stream, snapshot every watched realpath into a CFArray,
 * and start a new stream over it.  On failure an error is broadcast to every
 * handle.  Closing senders (kUVCFLoopSignalClosing) are unblocked at the end
 * regardless of outcome. */
static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
                                    uv_loop_t* loop,
                                    uv__cf_loop_signal_type_t type) {
  struct uv__queue* q;
  uv_fs_event_t* curr;
  CFArrayRef cf_paths;
  CFStringRef* paths;
  unsigned int i;
  int err;
  unsigned int path_count;

  paths = NULL;
  cf_paths = NULL;
  err = 0;
  /* NOTE: `i` is used in deallocation loop below */
  i = 0;

  /* Optimization to prevent O(n^2) time spent when starting to watch
   * many files simultaneously
   */
  uv_mutex_lock(&state->fsevent_mutex);
  if (state->fsevent_need_reschedule == 0) {
    uv_mutex_unlock(&state->fsevent_mutex);
    goto final;
  }
  state->fsevent_need_reschedule = 0;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Destroy previous FSEventStream */
  uv__fsevents_destroy_stream(state);

  /* Any failure below will be a memory failure */
  err = UV_ENOMEM;

  /* Create list of all watched paths */
  uv_mutex_lock(&state->fsevent_mutex);
  path_count = state->fsevent_handle_count;
  if (path_count != 0) {
    paths = uv__malloc(sizeof(*paths) * path_count);
    if (paths == NULL) {
      uv_mutex_unlock(&state->fsevent_mutex);
      goto final;
    }

    q = &state->fsevent_handles;
    for (; i < path_count; i++) {
      q = uv__queue_next(q);
      assert(q != &state->fsevent_handles);
      curr = uv__queue_data(q, uv_fs_event_t, cf_member);

      assert(curr->realpath != NULL);
      paths[i] =
          pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
      if (paths[i] == NULL) {
        uv_mutex_unlock(&state->fsevent_mutex);
        goto final;
      }
    }
  }
  uv_mutex_unlock(&state->fsevent_mutex);
  err = 0;

  if (path_count != 0) {
    /* Create new FSEventStream */
    cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
    if (cf_paths == NULL) {
      err = UV_ENOMEM;
      goto final;
    }
    err = uv__fsevents_create_stream(state, loop, cf_paths);
  }

final:
  /* Deallocate all paths in case of failure */
  if (err != 0) {
    if (cf_paths == NULL) {
      /* `i` strings were created before the failure; release just those. */
      while (i != 0)
        pCFRelease(paths[--i]);
      uv__free(paths);
    } else {
      /* CFArray takes ownership of both strings and original C-array */
      pCFRelease(cf_paths);
    }

    /* Broadcast error to all handles */
    uv_mutex_lock(&state->fsevent_mutex);
    uv__queue_foreach(q, &state->fsevent_handles) {
      curr = uv__queue_data(q, uv_fs_event_t, cf_member);
      uv__fsevents_push_event(curr, NULL, err);
    }
    uv_mutex_unlock(&state->fsevent_mutex);
  }

  /*
   * Main thread will block until the removal of handle from the list,
   * we must tell it when we're ready.
   *
   * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close`
   */
  if (type == kUVCFLoopSignalClosing)
    uv_sem_post(&state->fsevent_sem);
}
/* Process-wide, idempotent: dlopen CoreFoundation and CoreServices and
 * resolve every framework symbol into the p* function pointers above.
 * Returns 0, UV_ENOSYS when a framework can't be loaded, or UV_ENOENT when
 * a symbol is missing.  Guarded by a mutex; success is latched via
 * core_foundation_handle. */
static int uv__fsevents_global_init(void) {
  static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
  static void* core_foundation_handle;
  static void* core_services_handle;
  int err;

  err = 0;
  pthread_mutex_lock(&global_init_mutex);
  if (core_foundation_handle != NULL)
    goto out;

  /* The libraries are never unloaded because we currently don't have a good
   * mechanism for keeping a reference count. It's unlikely to be an issue
   * but if it ever becomes one, we can turn the dynamic library handles into
   * per-event loop properties and have the dynamic linker keep track for us.
   */
  err = UV_ENOSYS;
  core_foundation_handle = dlopen("/System/Library/Frameworks/"
                                  "CoreFoundation.framework/"
                                  "Versions/A/CoreFoundation",
                                  RTLD_LAZY | RTLD_LOCAL);
  if (core_foundation_handle == NULL)
    goto out;

  core_services_handle = dlopen("/System/Library/Frameworks/"
                                "CoreServices.framework/"
                                "Versions/A/CoreServices",
                                RTLD_LAZY | RTLD_LOCAL);
  if (core_services_handle == NULL)
    goto out;

  err = UV_ENOENT;
/* Resolve one symbol into its p-prefixed pointer or bail out. */
#define V(handle, symbol)                                                     \
  do {                                                                        \
    *(void **)(&p ## symbol) = dlsym((handle), #symbol);                      \
    if (p ## symbol == NULL)                                                  \
      goto out;                                                               \
  }                                                                           \
  while (0)
  V(core_foundation_handle, CFArrayCreate);
  V(core_foundation_handle, CFRelease);
  V(core_foundation_handle, CFRunLoopAddSource);
  V(core_foundation_handle, CFRunLoopGetCurrent);
  V(core_foundation_handle, CFRunLoopRemoveSource);
  V(core_foundation_handle, CFRunLoopRun);
  V(core_foundation_handle, CFRunLoopSourceCreate);
  V(core_foundation_handle, CFRunLoopSourceSignal);
  V(core_foundation_handle, CFRunLoopStop);
  V(core_foundation_handle, CFRunLoopWakeUp);
  V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
  V(core_foundation_handle, kCFRunLoopDefaultMode);
  V(core_services_handle, FSEventStreamCreate);
  V(core_services_handle, FSEventStreamInvalidate);
  V(core_services_handle, FSEventStreamRelease);
  V(core_services_handle, FSEventStreamScheduleWithRunLoop);
  V(core_services_handle, FSEventStreamStart);
  V(core_services_handle, FSEventStreamStop);
#undef V
  err = 0;

out:
  /* On any failure, close what was opened so a later call can retry. */
  if (err && core_services_handle != NULL) {
    dlclose(core_services_handle);
    core_services_handle = NULL;
  }

  if (err && core_foundation_handle != NULL) {
    dlclose(core_foundation_handle);
    core_foundation_handle = NULL;
  }

  pthread_mutex_unlock(&global_init_mutex);
  return err;
}
/* Runs in UV loop.  Lazily create the loop's CF state: mutexes/semaphores,
 * the run-loop signal source, and the dedicated CF thread, then wait until
 * that thread signals cf_sem so both sides are synchronized.  Idempotent
 * (returns 0 when cf_state already exists).  On failure every resource
 * acquired so far is unwound in reverse order via the fail_* labels. */
static int uv__fsevents_loop_init(uv_loop_t* loop) {
  CFRunLoopSourceContext ctx;
  uv__cf_loop_state_t* state;
  pthread_attr_t attr;
  int err;

  if (loop->cf_state != NULL)
    return 0;

  err = uv__fsevents_global_init();
  if (err)
    return err;

  state = uv__calloc(1, sizeof(*state));
  if (state == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&loop->cf_mutex);
  if (err)
    goto fail_mutex_init;

  err = uv_sem_init(&loop->cf_sem, 0);
  if (err)
    goto fail_sem_init;

  uv__queue_init(&loop->cf_signals);

  err = uv_sem_init(&state->fsevent_sem, 0);
  if (err)
    goto fail_fsevent_sem_init;

  err = uv_mutex_init(&state->fsevent_mutex);
  if (err)
    goto fail_fsevent_mutex_init;

  uv__queue_init(&state->fsevent_handles);
  state->fsevent_need_reschedule = 0;
  state->fsevent_handle_count = 0;

  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;
  ctx.perform = uv__cf_loop_cb;
  state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
  if (state->signal_source == NULL) {
    err = UV_ENOMEM;
    goto fail_signal_source_create;
  }

  if (pthread_attr_init(&attr))
    abort();

  if (pthread_attr_setstacksize(&attr, uv__thread_stack_size()))
    abort();

  /* cf_state must be visible before the CF thread starts running. */
  loop->cf_state = state;

  /* uv_thread_t is an alias for pthread_t. */
  err = UV__ERR(pthread_create(&loop->cf_thread, &attr, uv__cf_loop_runner, loop));

  if (pthread_attr_destroy(&attr))
    abort();

  if (err)
    goto fail_thread_create;

  /* Synchronize threads */
  uv_sem_wait(&loop->cf_sem);
  return 0;

fail_thread_create:
  loop->cf_state = NULL;

fail_signal_source_create:
  uv_mutex_destroy(&state->fsevent_mutex);

fail_fsevent_mutex_init:
  uv_sem_destroy(&state->fsevent_sem);

fail_fsevent_sem_init:
  uv_sem_destroy(&loop->cf_sem);

fail_sem_init:
  uv_mutex_destroy(&loop->cf_mutex);

fail_mutex_init:
  uv__free(state);
  return err;
}
/* Runs in UV loop */
/* Tear down the loop's CF thread and state: stop the CF run loop, join the
 * thread, then release every synchronization object and pending signal. */
void uv__fsevents_loop_delete(uv_loop_t* loop) {
  uv__cf_loop_signal_t* s;
  uv__cf_loop_state_t* state;
  struct uv__queue* q;

  if (loop->cf_state == NULL)
    return;

  /* A NULL handle is the termination signal; uv__cf_loop_cb() reacts to it
   * by calling CFRunLoopStop(). */
  if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0)
    abort();

  uv_thread_join(&loop->cf_thread);
  uv_sem_destroy(&loop->cf_sem);
  uv_mutex_destroy(&loop->cf_mutex);

  /* Free any remaining data */
  while (!uv__queue_empty(&loop->cf_signals)) {
    q = uv__queue_head(&loop->cf_signals);
    s = uv__queue_data(q, uv__cf_loop_signal_t, member);
    uv__queue_remove(q);
    uv__free(s);
  }

  /* Destroy state */
  state = loop->cf_state;
  uv_sem_destroy(&state->fsevent_sem);
  uv_mutex_destroy(&state->fsevent_mutex);
  pCFRelease(state->signal_source);
  uv__free(state);
  loop->cf_state = NULL;
}
/* Runs in CF thread. This is the CF loop's body */
/* Thread entry point for the dedicated CF thread: install the signal source,
 * unblock the UV thread, then run the CF loop until it is stopped. */
static void* uv__cf_loop_runner(void* arg) {
  uv_loop_t* loop;
  uv__cf_loop_state_t* state;

  loop = arg;
  state = loop->cf_state;
  state->loop = pCFRunLoopGetCurrent();

  pCFRunLoopAddSource(state->loop,
                      state->signal_source,
                      *pkCFRunLoopDefaultMode);

  /* Unblock uv__fsevents_loop_init(): the signal source is now installed. */
  uv_sem_post(&loop->cf_sem);

  /* Blocks until pCFRunLoopStop() is invoked from uv__cf_loop_cb(). */
  pCFRunLoopRun();

  pCFRunLoopRemoveSource(state->loop,
                         state->signal_source,
                         *pkCFRunLoopDefaultMode);

  state->loop = NULL;

  return NULL;
}
/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
/* Drain the signal queue posted by uv__cf_loop_signal(): either stop the run
 * loop (termination) or reschedule the FSEventStream for a handle. */
static void uv__cf_loop_cb(void* arg) {
  uv_loop_t* loop;
  uv__cf_loop_state_t* state;
  struct uv__queue* item;
  struct uv__queue split_head;
  uv__cf_loop_signal_t* s;

  loop = arg;
  state = loop->cf_state;

  /* Detach the whole pending list under the lock, then process it without
   * holding cf_mutex. */
  uv_mutex_lock(&loop->cf_mutex);
  uv__queue_move(&loop->cf_signals, &split_head);
  uv_mutex_unlock(&loop->cf_mutex);

  while (!uv__queue_empty(&split_head)) {
    item = uv__queue_head(&split_head);
    uv__queue_remove(item);
    s = uv__queue_data(item, uv__cf_loop_signal_t, member);

    /* This was a termination signal */
    if (s->handle == NULL)
      pCFRunLoopStop(state->loop);
    else
      uv__fsevents_reschedule(state, loop, s->type);

    uv__free(s);
  }
}
/* Runs in UV loop to notify CF thread */
/* Enqueue a signal for the CF thread and wake its run loop.
 * handle == NULL requests CF-thread termination (see uv__cf_loop_cb()).
 * Returns 0 on success or UV_ENOMEM. */
int uv__cf_loop_signal(uv_loop_t* loop,
                       uv_fs_event_t* handle,
                       uv__cf_loop_signal_type_t type) {
  uv__cf_loop_signal_t* item;
  uv__cf_loop_state_t* state;

  item = uv__malloc(sizeof(*item));
  if (item == NULL)
    return UV_ENOMEM;

  item->handle = handle;
  item->type = type;

  /* Insert and wake under the same lock so the CF thread cannot miss the
   * newly queued item. */
  uv_mutex_lock(&loop->cf_mutex);
  uv__queue_insert_tail(&loop->cf_signals, &item->member);

  state = loop->cf_state;
  assert(state != NULL);
  pCFRunLoopSourceSignal(state->signal_source);
  pCFRunLoopWakeUp(state->loop);

  uv_mutex_unlock(&loop->cf_mutex);

  return 0;
}
/* Runs in UV loop to initialize handle */
/* Attach an fs_event handle to the FSEvents machinery: resolve its real
 * path, create the async bounce-back handle, register with the loop's CF
 * state and trigger a stream reschedule.  Returns 0 or a UV_* error. */
int uv__fsevents_init(uv_fs_event_t* handle) {
  char* buf;
  int err;
  uv__cf_loop_state_t* state;

  err = uv__fsevents_loop_init(handle->loop);
  if (err)
    return err;

  /* Get absolute path to file */
  buf = realpath(handle->path, NULL);
  if (buf == NULL)
    return UV__ERR(errno);
  handle->realpath = uv__strdup(buf);
  free(buf); /* _Not_ uv__free. */
  if (handle->realpath == NULL)
    return UV_ENOMEM;
  handle->realpath_len = strlen(handle->realpath);

  /* Initialize event queue */
  uv__queue_init(&handle->cf_events);
  handle->cf_error = 0;

  /*
   * Events will occur in other thread.
   * Initialize callback for getting them back into event loop's thread
   */
  handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb));
  if (handle->cf_cb == NULL) {
    err = UV_ENOMEM;
    goto fail_cf_cb_malloc;
  }

  handle->cf_cb->data = handle;
  uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
  /* Internal async handle: unref so it does not keep the loop alive. */
  handle->cf_cb->flags |= UV_HANDLE_INTERNAL;
  uv_unref((uv_handle_t*) handle->cf_cb);

  err = uv_mutex_init(&handle->cf_mutex);
  if (err)
    goto fail_cf_mutex_init;

  /* Insert handle into the list */
  state = handle->loop->cf_state;
  uv_mutex_lock(&state->fsevent_mutex);
  uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member);
  state->fsevent_handle_count++;
  state->fsevent_need_reschedule = 1;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Reschedule FSEventStream */
  assert(handle != NULL);
  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular);
  if (err)
    goto fail_loop_signal;

  return 0;

fail_loop_signal:
  uv_mutex_destroy(&handle->cf_mutex);

fail_cf_mutex_init:
  uv__free(handle->cf_cb);
  handle->cf_cb = NULL;

fail_cf_cb_malloc:
  uv__free(handle->realpath);
  handle->realpath = NULL;
  handle->realpath_len = 0;

  return err;
}
/* Runs in UV loop to de-initialize handle */
/* Detach an fs_event handle: remove it from the loop's handle list, ask the
 * CF thread to drop it from the stream, then release its resources.
 * Returns 0 on success, UV_EINVAL if the handle was never initialized. */
int uv__fsevents_close(uv_fs_event_t* handle) {
  int err;
  uv__cf_loop_state_t* state;

  if (handle->cf_cb == NULL)
    return UV_EINVAL;

  /* Remove handle from the list */
  state = handle->loop->cf_state;
  uv_mutex_lock(&state->fsevent_mutex);
  uv__queue_remove(&handle->cf_member);
  state->fsevent_handle_count--;
  state->fsevent_need_reschedule = 1;
  uv_mutex_unlock(&state->fsevent_mutex);

  /* Reschedule FSEventStream */
  assert(handle != NULL);
  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing);
  if (err)
    return UV__ERR(err);

  /* Wait for deinitialization - fsevent_sem is presumably posted by the CF
   * thread once the stream no longer references this handle (see
   * uv__fsevents_reschedule, not visible here). */
  uv_sem_wait(&state->fsevent_sem);

  uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free);
  handle->cf_cb = NULL;

  /* Free data in queue */
  UV__FSEVENTS_PROCESS(handle, {
    /* NOP */
  });

  uv_mutex_destroy(&handle->cf_mutex);
  uv__free(handle->realpath);
  handle->realpath = NULL;
  handle->realpath_len = 0;

  return 0;
}
#endif /* TARGET_OS_IPHONE */

/* ==== file boundary: next source file begins below (diff artifact removed) ==== */
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
* include any headers.
*/
#include "uv.h"
#include "internal.h"
#include "idna.h"
#include <errno.h>
#include <stddef.h> /* NULL */
#include <stdlib.h>
#include <string.h>
#include <net/if.h> /* if_indextoname() */
/* EAI_* constants. */
#include <netdb.h>
int uv__getaddrinfo_translate_error(int sys_err) {
switch (sys_err) {
case 0: return 0;
#if defined(EAI_ADDRFAMILY)
case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY;
#endif
#if defined(EAI_AGAIN)
case EAI_AGAIN: return UV_EAI_AGAIN;
#endif
#if defined(EAI_BADFLAGS)
case EAI_BADFLAGS: return UV_EAI_BADFLAGS;
#endif
#if defined(EAI_BADHINTS)
case EAI_BADHINTS: return UV_EAI_BADHINTS;
#endif
#if defined(EAI_CANCELED)
case EAI_CANCELED: return UV_EAI_CANCELED;
#endif
#if defined(EAI_FAIL)
case EAI_FAIL: return UV_EAI_FAIL;
#endif
#if defined(EAI_FAMILY)
case EAI_FAMILY: return UV_EAI_FAMILY;
#endif
#if defined(EAI_MEMORY)
case EAI_MEMORY: return UV_EAI_MEMORY;
#endif
#if defined(EAI_NODATA)
case EAI_NODATA: return UV_EAI_NODATA;
#endif
#if defined(EAI_NONAME)
# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME
case EAI_NONAME: return UV_EAI_NONAME;
# endif
#endif
#if defined(EAI_OVERFLOW)
case EAI_OVERFLOW: return UV_EAI_OVERFLOW;
#endif
#if defined(EAI_PROTOCOL)
case EAI_PROTOCOL: return UV_EAI_PROTOCOL;
#endif
#if defined(EAI_SERVICE)
case EAI_SERVICE: return UV_EAI_SERVICE;
#endif
#if defined(EAI_SOCKTYPE)
case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE;
#endif
#if defined(EAI_SYSTEM)
case EAI_SYSTEM: return UV__ERR(errno);
#endif
}
assert(!"unknown EAI_* error code");
abort();
#ifndef __SUNPRO_C
return 0; /* Pacify compiler. */
#endif
}
/* Thread-pool work callback: performs the blocking getaddrinfo(3) lookup and
 * stores the translated libuv status code on the request. */
static void uv__getaddrinfo_work(struct uv__work* w) {
  uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
  int rc;

  rc = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo);
  req->retcode = uv__getaddrinfo_translate_error(rc);
}
/* Loop-thread completion callback: releases the packed argument buffer,
 * translates cancellation, then invokes the user callback (if any). */
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req;

  req = container_of(w, uv_getaddrinfo_t, work_req);
  uv__req_unregister(req->loop);

  /* See initialization in uv_getaddrinfo(): hints, service and hostname are
   * packed, in that order, into ONE allocation; its base address is the
   * first non-NULL of the three, so a single uv__free() releases all. */
  if (req->hints)
    uv__free(req->hints);
  else if (req->service)
    uv__free(req->service);
  else if (req->hostname)
    uv__free(req->hostname);
  else
    assert(0);

  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
  }

  if (req->cb)
    req->cb(req, req->retcode, req->addrinfo);
}
/* Public API: asynchronous (cb != NULL) or synchronous (cb == NULL) name
 * resolution.  hostname and service may not both be NULL.  Returns 0 when a
 * callback will be made, the resolution result when synchronous, or a
 * negative UV_* error on validation/allocation failure. */
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* req,
                   uv_getaddrinfo_cb cb,
                   const char* hostname,
                   const char* service,
                   const struct addrinfo* hints) {
  char hostname_ascii[256];
  size_t hostname_len;
  size_t service_len;
  size_t hints_len;
  size_t len;
  char* buf;
  long rc;

  if (req == NULL || (hostname == NULL && service == NULL))
    return UV_EINVAL;

  /* FIXME(bnoordhuis) IDNA does not seem to work z/OS,
   * probably because it uses EBCDIC rather than ASCII.
   */
#ifdef __MVS__
  (void) &hostname_ascii;
#else
  /* Convert internationalized hostnames to their ASCII form first. */
  if (hostname != NULL) {
    rc = uv__idna_toascii(hostname,
                          hostname + strlen(hostname),
                          hostname_ascii,
                          hostname_ascii + sizeof(hostname_ascii));
    if (rc < 0)
      return rc;
    hostname = hostname_ascii;
  }
#endif

  /* Pack hints, service and hostname into a single allocation; it is freed
   * as one block in uv__getaddrinfo_done(). */
  hostname_len = hostname ? strlen(hostname) + 1 : 0;
  service_len = service ? strlen(service) + 1 : 0;
  hints_len = hints ? sizeof(*hints) : 0;
  buf = uv__malloc(hostname_len + service_len + hints_len);
  if (buf == NULL)
    return UV_ENOMEM;

  uv__req_init(loop, req, UV_GETADDRINFO);
  req->loop = loop;
  req->cb = cb;
  req->addrinfo = NULL;
  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;
  req->retcode = 0;

  /* order matters, see uv_getaddrinfo_done() */
  len = 0;

  if (hints) {
    req->hints = memcpy(buf + len, hints, sizeof(*hints));
    len += sizeof(*hints);
  }

  if (service) {
    req->service = memcpy(buf + len, service, service_len);
    len += service_len;
  }

  if (hostname)
    req->hostname = memcpy(buf + len, hostname, hostname_len);

  if (cb) {
    /* Asynchronous: resolve on the thread pool, invoke cb from the loop. */
    uv__work_submit(loop,
                    &req->work_req,
                    UV__WORK_SLOW_IO,
                    uv__getaddrinfo_work,
                    uv__getaddrinfo_done);
    return 0;
  } else {
    /* Synchronous: run inline and hand back the result code directly. */
    uv__getaddrinfo_work(&req->work_req);
    uv__getaddrinfo_done(&req->work_req, 0);
    return req->retcode;
  }
}
/* Public API: release an addrinfo list returned by uv_getaddrinfo().
 * A NULL argument is a harmless no-op. */
void uv_freeaddrinfo(struct addrinfo* ai) {
  if (ai == NULL)
    return;

  freeaddrinfo(ai);
}
/* Translate a network interface index into its name.  On success *size is
 * set to the name length (excluding NUL); if the caller's buffer is too
 * small, *size is set to the required capacity and UV_ENOBUFS returned. */
int uv_if_indextoname(unsigned int ifindex, char* buffer, size_t* size) {
  char name[UV_IF_NAMESIZE];
  size_t namelen;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (NULL == if_indextoname(ifindex, name))
    return UV__ERR(errno);

  namelen = strnlen(name, sizeof(name));

  /* The caller's buffer must hold the name plus its terminating NUL. */
  if (namelen >= *size) {
    *size = namelen + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, name, namelen);
  buffer[namelen] = '\0';
  *size = namelen;

  return 0;
}
/* On Unix the "interface identifier" is simply the interface name, so this
 * is a straight delegation to uv_if_indextoname(). */
int uv_if_indextoiid(unsigned int ifindex, char* buffer, size_t* size) {
  return uv_if_indextoname(ifindex, buffer, size);
}

/* ==== file boundary: next source file begins below (diff artifact removed) ==== */
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
/* Thread-pool work callback: performs the blocking getnameinfo(3) reverse
 * lookup on the sockaddr stored in the request. */
static void uv__getnameinfo_work(struct uv__work* w) {
  uv_getnameinfo_t* req;
  int err;
  socklen_t salen;

  req = container_of(w, uv_getnameinfo_t, work_req);

  /* uv_getnameinfo() only accepts AF_INET/AF_INET6; any other family here
   * indicates a corrupted request. */
  if (req->storage.ss_family == AF_INET)
    salen = sizeof(struct sockaddr_in);
  else if (req->storage.ss_family == AF_INET6)
    salen = sizeof(struct sockaddr_in6);
  else
    abort();

  err = getnameinfo((struct sockaddr*) &req->storage,
                    salen,
                    req->host,
                    sizeof(req->host),
                    req->service,
                    sizeof(req->service),
                    req->flags);
  req->retcode = uv__getaddrinfo_translate_error(err);
}
/* Loop-thread completion callback: translates cancellation into
 * UV_EAI_CANCELED, exposes host/service only on success, then invokes the
 * user callback (if any). */
static void uv__getnameinfo_done(struct uv__work* w, int status) {
  uv_getnameinfo_t* req = container_of(w, uv_getnameinfo_t, work_req);
  char* host = NULL;
  char* service = NULL;

  uv__req_unregister(req->loop);

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
  } else if (req->retcode == 0) {
    host = req->host;
    service = req->service;
  }

  if (req->getnameinfo_cb != NULL)
    req->getnameinfo_cb(req, req->retcode, host, service);
}
/*
* Entry point for getnameinfo
* return 0 if a callback will be made
* return error code if validation fails
*/
int uv_getnameinfo(uv_loop_t* loop,
                   uv_getnameinfo_t* req,
                   uv_getnameinfo_cb getnameinfo_cb,
                   const struct sockaddr* addr,
                   int flags) {
  if (req == NULL || addr == NULL)
    return UV_EINVAL;

  /* Copy the address into the request so it stays valid for the duration
   * of the (possibly asynchronous) lookup. */
  if (addr->sa_family == AF_INET) {
    memcpy(&req->storage,
           addr,
           sizeof(struct sockaddr_in));
  } else if (addr->sa_family == AF_INET6) {
    memcpy(&req->storage,
           addr,
           sizeof(struct sockaddr_in6));
  } else {
    return UV_EINVAL;
  }

  uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO);

  req->getnameinfo_cb = getnameinfo_cb;
  req->flags = flags;
  /* NOTE(review): uv__req_init() above was already passed UV_GETNAMEINFO;
   * verify whether this explicit assignment is redundant. */
  req->type = UV_GETNAMEINFO;
  req->loop = loop;
  req->retcode = 0;

  if (getnameinfo_cb) {
    /* Asynchronous: resolve on the thread pool; callback made later. */
    uv__work_submit(loop,
                    &req->work_req,
                    UV__WORK_SLOW_IO,
                    uv__getnameinfo_work,
                    uv__getnameinfo_done);
    return 0;
  } else {
    /* Synchronous: run inline and return the result code. */
    uv__getnameinfo_work(&req->work_req);
    uv__getnameinfo_done(&req->work_req, 0);
    return req->retcode;
  }
}

/* ==== file boundary: next source file begins below (diff artifact removed) ==== */
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <FindDirectory.h> /* find_path() */
#include <OS.h>
/* Load averages are not available on this platform; report zeroes. */
void uv_loadavg(double avg[3]) {
  int i;

  for (i = 0; i < 3; i++)
    avg[i] = 0;
}
/* Resolve the running executable's path via Haiku's find_path().  On return
 * *size holds the copied length (excluding NUL); the result may be
 * truncated to fit the caller's buffer without reporting an error. */
int uv_exepath(char* buffer, size_t* size) {
  char abspath[B_PATH_NAME_LENGTH];
  status_t status;
  ssize_t abspath_len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  status = find_path(B_APP_IMAGE_SYMBOL, B_FIND_PATH_IMAGE_PATH, NULL, abspath,
                     sizeof(abspath));
  if (status != B_OK)
    return UV__ERR(status);

  /* uv__strscpy() NUL-terminates and returns the copied length, or a
   * negative value on truncation; in the truncated case *size ends up as
   * the buffer capacity minus the NUL. */
  abspath_len = uv__strscpy(buffer, abspath, *size);
  *size -= 1;
  if (abspath_len >= 0 && *size > (size_t)abspath_len)
    *size = (size_t)abspath_len;

  return 0;
}
/* Free physical memory in bytes, derived from Haiku's page accounting.
 * Returns 0 when the system information cannot be read. */
uint64_t uv_get_free_memory(void) {
  system_info info;

  if (get_system_info(&info) != B_OK)
    return 0;

  return (info.max_pages - info.used_pages) * B_PAGE_SIZE;
}
/* Total physical memory in bytes (max_pages * page size); returns 0 when
 * the system information cannot be read. */
uint64_t uv_get_total_memory(void) {
  status_t status;
  system_info sinfo;

  status = get_system_info(&sinfo);
  if (status != B_OK)
    return 0;

  return sinfo.max_pages * B_PAGE_SIZE;
}
uint64_t uv_get_constrained_memory(void) {
  /* Memory constraints are unknown on this platform; 0 means "no limit
   * known" to callers. */
  return 0;
}
/* Approximate "available" memory with the free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* Resident set size: sum the RAM usage of every memory area belonging to
 * the current team (process). */
int uv_resident_set_memory(size_t* rss) {
  area_info area;
  ssize_t cookie;
  status_t status;
  thread_info thread;

  status = get_thread_info(find_thread(NULL), &thread);
  if (status != B_OK)
    return UV__ERR(status);

  cookie = 0;
  *rss = 0;
  while (get_next_area_info(thread.team, &cookie, &area) == B_OK)
    *rss += area.ram_size;

  return 0;
}
/* Uptime in seconds.  system_time() returns time since booting in
 * microseconds, so scale down by 1e6. */
int uv_uptime(double* uptime) {
  double usec = (double) system_time();

  *uptime = usec / 1000000;
  return 0;
}
/* Enumerate logical CPUs.  Haiku does not expose per-CPU model names or
 * time counters, so only a count and a shared clock speed are reported. */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  cpu_topology_node_info* topology_infos;
  int i;
  status_t status;
  system_info system;
  uint32 topology_count; /* Haiku expects address of uint32, not uint32_t */
  uint64_t cpuspeed;
  uv_cpu_info_t* cpu_info;

  if (cpu_infos == NULL || count == NULL)
    return UV_EINVAL;

  /* First call sizes the topology array, second call fills it. */
  status = get_cpu_topology_info(NULL, &topology_count);
  if (status != B_OK)
    return UV__ERR(status);

  topology_infos = uv__malloc(topology_count * sizeof(*topology_infos));
  if (topology_infos == NULL)
    return UV_ENOMEM;

  status = get_cpu_topology_info(topology_infos, &topology_count);
  if (status != B_OK) {
    uv__free(topology_infos);
    return UV__ERR(status);
  }

  /* Take the default frequency of the first core node as the CPU speed. */
  cpuspeed = 0;
  for (i = 0; i < (int)topology_count; i++) {
    if (topology_infos[i].type == B_TOPOLOGY_CORE) {
      cpuspeed = topology_infos[i].data.core.default_frequency;
      break;
    }
  }

  uv__free(topology_infos);

  status = get_system_info(&system);
  if (status != B_OK)
    return UV__ERR(status);

  *cpu_infos = uv__calloc(system.cpu_count, sizeof(**cpu_infos));
  if (*cpu_infos == NULL)
    return UV_ENOMEM;

  /* CPU time and model are not exposed by Haiku. */
  cpu_info = *cpu_infos;
  for (i = 0; i < (int)system.cpu_count; i++) {
    cpu_info->model = uv__strdup("unknown");
    cpu_info->speed = (int)(cpuspeed / 1000000); /* presumably Hz -> MHz */
    cpu_info++;
  }
  *count = system.cpu_count;

  return 0;
}

/* ==== file boundary: next source file begins below (diff artifact removed) ==== */
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define _GNU_SOURCE 1
#include "uv.h"
#include "internal.h"
#include <hurd.h>
#include <hurd/process.h>
#include <mach/task_info.h>
#include <mach/vm_statistics.h>
#include <mach/vm_param.h>
#include <inttypes.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
/* Resolve the running executable's path via Hurd's proc_get_exe().
 * Truncation is not reported as an error; *size is set to the copied
 * length (excluding NUL). */
int uv_exepath(char* buffer, size_t* size) {
  kern_return_t err;
  /* XXX in current Hurd, strings are char arrays of 1024 elements */
  string_t exepath;
  ssize_t copied;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (*size - 1 > 0) {
    /* XXX limited length of buffer in current Hurd, this API will probably
     * evolve in the future */
    err = proc_get_exe(getproc(), getpid(), exepath);

    if (err)
      return UV__ERR(err);
  }
  /* NOTE(review): when *size == 1 the branch above is skipped and exepath
   * is read uninitialized by the copy below - confirm whether that path is
   * reachable/intended. */

  copied = uv__strscpy(buffer, exepath, *size);

  /* do not return error on UV_E2BIG failure */
  *size = copied < 0 ? strlen(buffer) : (size_t) copied;

  return 0;
}
/* Resident set size of the current task, via Mach task_info(). */
int uv_resident_set_memory(size_t* rss) {
  kern_return_t err;
  struct task_basic_info bi;
  mach_msg_type_number_t count;

  count = TASK_BASIC_INFO_COUNT;
  err = task_info(mach_task_self(), TASK_BASIC_INFO,
                  (task_info_t) &bi, &count);
  if (err)
    return UV__ERR(err);

  *rss = bi.resident_size;
  return 0;
}
/* Free physical memory in bytes via Mach vm_statistics(); 0 on failure. */
uint64_t uv_get_free_memory(void) {
  kern_return_t err;
  struct vm_statistics vmstats;

  err = vm_statistics(mach_task_self(), &vmstats);
  if (err)
    return 0;

  return vmstats.free_count * vm_page_size;
}
/* Total physical memory in bytes via Mach host_info(); 0 on failure. */
uint64_t uv_get_total_memory(void) {
  kern_return_t err;
  host_basic_info_data_t hbi;
  mach_msg_type_number_t cnt;

  cnt = HOST_BASIC_INFO_COUNT;
  err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
  if (err)
    return 0;

  return hbi.memory_size;
}
/* Uptime in seconds, read from /proc/uptime when procfs is mounted;
 * otherwise fails with EIO. */
int uv_uptime(double* uptime) {
  char buf[128];

  /* Try /proc/uptime first */
  if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
    if (1 == sscanf(buf, "%lf", uptime))
      return 0;

  /* Reimplement here code from procfs to calculate uptime if not mounted? */
  return UV__ERR(EIO);
}
/* 1/5/15-minute load averages from /proc/loadavg; the output array is left
 * untouched when procfs is unavailable or unparsable. */
void uv_loadavg(double avg[3]) {
  char buf[128];  /* Large enough to hold all of /proc/loadavg. */

  if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
    if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
      return;

  /* Reimplement here code from procfs to calculate loadavg if not mounted? */
}
/* Report only the CPU count on the Hurd; per-CPU model/speed/time fields
 * are left zeroed by uv__calloc(). */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  kern_return_t err;
  host_basic_info_data_t hbi;
  mach_msg_type_number_t cnt;

  /* Get count of cpus */
  cnt = HOST_BASIC_INFO_COUNT;
  err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
  if (err) {
    err = UV__ERR(err);
    goto abort;
  }

  /* XXX not implemented on the Hurd */
  *cpu_infos = uv__calloc(hbi.avail_cpus, sizeof(**cpu_infos));
  if (*cpu_infos == NULL) {
    err = UV_ENOMEM;
    goto abort;
  }

  *count = hbi.avail_cpus;
  return 0;

/* Failure path: leave the out-parameters in a well-defined empty state.
 * (The label merely shares a name with abort(3); it is not a call.) */
abort:
  *cpu_infos = NULL;
  *count = 0;
  return err;
}
uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}
/* Approximate "available" memory with the free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}

/* ==== file boundary: next source file begins below (diff artifact removed) ==== */
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <utmp.h>
#include <libgen.h>
#include <sys/protosw.h>
#include <procinfo.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <ctype.h>
#include <sys/mntctl.h>
#include <sys/vmount.h>
#include <limits.h>
#include <strings.h>
#include <sys/vnode.h>
#include <as400_protos.h>
#include <as400_types.h>
/* NOTE(review): presumably cached by this platform's uv_exepath()
 * implementation (not visible in this chunk) - verify before relying on it. */
char* original_exepath = NULL;
/* Protects process-title state; created exactly once via
 * init_process_title_mutex_once() through process_title_mutex_once. */
uv_mutex_t process_title_mutex;
uv_once_t process_title_mutex_once = UV_ONCE_INIT;
/* Receiver layout for the IBM i QWCRSSTS (Retrieve System Status) API,
 * format SSTS0200; filled by get_ibmi_system_status(). */
typedef struct {
  int bytes_available;
  int bytes_returned;
  char current_date_and_time[8];
  char system_name[8];
  char elapsed_time[6];
  char restricted_state_flag;
  char reserved;
  int percent_processing_unit_used;
  int jobs_in_system;
  int percent_permanent_addresses;
  int percent_temporary_addresses;
  int system_asp;
  int percent_system_asp_used;
  int total_auxiliary_storage;
  int current_unprotected_storage_used;
  int maximum_unprotected_storage_used;
  int percent_db_capability;
  int main_storage_size;
  int number_of_partitions;
  int partition_identifier;
  int reserved1;
  int current_processing_capacity;
  char processor_sharing_attribute;
  char reserved2[3];
  int number_of_processors;
  int active_jobs_in_system;
  int active_threads_in_system;
  int maximum_jobs_in_system;
  int percent_temporary_256mb_segments_used;
  int percent_temporary_4gb_segments_used;
  int percent_permanent_256mb_segments_used;
  int percent_permanent_4gb_segments_used;
  int percent_current_interactive_performance;
  int percent_uncapped_cpu_capacity_used;
  int percent_shared_processor_pool_used;
  long main_storage_size_long;
} SSTS0200;
/* Receiver layout for the QDCRLIND (Retrieve Line Description) API,
 * format LIND0500; only the adapter (MAC) address field is used here. */
typedef struct {
  char header[208];
  unsigned char loca_adapter_address[12];
} LIND0500;
/* Standard IBM i API error-code parameter block. */
typedef struct {
  int bytes_provided;
  int bytes_available;
  char msgid[7];
} errcode_s;
/* EBCDIC -> ASCII byte translation table (see iconv_e2a()). */
static const unsigned char e2a[256] = {
    0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
    128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
    144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
    32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
    38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
    45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
    186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
    195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201,
    202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208,
    209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215,
    216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
    123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237,
    125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243,
    92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249,
    48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255};
/* ASCII -> EBCDIC byte translation table (see iconv_a2e()). */
static const unsigned char a2e[256] = {
    0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
    64, 79, 127, 123, 91, 108, 80, 125, 77, 93, 92, 78, 107, 96, 75, 97,
    240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 122, 94, 76, 126, 110, 111,
    124, 193, 194, 195, 196, 197, 198, 199, 200, 201, 209, 210, 211, 212, 213, 214,
    215, 216, 217, 226, 227, 228, 229, 230, 231, 232, 233, 74, 224, 90, 95, 109,
    121, 129, 130, 131, 132, 133, 134, 135, 136, 137, 145, 146, 147, 148, 149, 150,
    151, 152, 153, 162, 163, 164, 165, 166, 167, 168, 169, 192, 106, 208, 161, 7,
    32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
    48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62, 225,
    65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
    88, 89, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117,
    118, 119, 120, 128, 138, 139, 140, 141, 142, 143, 144, 154, 155, 156, 157, 158,
    159, 160, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
    184, 185, 186, 187, 188, 189, 190, 191, 202, 203, 204, 205, 206, 207, 218, 219,
    220, 221, 222, 223, 234, 235, 236, 237, 238, 239, 250, 251, 252, 253, 254, 255};
/* Translate `length` bytes from EBCDIC to ASCII through the e2a table. */
static void iconv_e2a(unsigned char src[], unsigned char dst[], size_t length) {
  unsigned char* end = dst + length;

  while (dst < end)
    *dst++ = e2a[*src++];
}
/* Convert an ASCII C string to a fixed-width EBCDIC field of `length`
 * bytes.  The source is truncated if longer; the remainder is padded with
 * EBCDIC spaces.  No NUL terminator is written. */
static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
  size_t srclen;
  size_t i;

  srclen = strlen(src);
  if (srclen > length)
    srclen = length;
  /* Index with unsigned char: plain char may be signed, and a byte >= 0x80
   * would otherwise produce a negative (out-of-bounds) table index. */
  for (i = 0; i < srclen; i++)
    dst[i] = a2e[(unsigned char) src[i]];
  /* padding the remaining part with spaces */
  for (; i < length; i++)
    dst[i] = a2e[' '];
}
/* uv_once() initializer for process_title_mutex. */
void init_process_title_mutex_once(void) {
  uv_mutex_init(&process_title_mutex);
}
/* Call the IBM i QWCRSSTS (Retrieve System Status) API from PASE and fill
 * `rcvr` with format-SSTS0200 data.  Returns 0 on success, or the non-zero
 * status of the object-resolution / program call on failure. */
static int get_ibmi_system_status(SSTS0200* rcvr) {
  /* rcvrlen is input parameter 2 to QWCRSSTS */
  unsigned int rcvrlen = sizeof(*rcvr);
  unsigned char format[8], reset_status[10];

  /* format is input parameter 3 to QWCRSSTS */
  iconv_a2e("SSTS0200", format, sizeof(format));
  /* reset_status is input parameter 4 */
  iconv_a2e("*NO", reset_status, sizeof(reset_status));

  /* errcode is input parameter 5 to QWCRSSTS */
  errcode_s errcode;

  /* qwcrssts_pointer is the 16-byte tagged system pointer to QWCRSSTS */
  ILEpointer __attribute__((aligned(16))) qwcrssts_pointer;

  /* qwcrssts_argv is the array of argument pointers to QWCRSSTS */
  void* qwcrssts_argv[6];

  /* Set the IBM i pointer to the QSYS/QWCRSSTS *PGM object */
  int rc = _RSLOBJ2(&qwcrssts_pointer, RSLOBJ_TS_PGM, "QWCRSSTS", "QSYS");

  if (rc != 0)
    return rc;

  /* initialize the QWCRSSTS returned info structure */
  memset(rcvr, 0, sizeof(*rcvr));

  /* initialize the QWCRSSTS error code structure */
  memset(&errcode, 0, sizeof(errcode));
  errcode.bytes_provided = sizeof(errcode);

  /* initialize the array of argument pointers for the QWCRSSTS API */
  qwcrssts_argv[0] = rcvr;
  qwcrssts_argv[1] = &rcvrlen;
  qwcrssts_argv[2] = &format;
  qwcrssts_argv[3] = &reset_status;
  qwcrssts_argv[4] = &errcode;
  qwcrssts_argv[5] = NULL;

  /* Call the IBM i QWCRSSTS API from PASE */
  rc = _PGMCALL(&qwcrssts_pointer, qwcrssts_argv, 0);

  return rc;
}
uint64_t uv_get_free_memory(void) {
  SSTS0200 rcvr;

  if (get_ibmi_system_status(&rcvr))
    return 0;

  /* main_storage_size is presumably in kilobytes (converted to bytes here).
   * NOTE(review): this is the same figure uv_get_total_memory() returns -
   * no distinct "free" amount is read from the API. */
  return (uint64_t)rcvr.main_storage_size * 1024ULL;
}
/* Total main storage in bytes (main_storage_size presumably in KB);
 * returns 0 when the system status API fails. */
uint64_t uv_get_total_memory(void) {
  SSTS0200 rcvr;

  if (get_ibmi_system_status(&rcvr))
    return 0;

  return (uint64_t)rcvr.main_storage_size * 1024ULL;
}
uint64_t uv_get_constrained_memory(void) {
  return 0;  /* Memory constraints are unknown. */
}
/* Approximate "available" memory with the free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* Fills avg[0..2] with a load-average-like figure. IBM i has no UNIX
 * 1/5/15-minute load averages, so the same instantaneous CPU-usage value
 * is reported for all three slots; all zeros on query failure. */
void uv_loadavg(double avg[3]) {
  SSTS0200 status;
  double busy;

  if (get_ibmi_system_status(&status) != 0) {
    avg[2] = 0;
    avg[1] = 0;
    avg[0] = 0;
    return;
  }

  /* percent_processing_unit_used is the elapsed-time processing-unit
   * usage in tenths of a percent (411 == 41.1%); it may exceed 100% on
   * an uncapped partition. */
  busy = status.percent_processing_unit_used / 1000.0;
  avg[0] = busy;
  avg[1] = busy;
  avg[2] = busy;
}
/* RSS is not tracked on this platform; report 0 and succeed so portable
 * callers keep working. */
int uv_resident_set_memory(size_t* rss) {
  *rss = 0;
  return 0;
}
/* System uptime is not implemented for IBM i. */
int uv_uptime(double* uptime) {
  return UV_ENOSYS;
}
/* Allocates and fills *cpu_infos with one entry per online CPU.
 * IBM i PASE exposes no per-CPU speed or time accounting, so every entry
 * carries placeholder values; callers still get a correctly sized array.
 * Returns 0, UV_EINVAL when the CPU count cannot be determined, or
 * UV_ENOMEM on allocation failure. Free with uv_free_cpu_info(). */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  uv_cpu_info_t* cpu_info;
  long numcpus;
  long idx;

  *cpu_infos = NULL;
  *count = 0;

  /* sysconf() returns -1 on error; the previous unsigned arithmetic
   * turned that into numcpus == UINT_MAX and a multi-gigabyte
   * allocation request. Fail explicitly instead. */
  numcpus = sysconf(_SC_NPROCESSORS_ONLN);
  if (numcpus <= 0)
    return UV_EINVAL;

  *cpu_infos = uv__malloc(numcpus * sizeof(uv_cpu_info_t));
  if (*cpu_infos == NULL)
    return UV_ENOMEM;

  cpu_info = *cpu_infos;
  for (idx = 0; idx < numcpus; idx++) {
    cpu_info->speed = 0;
    cpu_info->model = uv__strdup("unknown");
    cpu_info->cpu_times.user = 0;
    cpu_info->cpu_times.sys = 0;
    cpu_info->cpu_times.idle = 0;
    cpu_info->cpu_times.irq = 0;
    cpu_info->cpu_times.nice = 0;
    cpu_info++;
  }

  *count = (int) numcpus;
  return 0;
}
/* Resolves the MAC ("physical") address of the line description named by
 * `line` via the IBM i QDCRLIND API (format LIND0500), invoked from PASE
 * through _PGMCALL. On success the six address bytes are written to
 * *phys_addr and 0 is returned. On parse failure *phys_addr is zeroed and
 * -1 returned; resolution/call errors return their non-zero code with
 * *phys_addr untouched. */
static int get_ibmi_physical_address(const char* line, char (*phys_addr)[6]) {
  LIND0500 rcvr;
  /* rcvrlen is input parameter 2 to QDCRLIND */
  unsigned int rcvrlen = sizeof(rcvr);
  unsigned char format[8], line_name[10];
  /* One byte larger than the EBCDIC source field so the converted ASCII
   * adapter address can be NUL-terminated before sscanf() parses it; the
   * previous code scanned an unterminated buffer, which is undefined
   * behavior. */
  unsigned char mac_addr[sizeof(rcvr.loca_adapter_address) + 1];
  int c[6];
  /* format is input parameter 3 to QDCRLIND */
  iconv_a2e("LIND0500", format, sizeof(format));
  /* line_name is input parameter 4 to QDCRLIND */
  iconv_a2e(line, line_name, sizeof(line_name));
  /* err is input parameter 5 to QDCRLIND */
  errcode_s err;
  /* qdcrlind_pointer is the 16-byte tagged system pointer to QDCRLIND */
  ILEpointer __attribute__((aligned(16))) qdcrlind_pointer;
  /* qdcrlind_argv is the array of argument pointers to QDCRLIND */
  void* qdcrlind_argv[6];
  /* Set the IBM i pointer to the QSYS/QDCRLIND *PGM object */
  int rc = _RSLOBJ2(&qdcrlind_pointer, RSLOBJ_TS_PGM, "QDCRLIND", "QSYS");
  if (rc != 0)
    return rc;
  /* initialize the QDCRLIND returned info structure */
  memset(&rcvr, 0, sizeof(rcvr));
  /* initialize the QDCRLIND error code structure */
  memset(&err, 0, sizeof(err));
  err.bytes_provided = sizeof(err);
  /* initialize the array of argument pointers for the QDCRLIND API */
  qdcrlind_argv[0] = &rcvr;
  qdcrlind_argv[1] = &rcvrlen;
  qdcrlind_argv[2] = &format;
  qdcrlind_argv[3] = &line_name;
  qdcrlind_argv[4] = &err;
  qdcrlind_argv[5] = NULL;
  /* Call the IBM i QDCRLIND API from PASE */
  rc = _PGMCALL(&qdcrlind_pointer, qdcrlind_argv, 0);
  if (rc != 0)
    return rc;
  /* The call itself succeeded but the API reported an error through its
   * error-code structure. */
  if (err.bytes_available > 0) {
    return -1;
  }
  /* convert ebcdic loca_adapter_address to ascii first */
  iconv_e2a(rcvr.loca_adapter_address, mac_addr,
            sizeof(rcvr.loca_adapter_address));
  mac_addr[sizeof(rcvr.loca_adapter_address)] = '\0';
  /* convert loca_adapter_address(char[12]) to phys_addr(char[6]) */
  int r = sscanf((char*) mac_addr, "%02x%02x%02x%02x%02x%02x",
                 &c[0], &c[1], &c[2], &c[3], &c[4], &c[5]);
  if (r == ARRAY_SIZE(c)) {
    (*phys_addr)[0] = c[0];
    (*phys_addr)[1] = c[1];
    (*phys_addr)[2] = c[2];
    (*phys_addr)[3] = c[3];
    (*phys_addr)[4] = c[4];
    (*phys_addr)[5] = c[5];
  } else {
    memset(*phys_addr, 0, sizeof(*phys_addr));
    rc = -1;
  }
  return rc;
}
/* Enumerates up-and-running IPv4/IPv6 interfaces via Qp2getifaddrs().
 * On success *addresses points to one heap block holding the interface
 * array immediately followed by the name strings, and *count holds the
 * array length; release it with uv_free_interface_addresses(). Returns
 * 0, UV_ENOSYS when Qp2getifaddrs() fails, or UV_ENOMEM. */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs_pase *ifap = NULL, *cur;
  size_t namelen;
  char* name;
  int inet6, r = 0;
  *count = 0;
  *addresses = NULL;
  if (Qp2getifaddrs(&ifap))
    return UV_ENOSYS;
  /* First pass: count qualifying interfaces and total name-string bytes
   * so everything fits in a single allocation. */
  namelen = 0;
  for (cur = ifap; cur; cur = cur->ifa_next) {
    if (!(cur->ifa_addr->sa_family == AF_INET6 ||
          cur->ifa_addr->sa_family == AF_INET))
      continue;
    if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
      continue;
    namelen += strlen(cur->ifa_name) + 1;
    (*count)++;
  }
  if (*count == 0) {
    Qp2freeifaddrs(ifap);
    return 0;
  }
  /* Alloc the return interface structs: array first, name strings packed
   * directly after the last element. */
  *addresses = uv__calloc(1, *count * sizeof(**addresses) + namelen);
  if (*addresses == NULL) {
    Qp2freeifaddrs(ifap);
    return UV_ENOMEM;
  }
  name = (char*) &(*addresses)[*count];
  address = *addresses;
  /* Second pass: fill in the array. */
  for (cur = ifap; cur; cur = cur->ifa_next) {
    if (!(cur->ifa_addr->sa_family == AF_INET6 ||
          cur->ifa_addr->sa_family == AF_INET))
      continue;
    if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
      continue;
    namelen = strlen(cur->ifa_name) + 1;
    address->name = memcpy(name, cur->ifa_name, namelen);
    name += namelen;
    inet6 = (cur->ifa_addr->sa_family == AF_INET6);
    if (inet6) {
      address->address.address6 = *((struct sockaddr_in6*)cur->ifa_addr);
      address->netmask.netmask6 = *((struct sockaddr_in6*)cur->ifa_netmask);
      address->netmask.netmask6.sin6_family = AF_INET6;
    } else {
      address->address.address4 = *((struct sockaddr_in*)cur->ifa_addr);
      address->netmask.netmask4 = *((struct sockaddr_in*)cur->ifa_netmask);
      address->netmask.netmask4.sin_family = AF_INET;
    }
    address->is_internal = cur->ifa_flags & IFF_LOOPBACK ? 1 : 0;
    if (!address->is_internal) {
      int rc = -1;
      size_t name_len = strlen(address->name);
      /* To get the associated MAC address, we must convert the address to a
       * line description. Normally, the name field contains the line
       * description name, but for VLANs it has the VLAN appended with a
       * period. Since object names can also contain periods and numbers, there
       * is no way to know if a returned name is for a VLAN or not. eg.
       * *LIND ETH1.1 and *LIND ETH1, VLAN 1 both have the same name: ETH1.1
       *
       * Instead, we apply the same heuristic used by some of the XPF ioctls:
       * - names > 10 *must* contain a VLAN
       * - assume names <= 10 do not contain a VLAN and try directly
       * - if >10 or QDCRLIND returned an error, try to strip off a VLAN
       *   and try again
       * - if we still get an error or couldn't find a period, leave the MAC as
       *   00:00:00:00:00:00
       */
      if (name_len <= 10) {
        /* Assume name does not contain a VLAN ID */
        rc = get_ibmi_physical_address(address->name, &address->phys_addr);
      }
      if (name_len > 10 || rc != 0) {
        /* The interface name must contain a VLAN ID suffix. Attempt to strip
         * it off so we can get the line description to pass to QDCRLIND.
         */
        char* temp_name = uv__strdup(address->name);
        /* uv__strdup() can fail under memory pressure; the previous code
         * passed NULL straight into strrchr(). Leave the MAC zeroed (from
         * uv__calloc) in that case. */
        if (temp_name != NULL) {
          char* dot = strrchr(temp_name, '.');
          if (dot != NULL) {
            *dot = '\0';
            if (strlen(temp_name) <= 10) {
              rc = get_ibmi_physical_address(temp_name, &address->phys_addr);
            }
          }
          uv__free(temp_name);
        }
      }
    }
    address++;
  }
  Qp2freeifaddrs(ifap);
  return r;
}
/* The interface array, its embedded addresses and the trailing name
 * strings were allocated as one uv__calloc() block in
 * uv_interface_addresses(), so a single uv__free() releases everything;
 * `count` is unused. */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
  int count) {
  uv__free(addresses);
}
/* Records the executable's absolute path (for uv_exepath()) from argv[0].
 * No argv copy is made and argv is returned unchanged; the process title
 * itself is not modifiable on this platform. */
char** uv_setup_args(int argc, char** argv) {
  char exepath[UV__PATH_MAX];
  char* s;  /* NOTE(review): unused in this function -- candidate for removal. */
  size_t size;
  if (argc > 0) {
    /* Use argv[0] to determine value for uv_exepath(). */
    size = sizeof(exepath);
    if (uv__search_path(argv[0], exepath, &size) == 0) {
      /* The mutex guards the shared original_exepath pointer. */
      uv_once(&process_title_mutex_once, init_process_title_mutex_once);
      uv_mutex_lock(&process_title_mutex);
      original_exepath = uv__strdup(exepath);
      uv_mutex_unlock(&process_title_mutex);
    }
  }
  return argv;
}
/* Setting the process title is unsupported here; report success so
 * portable callers need no special-casing. */
int uv_set_process_title(const char* title) {
  return 0;
}
/* Process titles cannot be read on this platform; yield an empty string
 * so callers still receive a valid NUL-terminated buffer. Returns
 * UV_EINVAL on a NULL or zero-sized buffer. */
int uv_get_process_title(char* buffer, size_t size) {
  if (size == 0 || buffer == NULL)
    return UV_EINVAL;

  *buffer = '\0';
  return 0;
}
/* No per-process title resources are allocated on this platform, so
 * there is nothing to release here. */
void uv__process_title_cleanup(void) {
}

View File

@@ -0,0 +1,530 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_UNIX_INTERNAL_H_
#define UV_UNIX_INTERNAL_H_
#include "uv-common.h"
#include <assert.h>
#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
#include <stdint.h>
#include <stdlib.h> /* abort */
#include <string.h> /* strrchr */
#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(__APPLE__) || defined(__DragonFly__) || \
defined(__FreeBSD__) || defined(__NetBSD__)
#include <sys/event.h>
#endif
#define uv__msan_unpoison(p, n) \
do { \
(void) (p); \
(void) (n); \
} while (0)
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
# include <sanitizer/msan_interface.h>
# undef uv__msan_unpoison
# define uv__msan_unpoison __msan_unpoison
# endif
#endif
#if defined(__STRICT_ANSI__)
# define inline __inline
#endif
#if defined(__MVS__)
# include "os390-syscalls.h"
#endif /* __MVS__ */
#if defined(__sun)
# include <sys/port.h>
# include <port.h>
#endif /* __sun */
#if defined(_AIX)
# define reqevents events
# define rtnevents revents
# include <sys/poll.h>
#else
# include <poll.h>
#endif /* _AIX */
#if defined(__APPLE__)
# include "darwin-syscalls.h"
# if !TARGET_OS_IPHONE
# include <AvailabilityMacros.h>
# endif
#endif
/*
* Define common detection for active Thread Sanitizer
* - clang uses __has_feature(thread_sanitizer)
* - gcc-7+ uses __SANITIZE_THREAD__
*/
#if defined(__has_feature)
# if __has_feature(thread_sanitizer)
# define __SANITIZE_THREAD__ 1
# endif
#endif
#if defined(PATH_MAX)
# define UV__PATH_MAX PATH_MAX
#else
# define UV__PATH_MAX 8192
#endif
/* Scratch storage big enough for an IPv4 or IPv6 socket address that can
 * also be viewed as the generic struct sockaddr. */
union uv__sockaddr {
  struct sockaddr_in6 in6;
  struct sockaddr_in in;
  struct sockaddr addr;
};
/* Force exactly one load/store of `var` by going through a volatile
 * lvalue, preventing the optimizer from caching or merging the access. */
#define ACCESS_ONCE(type, var)                                                \
  (*(volatile type*) &(var))
/* Round `a` up to the next multiple of `b`.
 * NOTE(review): arguments are evaluated more than once -- callers must
 * avoid side effects. */
#define ROUND_UP(a, b)                                                        \
  ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a))
/* Assert-and-abort for states that must never be reached; the abort()
 * fires even when NDEBUG disables assert(). */
#define UNREACHABLE()                                                         \
  do {                                                                        \
    assert(0 && "unreachable code");                                          \
    abort();                                                                  \
  }                                                                           \
  while (0)
/* Execute `block` with errno saved and restored around it, so best-effort
 * cleanup code cannot clobber a meaningful errno. */
#define SAVE_ERRNO(block)                                                     \
  do {                                                                        \
    int _saved_errno = errno;                                                 \
    do { block; } while (0);                                                  \
    errno = _saved_errno;                                                     \
  }                                                                           \
  while (0)
/* The __clang__ and __INTEL_COMPILER checks are superfluous because they
* define __GNUC__. They are here to convey to you, dear reader, that these
* macros are enabled when compiling with clang or icc.
*/
#if defined(__clang__) || \
defined(__GNUC__) || \
defined(__INTEL_COMPILER)
# define UV_UNUSED(declaration) __attribute__((unused)) declaration
#else
# define UV_UNUSED(declaration) declaration
#endif
/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */
#ifdef POLLRDHUP
# define UV__POLLRDHUP POLLRDHUP
#else
# define UV__POLLRDHUP 0x2000
#endif
#ifdef POLLPRI
# define UV__POLLPRI POLLPRI
#else
# define UV__POLLPRI 0
#endif
#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
/*
* It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
* Try using fixed value const and give up, if it doesn't work
*/
# define O_CLOEXEC 0x00100000
#endif
/* Forward declaration; the struct is defined just below. */
typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
/* loop flags (bits stored in loop->flags) */
enum {
  UV_LOOP_BLOCK_SIGPROF = 0x1,  /* Block SIGPROF while waiting for I/O. */
  UV_LOOP_REAP_CHILDREN = 0x2,  /* Child process(es) pending reaping. */
  UV_LOOP_ENABLE_IO_URING_SQPOLL = 0x4  /* io_uring SQPOLL opt-in (Linux). */
};
/* flags of excluding ifaddr */
enum {
  UV__EXCLUDE_IFPHYS,  /* presumably: skip link-layer (physical) entries */
  UV__EXCLUDE_IFADDR   /* presumably: skip network-layer address entries */
};
typedef enum {
  UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
  UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
} uv_clocktype_t;
/* Buffer of file descriptors queued on a stream; fds[] uses the pre-C99
 * [1] trailing-array idiom and is allocated with extra space. `size` is
 * the capacity; `offset` presumably tracks the fill/consume position --
 * see the stream code (not shown here). */
struct uv__stream_queued_fds_s {
  unsigned int size;
  unsigned int offset;
  int fds[1];
};
#ifdef __linux__
/* Local mirror of the kernel's struct statx_timestamp so libuv can issue
 * the statx(2) syscall without requiring recent kernel headers. */
struct uv__statx_timestamp {
  int64_t tv_sec;
  uint32_t tv_nsec;
  int32_t unused0;
};
/* Local mirror of the kernel's struct statx (see statx(2)). Field order,
 * widths and padding must match the kernel ABI exactly -- do not reorder
 * or repack. */
struct uv__statx {
  uint32_t stx_mask;
  uint32_t stx_blksize;
  uint64_t stx_attributes;
  uint32_t stx_nlink;
  uint32_t stx_uid;
  uint32_t stx_gid;
  uint16_t stx_mode;
  uint16_t unused0;
  uint64_t stx_ino;
  uint64_t stx_size;
  uint64_t stx_blocks;
  uint64_t stx_attributes_mask;
  struct uv__statx_timestamp stx_atime;
  struct uv__statx_timestamp stx_btime;
  struct uv__statx_timestamp stx_ctime;
  struct uv__statx_timestamp stx_mtime;
  uint32_t stx_rdev_major;
  uint32_t stx_rdev_minor;
  uint32_t stx_dev_major;
  uint32_t stx_dev_minor;
  uint64_t unused1[14];
};
#endif /* __linux__ */
#if defined(_AIX) || \
defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__linux__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
#define uv__nonblock uv__nonblock_ioctl
#define UV__NONBLOCK_IS_IOCTL 1
#else
#define uv__nonblock uv__nonblock_fcntl
#define UV__NONBLOCK_IS_IOCTL 0
#endif
/* On Linux, uv__nonblock_fcntl() and uv__nonblock_ioctl() do not commute
* when O_NDELAY is not equal to O_NONBLOCK. Case in point: linux/sparc32
* and linux/sparc64, where O_NDELAY is O_NONBLOCK + another bit.
*
* Libuv uses uv__nonblock_fcntl() directly sometimes so ensure that it
* commutes with uv__nonblock().
*/
#if defined(__linux__) && O_NDELAY != O_NONBLOCK
#undef uv__nonblock
#define uv__nonblock uv__nonblock_fcntl
#endif
/* core */
int uv__cloexec(int fd, int set);
int uv__nonblock_ioctl(int fd, int set);
int uv__nonblock_fcntl(int fd, int set);
int uv__close(int fd); /* preserves errno */
int uv__close_nocheckstdio(int fd);
int uv__close_nocancel(int fd);
int uv__socket(int domain, int type, int protocol);
int uv__sock_reuseport(int fd);
ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags);
void uv__make_close_pending(uv_handle_t* handle);
int uv__getiovmax(void);
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
int uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__io_init_start(uv_loop_t* loop,
uv__io_t* w,
uv__io_cb cb,
int fd,
unsigned int events);
void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
void uv__io_close(uv_loop_t* loop, uv__io_t* w);
void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
int uv__io_check_fd(uv_loop_t* loop, int fd);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
int uv__io_fork(uv_loop_t* loop);
int uv__fd_exists(uv_loop_t* loop, int fd);
/* async */
void uv__async_stop(uv_loop_t* loop);
int uv__async_fork(uv_loop_t* loop);
/* loop */
void uv__run_idle(uv_loop_t* loop);
void uv__run_check(uv_loop_t* loop);
void uv__run_prepare(uv_loop_t* loop);
/* stream */
void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
uv_handle_type type);
int uv__stream_open(uv_stream_t*, int fd, int flags);
void uv__stream_destroy(uv_stream_t* stream);
#if defined(__APPLE__)
int uv__stream_try_select(uv_stream_t* stream, int* fd);
#endif /* defined(__APPLE__) */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__accept(int sockfd);
int uv__dup2_cloexec(int oldfd, int newfd);
int uv__open_cloexec(const char* path, int flags);
int uv__slurp(const char* filename, char* buf, size_t len);
/* tcp */
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_nodelay(int fd, int on);
int uv__tcp_keepalive(int fd, int on, unsigned int delay);
/* tty */
void uv__tty_close(uv_tty_t* handle);
/* pipe */
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
/* signal */
void uv__signal_close(uv_signal_t* handle);
void uv__signal_global_once_init(void);
void uv__signal_loop_cleanup(uv_loop_t* loop);
int uv__signal_loop_fork(uv_loop_t* loop);
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
int uv__process_init(uv_loop_t* loop);
/* various */
void uv__async_close(uv_async_t* handle);
void uv__check_close(uv_check_t* handle);
void uv__fs_event_close(uv_fs_event_t* handle);
void uv__idle_close(uv_idle_t* handle);
void uv__pipe_close(uv_pipe_t* handle);
void uv__poll_close(uv_poll_t* handle);
void uv__prepare_close(uv_prepare_t* handle);
void uv__process_close(uv_process_t* handle);
void uv__stream_close(uv_stream_t* handle);
void uv__tcp_close(uv_tcp_t* handle);
int uv__thread_setname(const char* name);
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size);
size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
FILE* uv__open_file(const char* path);
int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop);
/* random */
int uv__random_devurandom(void* buf, size_t buflen);
int uv__random_getrandom(void* buf, size_t buflen);
int uv__random_getentropy(void* buf, size_t buflen);
int uv__random_readpath(const char* path, void* buf, size_t buflen);
int uv__random_sysctl(void* buf, size_t buflen);
/* io_uring */
#ifdef __linux__
int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
uv_fs_t* req,
uint32_t fsync_flags);
int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_read_or_write(uv_loop_t* loop,
uv_fs_t* req,
int is_read);
int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_statx(uv_loop_t* loop,
uv_fs_t* req,
int is_fstat,
int is_lstat);
int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req);
#else
#define uv__iou_fs_close(loop, req) 0
#define uv__iou_fs_ftruncate(loop, req) 0
#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
#define uv__iou_fs_link(loop, req) 0
#define uv__iou_fs_mkdir(loop, req) 0
#define uv__iou_fs_open(loop, req) 0
#define uv__iou_fs_read_or_write(loop, req, is_read) 0
#define uv__iou_fs_rename(loop, req) 0
#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
#define uv__iou_fs_symlink(loop, req) 0
#define uv__iou_fs_unlink(loop, req) 0
#endif
#if defined(__APPLE__)
int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
#else
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
int uv__fsevents_init(uv_fs_event_t* handle);
int uv__fsevents_close(uv_fs_event_t* handle);
void uv__fsevents_loop_delete(uv_loop_t* loop);
#endif /* defined(__APPLE__) */
/* Refreshes the loop's cached clock (loop->time, in milliseconds). */
UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
  /* Use a fast time source if available. We only need millisecond precision.
   */
  loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;  /* ns -> ms */
}
/* Thread-safe basename(): returns a pointer into `path` at the final
 * component (the text after the last '/'), or `path` itself when it
 * contains no slash. The input is never modified or copied. */
UV_UNUSED(static char* uv__basename_r(const char* path)) {
  char* slash;

  slash = strrchr(path, '/');
  return slash != NULL ? slash + 1 : (char*) path;
}
/* fstat() wrapper that tells MemorySanitizer the output buffer was
 * initialized by the kernel (MSan cannot see into syscalls); a no-op
 * when MSan is not enabled. */
UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) {
  int rc;
  rc = fstat(fd, s);
  if (rc >= 0)
    uv__msan_unpoison(s, sizeof(*s));
  return rc;
}
/* lstat() wrapper that unpoisons the result for MemorySanitizer; see
 * uv__msan_unpoison. */
UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) {
  int rc;
  rc = lstat(path, s);
  if (rc >= 0)
    uv__msan_unpoison(s, sizeof(*s));
  return rc;
}
/* stat() wrapper that unpoisons the result for MemorySanitizer; see
 * uv__msan_unpoison. */
UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) {
  int rc;
  rc = stat(path, s);
  if (rc >= 0)
    uv__msan_unpoison(s, sizeof(*s));
  return rc;
}
#if defined(__linux__)
void uv__fs_post(uv_loop_t* loop, uv_fs_t* req);
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf);
void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
unsigned uv__kernel_version(void);
#endif
typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
int uv__getsockpeername(const uv_handle_t* handle,
uv__peersockfunc func,
struct sockaddr* name,
int* namelen);
#if defined(__sun)
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen);
#endif
#endif
#if defined(__FreeBSD__)
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
#endif
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000)
#define UV__CPU_AFFINITY_SUPPORTED 1
#else
#define UV__CPU_AFFINITY_SUPPORTED 0
#endif
#ifdef __linux__
int uv__get_constrained_cpu(long long* quota);
#endif
#if defined(__sun) && !defined(__illumos__)
#ifdef SO_FLOW_NAME
/* Since it's impossible to detect the Solaris 11.4 version via OS macros,
* so we check the presence of the socket option SO_FLOW_NAME that was first
* introduced to Solaris 11.4 and define a custom macro for determining 11.4.
*/
#define UV__SOLARIS_11_4 (1)
#else
#define UV__SOLARIS_11_4 (0)
#endif
#endif
#if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
/* EVFILT_USER is available since OS X 10.6, DragonFlyBSD 4.0,
* FreeBSD 8.1, and NetBSD 10.0.
*
* Note that even though EVFILT_USER is defined on the current system,
* it may still fail to work at runtime somehow. In that case, we fall
* back to pipe-based signaling.
*/
#define UV__KQUEUE_EVFILT_USER 1
/* Magic number of identifier used for EVFILT_USER during runtime detection.
* There are no Google hits for this number when I create it. That way,
* people will be directed here if this number gets printed due to some
* kqueue error and they google for help. */
#define UV__KQUEUE_EVFILT_USER_IDENT 0x1e7e7711
#else
#define UV__KQUEUE_EVFILT_USER 0
#endif
#endif /* UV_UNIX_INTERNAL_H_ */

View File

@@ -0,0 +1,667 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#if defined(__FreeBSD__)
#include <sys/user.h>
#endif
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
/*
* Required on
* - Until at least FreeBSD 11.0
* - Older versions of Mac OS X
*
* http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
*/
#ifndef EV_OOBAND
#define EV_OOBAND EV_FLAG1
#endif
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
/* Creates the kqueue descriptor that backs this loop and marks it
 * close-on-exec. Returns 0 on success or the translated errno. */
int uv__kqueue_init(uv_loop_t* loop) {
  int kq;

  kq = kqueue();
  if (kq == -1)
    return UV__ERR(errno);

  loop->backend_fd = kq;
  uv__cloexec(kq, 1);
  return 0;
}
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
/* Set (never cleared) in uv__io_fork() when the parent loop had
 * CFRunLoop-based fsevents state; consulted elsewhere so child processes
 * avoid touching CoreFoundation after fork(). */
static _Atomic int uv__has_forked_with_cfrunloop;
#endif
/* Re-creates the loop's kqueue in a child process after fork(); kqueue
 * descriptors are not inherited across fork. On Apple platforms any
 * CFRunLoop-based fsevents state is abandoned rather than cleaned up,
 * because CoreFoundation cannot be used in the child. */
int uv__io_fork(uv_loop_t* loop) {
  int err;
  loop->backend_fd = -1;
  err = uv__kqueue_init(loop);
  if (err)
    return err;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  if (loop->cf_state != NULL) {
    /* We cannot start another CFRunloop and/or thread in the child
       process; CF aborts if you try or if you try to touch the thread
       at all to kill it. So the best we can do is ignore it from now
       on. This means we can't watch directories in the same way
       anymore (like other BSDs). It also means we cannot properly
       clean up the allocated resources; calling
       uv__fsevents_loop_delete from uv_loop_close will crash the
       process. So we sidestep the issue by pretending like we never
       started it in the first place.
    */
    atomic_store_explicit(&uv__has_forked_with_cfrunloop,
                          1,
                          memory_order_relaxed);
    uv__free(loop->cf_state);
    loop->cf_state = NULL;
  }
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
  return err;
}
/* Checks whether `fd` can usefully be registered with kqueue (called
 * before accepting an fd for uv_poll). Regular files, directories and --
 * on Darwin -- FIFOs are rejected with UV_EINVAL; otherwise the fd is
 * test-registered and immediately deregistered to prove kqueue accepts
 * it. Returns 0 on success or a translated errno. */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev[2];
  struct stat sb;
#ifdef __APPLE__
  char path[MAXPATHLEN];
#endif
  if (uv__fstat(fd, &sb))
    return UV__ERR(errno);
  /* On FreeBSD, kqueue only supports EVFILT_READ notification for regular files
   * and always reports ready events for writing, resulting in busy-looping.
   *
   * On Darwin, DragonFlyBSD, NetBSD and OpenBSD, kqueue reports ready events for
   * regular files as readable and writable only once, acting like an EV_ONESHOT.
   *
   * Neither of the above cases should be added to the kqueue.
   */
  if (S_ISREG(sb.st_mode) || S_ISDIR(sb.st_mode))
    return UV_EINVAL;
#ifdef __APPLE__
  /* On Darwin (both macOS and iOS), in addition to regular files, FIFOs also don't
   * work properly with kqueue: the disconnection from the last writer won't trigger
   * an event for kqueue in spite of what the man pages say. Thus, we also disallow
   * the case of S_IFIFO. */
  if (S_ISFIFO(sb.st_mode)) {
    /* File descriptors of FIFO, pipe and kqueue share the same type of file,
     * therefore there is no way to tell them apart via stat.st_mode&S_IFMT.
     * Fortunately, FIFO is the only one that has a persisted file on filesystem,
     * from which we're able to make the distinction for it. */
    if (!fcntl(fd, F_GETPATH, path))
      return UV_EINVAL;
  }
#endif
  /* Add-then-delete round trip: if kqueue rejects the registration, the
   * fd is unsuitable. */
  EV_SET(ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  EV_SET(ev + 1, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (kevent(loop->backend_fd, ev, 2, NULL, 0, NULL))
    return UV__ERR(errno);
  return 0;
}
/* Removes the (ident, filter) registration described by `ev` from the
 * kqueue `kqfd`. EBADF (fd already closed) and ENOENT (event already
 * gone) are benign races and are ignored; any other failure aborts. */
static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
  struct kevent removal;

  EV_SET(&removal, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
  if (kevent(kqfd, &removal, 1, NULL, 0, NULL) == 0)
    return;

  if (errno != EBADF && errno != ENOENT)
    abort();
}
/* The kqueue-backed "poll for I/O" phase of the event loop: flushes
 * pending watcher registrations into the kernel, waits up to `timeout`
 * milliseconds (-1 blocks indefinitely) for events, then dispatches
 * watcher callbacks. Signal watchers are deliberately run last. */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  uv__loop_internal_fields_t* lfields;
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  struct uv__queue* q;
  uv__io_t* w;
  uv_process_t* process;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;
  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }
  lfields = uv__get_internal_fields(loop);
  nevents = 0;
  /* Register every watcher whose interest set changed since the last
   * poll. Changes are batched into `events` and flushed to the kernel
   * whenever the array fills. */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);
    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;
      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }
      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }
    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }
    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }
    w->events = w->pevents;
  }
  /* Optionally block SIGPROF while blocked in kevent(); see
   * UV_LOOP_BLOCK_SIGPROF. */
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }
  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  /* When idle-time metrics are enabled, poll non-blocking first so the
   * provider entry time can be recorded, then restore the caller's
   * timeout for subsequent iterations. */
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }
  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);
    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }
    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);
    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;
    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);
    if (nfds == -1)
      assert(errno == EINTR);
    else if (nfds == 0)
      /* Unlimited timeout should only return with events or signal. */
      assert(timeout != -1);
    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    uv__update_time(loop);
    if (nfds == 0 || nfds == -1) {
      /* If kqueue is empty or interrupted, we might still have children ready
       * to reap immediately. */
      if (loop->flags & UV_LOOP_REAP_CHILDREN) {
        loop->flags &= ~UV_LOOP_REAP_CHILDREN;
        uv__wait_children(loop);
        assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
        return; /* Equivalent to fall-through behavior. */
      }
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      } else if (nfds == 0) {
        return;
      }
      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }
    have_signals = 0;
    nevents = 0;
    assert(loop->watchers != NULL);
    /* Stash the event array/count so uv__platform_invalidate_fd() can
     * find (and invalidate) pending events for fds closed mid-dispatch. */
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;
      /* Handle kevent NOTE_EXIT results */
      if (ev->filter == EVFILT_PROC) {
        uv__queue_foreach(q, &loop->process_handles) {
          process = uv__queue_data(q, uv_process_t, queue);
          if (process->pid == fd) {
            process->flags |= UV_HANDLE_REAP;
            loop->flags |= UV_LOOP_REAP_CHILDREN;
            break;
          }
        }
        nevents++;
        continue;
      }
      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];
      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it. */
        uv__kqueue_delete(loop->backend_fd, ev);
        continue;
      }
#if UV__KQUEUE_EVFILT_USER
      if (ev->filter == EVFILT_USER) {
        w = &loop->async_io_watcher;
        assert(fd == w->fd);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, w->events);
        nevents++;
        continue;
      }
#endif
      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }
      /* Translate kqueue filters back into poll()-style revents; filters
       * the watcher no longer cares about are deregistered lazily here. */
      revents = 0;
      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN)
          revents |= POLLIN;
        else
          uv__kqueue_delete(loop->backend_fd, ev);
        if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
          revents |= UV__POLLRDHUP;
      }
      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI)
          revents |= UV__POLLPRI;
        else
          uv__kqueue_delete(loop->backend_fd, ev);
      }
      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT)
          revents |= POLLOUT;
        else
          uv__kqueue_delete(loop->backend_fd, ev);
      }
      if (ev->flags & EV_ERROR)
        revents |= POLLERR;
      if (revents == 0)
        continue;
      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }
      nevents++;
    }
    if (loop->flags & UV_LOOP_REAP_CHILDREN) {
      loop->flags &= ~UV_LOOP_REAP_CHILDREN;
      uv__wait_children(loop);
    }
    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }
    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;
    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */
    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }
update_timeout:
    if (timeout == 0)
      return;
    if (timeout == -1)
      continue;
    assert(timeout > 0);
    /* Deduct the time already spent waiting from the remaining timeout. */
    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;
    timeout -= diff;
  }
}
/* Invalidate any queued kevent entries that refer to `fd` so the event
 * processing loop will skip them. Called when a file descriptor is closed
 * while a batch of events may still be in flight. EVFILT_PROC entries are
 * left alone: their ident is a pid, not a file descriptor.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* pending;
  uintptr_t npending;
  uintptr_t idx;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  /* The poll loop stashes the current event batch and its length in the
   * two slots past the end of the watchers array.
   */
  pending = (struct kevent*) loop->watchers[loop->nwatchers];
  npending = (uintptr_t) loop->watchers[loop->nwatchers + 1];

  if (pending == NULL)
    return;

  /* Mark matching entries; ident == -1 means "skip" to the poll loop. */
  for (idx = 0; idx < npending; idx++) {
    if (pending[idx].filter == EVFILT_PROC)
      continue;
    if ((int) pending[idx].ident == fd)
      pending[idx].ident = -1;
  }
}
/* I/O callback for kqueue-backed uv_fs_event_t watchers. Translates the
 * EVFILT_VNODE fflags into UV_CHANGE/UV_RENAME, tries to recover the
 * watched file's basename, invokes the user callback and then re-arms the
 * one-shot kevent. Aborts the process if re-arming fails.
 */
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif
  handle = container_of(w, uv_fs_event_t, event_watcher);
  /* Attribute or size changes map to UV_CHANGE; everything else (rename,
   * delete, revoke, write) is reported as UV_RENAME, matching the event
   * split libuv exposes.
   */
  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;
  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#elif defined(F_KINFO)
  /* We try to get the file info reference from the file descriptor.
   * the struct's kf_structsize must be initialised beforehand
   * whether with the KINFO_FILE_SIZE constant or this way.
   */
  struct stat statbuf;
  struct kinfo_file kf;
  /* Directories are skipped: F_KINFO path retrieval is only attempted for
   * regular files here.
   */
  if (handle->event_watcher.fd != -1 &&
     (!uv__fstat(handle->event_watcher.fd, &statbuf) && !(statbuf.st_mode & S_IFDIR))) {
    /* we are purposely not using KINFO_FILE_SIZE here
     * as it is not available on non intl archs
     * and here it gives 1392 too on intel.
     * anyway, the man page also mentions we can proceed
     * this way.
     */
    kf.kf_structsize = sizeof(kf);
    if (fcntl(handle->event_watcher.fd, F_KINFO, &kf) == 0)
      path = uv__basename_r(kf.kf_path);
  }
#endif
  handle->cb(handle, path, events, 0);
  /* The callback may have called uv_fs_event_stop(), which sets fd to -1;
   * in that case there is nothing left to re-arm.
   */
  if (handle->event_watcher.fd == -1)
    return;
  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}
/* Initialize a uv_fs_event_t handle. The watcher does nothing until
 * uv_fs_event_start() is called. Always succeeds.
 */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}
/* Start watching `path` for changes. On macOS this prefers the FSEvents
 * backend for directories (unless the process forked while a CFRunLoop was
 * in use); everywhere else - and as the macOS fallback - it opens the path
 * and registers a kqueue EVFILT_VNODE watcher via uv__fs_event.
 *
 * Returns 0 on success, UV_EINVAL if already active, UV_ENOMEM on
 * allocation failure, or a translated errno from open()/backend setup.
 * On every error path the handle is left inactive with path == NULL and
 * no leaked file descriptor.
 */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int fd;
  int r;
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  struct stat statbuf;
#endif

  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (handle->path == NULL)
    return UV_ENOMEM;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(handle->path, O_RDONLY);
  if (fd == -1) {
    uv__free(handle->path);
    handle->path = NULL;
    return UV__ERR(errno);
  }

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* Nullify fields so uv_fs_event_stop() can tell whether the FSEvents
   * backend was actually set up.
   */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (uv__fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
                                memory_order_relaxed)) {
    /* The fallback fd is no longer needed */
    uv__close_nocheckstdio(fd);
    handle->event_watcher.fd = -1;
    r = uv__fsevents_init(handle);
    if (r == 0) {
      uv__handle_start(handle);
    } else {
      uv__free(handle->path);
      handle->path = NULL;
    }
    return r;
  }

fallback:
#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */

  r = uv__io_init_start(handle->loop,
                        &handle->event_watcher,
                        uv__fs_event,
                        fd,
                        POLLIN);
  if (r != 0) {
    /* Bug fix: the original returned here without releasing resources,
     * leaking both `fd` and the strdup'd `handle->path`.
     */
    uv__close(fd);
    uv__free(handle->path);
    handle->path = NULL;
    return r;
  }

  uv__handle_start(handle);
  return 0;
}
/* Stop a file watcher: tears down the FSEvents state on macOS (when it was
 * in use), closes the kqueue watcher fd otherwise, and releases the copied
 * path. Safe to call on an inactive handle (returns 0). Returns the result
 * of uv__fsevents_close() when that backend was active, else 0.
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  int r;
  r = 0;

  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
  /* cf_cb != NULL means the FSEvents backend was set up by
   * uv_fs_event_start(); only then is there CF state to tear down.
   */
  if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
                                memory_order_relaxed))
    if (handle->cf_cb != NULL)
      r = uv__fsevents_close(handle);
#endif

  /* fd is -1 when the FSEvents backend owned the watch; otherwise this is
   * the kqueue fallback fd and must be unregistered and closed here.
   */
  if (handle->event_watcher.fd != -1) {
    uv__io_close(handle->loop, &handle->event_watcher);
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  uv__free(handle->path);
  handle->path = NULL;

  return r;
}
/* Close hook for uv_fs_event_t handles: closing is just stopping, the
 * generic handle close machinery does the rest.
 */
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,68 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
/* Template that stamps out the complete implementation of one "loop
 * watcher" handle type: uv_<name>_init/start/stop, the per-iteration
 * runner uv__run_<name>, and the close hook uv__<name>_close.
 *
 * Notes on the generated code:
 *  - start() is idempotent (active handle returns 0) and rejects a NULL
 *    callback with UV_EINVAL.
 *  - uv__run_<name> moves the handle list onto a local queue first so that
 *    handles started from inside a callback are not run until the next
 *    loop iteration; each handle is re-inserted at the tail before its
 *    callback fires, which also makes stop() from the callback safe.
 */
#define UV_LOOP_WATCHER_DEFINE(name, type)                                    \
  int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) {              \
    uv__handle_init(loop, (uv_handle_t*)handle, UV_##type);                   \
    handle->name##_cb = NULL;                                                 \
    return 0;                                                                 \
  }                                                                           \
                                                                              \
  int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) {           \
    if (uv__is_active(handle)) return 0;                                      \
    if (cb == NULL) return UV_EINVAL;                                         \
    uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue);     \
    handle->name##_cb = cb;                                                   \
    uv__handle_start(handle);                                                 \
    return 0;                                                                 \
  }                                                                           \
                                                                              \
  int uv_##name##_stop(uv_##name##_t* handle) {                               \
    if (!uv__is_active(handle)) return 0;                                     \
    uv__queue_remove(&handle->queue);                                         \
    uv__handle_stop(handle);                                                  \
    return 0;                                                                 \
  }                                                                           \
                                                                              \
  void uv__run_##name(uv_loop_t* loop) {                                      \
    uv_##name##_t* h;                                                         \
    struct uv__queue queue;                                                   \
    struct uv__queue* q;                                                      \
    uv__queue_move(&loop->name##_handles, &queue);                            \
    while (!uv__queue_empty(&queue)) {                                        \
      q = uv__queue_head(&queue);                                             \
      h = uv__queue_data(q, uv_##name##_t, queue);                            \
      uv__queue_remove(q);                                                    \
      uv__queue_insert_tail(&loop->name##_handles, q);                        \
      h->name##_cb(h);                                                        \
    }                                                                         \
  }                                                                           \
                                                                              \
  void uv__##name##_close(uv_##name##_t* handle) {                            \
    uv_##name##_stop(handle);                                                 \
  }

/* Instantiate the three loop-watcher handle types. */
UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)

View File

@@ -0,0 +1,240 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv/tree.h"
#include "internal.h"
#include "heap-inl.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Initialize an event loop structure. The caller's `data` pointer is the
 * only field preserved across the memset. Resources are acquired in a
 * fixed order and unwound in reverse through the fail_* labels, so a
 * partial failure leaves nothing allocated. Returns 0 or a UV_* error.
 */
int uv_loop_init(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;
  void* saved_data;
  int err;

  /* Zero the whole struct but keep the user's data pointer. */
  saved_data = loop->data;
  memset(loop, 0, sizeof(*loop));
  loop->data = saved_data;

  lfields = uv__calloc(1, sizeof(*lfields));
  if (lfields == NULL)
    return UV_ENOMEM;
  loop->internal_fields = lfields;

  err = uv_mutex_init(&lfields->loop_metrics.lock);
  if (err)
    goto fail_metrics_mutex_init;
  memset(&lfields->loop_metrics.metrics,
         0,
         sizeof(lfields->loop_metrics.metrics));

  /* Plain field/queue setup; none of this can fail. */
  heap_init((struct heap*) &loop->timer_heap);
  uv__queue_init(&loop->wq);
  uv__queue_init(&loop->idle_handles);
  uv__queue_init(&loop->async_handles);
  uv__queue_init(&loop->check_handles);
  uv__queue_init(&loop->prepare_handles);
  uv__queue_init(&loop->handle_queue);

  loop->active_handles = 0;
  loop->active_reqs.count = 0;
  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  uv__queue_init(&loop->pending_queue);
  uv__queue_init(&loop->watcher_queue);

  loop->closing_handles = NULL;
  uv__update_time(loop);
  /* -1 marks every fd slot as "not open yet". */
  loop->async_io_watcher.fd = -1;
  loop->async_wfd = -1;
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  /* Platform backend (epoll/kqueue/...) first, then signal/process glue,
   * then the thread-pool plumbing.
   */
  err = uv__platform_loop_init(loop);
  if (err)
    goto fail_platform_init;

  uv__signal_global_once_init();
  err = uv__process_init(loop);
  if (err)
    goto fail_signal_init;
  uv__queue_init(&loop->process_handles);

  err = uv_rwlock_init(&loop->cloexec_lock);
  if (err)
    goto fail_rwlock_init;

  err = uv_mutex_init(&loop->wq_mutex);
  if (err)
    goto fail_mutex_init;

  /* Internal async handle that wakes the loop when thread-pool work is
   * done; unref'd so it doesn't keep the loop alive by itself.
   */
  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
  if (err)
    goto fail_async_init;

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV_HANDLE_INTERNAL;

  return 0;

/* Unwind in strict reverse order of acquisition. */
fail_async_init:
  uv_mutex_destroy(&loop->wq_mutex);

fail_mutex_init:
  uv_rwlock_destroy(&loop->cloexec_lock);

fail_rwlock_init:
  uv__signal_loop_cleanup(loop);

fail_signal_init:
  uv__platform_loop_delete(loop);
  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }

fail_platform_init:
  uv_mutex_destroy(&lfields->loop_metrics.lock);

fail_metrics_mutex_init:
  uv__free(lfields);
  loop->internal_fields = NULL;
  uv__free(loop->watchers);
  loop->nwatchers = 0;
  return err;
}
/* Re-establish loop state in a child process after fork(): recreate the
 * backend, the async wakeup machinery and the signal pipe, then queue
 * every still-interesting I/O watcher for re-registration with the new
 * backend on the next uv__io_poll pass. Returns 0 or the first error.
 */
int uv_loop_fork(uv_loop_t* loop) {
  uv__io_t* watcher;
  unsigned int slot;
  int rc;

  rc = uv__io_fork(loop);
  if (rc == 0)
    rc = uv__async_fork(loop);
  if (rc == 0)
    rc = uv__signal_loop_fork(loop);
  if (rc != 0)
    return rc;

  /* Rearm all the watchers that aren't re-queued by the above. */
  for (slot = 0; slot < loop->nwatchers; slot++) {
    watcher = loop->watchers[slot];
    if (watcher == NULL || watcher->pevents == 0)
      continue;
    if (!uv__queue_empty(&watcher->watcher_queue))
      continue;  /* Already pending re-registration. */

    watcher->events = 0;  /* Force re-registration in uv__io_poll. */
    uv__queue_insert_tail(&loop->watcher_queue, &watcher->watcher_queue);
  }

  return 0;
}
/* Tear down a loop previously set up by uv_loop_init(). Assumes all
 * handles are closed and all requests finished; asserts on a non-empty
 * thread-pool work queue or outstanding requests.
 */
void uv__loop_close(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;

  uv__signal_loop_cleanup(loop);
  uv__platform_loop_delete(loop);
  uv__async_stop(loop);

  if (loop->emfile_fd != -1) {
    uv__close(loop->emfile_fd);
    loop->emfile_fd = -1;
  }

  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }

  /* Taking the lock synchronizes with any worker that might still be
   * touching the queue before we destroy the mutex.
   */
  uv_mutex_lock(&loop->wq_mutex);
  assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
  assert(!uv__has_active_reqs(loop));
  uv_mutex_unlock(&loop->wq_mutex);
  uv_mutex_destroy(&loop->wq_mutex);

  /*
   * Note that all thread pool stuff is finished at this point and
   * it is safe to just destroy rw lock
   */
  uv_rwlock_destroy(&loop->cloexec_lock);

#if 0
  assert(uv__queue_empty(&loop->pending_queue));
  assert(uv__queue_empty(&loop->watcher_queue));
  assert(loop->nfds == 0);
#endif

  uv__free(loop->watchers);
  loop->watchers = NULL;
  loop->nwatchers = 0;

  lfields = uv__get_internal_fields(loop);
  uv_mutex_destroy(&lfields->loop_metrics.lock);
  uv__free(lfields);
  loop->internal_fields = NULL;
}
/* Backend for uv_loop_configure(). Supported options:
 *   UV_METRICS_IDLE_TIME        - enable idle-time bookkeeping.
 *   UV_LOOP_USE_IO_URING_SQPOLL - Linux only: opt in to io_uring SQPOLL.
 *   UV_LOOP_BLOCK_SIGNAL        - only SIGPROF may be blocked; any other
 *                                 signal yields UV_EINVAL.
 * Anything else returns UV_ENOSYS.
 */
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
  uv__loop_internal_fields_t* lfields;

  lfields = uv__get_internal_fields(loop);

  switch (option) {
  case UV_METRICS_IDLE_TIME:
    lfields->flags |= UV_METRICS_IDLE_TIME;
    return 0;

#if defined(__linux__)
  case UV_LOOP_USE_IO_URING_SQPOLL:
    loop->flags |= UV_LOOP_ENABLE_IO_URING_SQPOLL;
    return 0;
#endif

  case UV_LOOP_BLOCK_SIGNAL:
    if (va_arg(ap, int) != SIGPROF)
      return UV_EINVAL;
    loop->flags |= UV_LOOP_BLOCK_SIGPROF;
    return 0;

  default:
    return UV_ENOSYS;
  }
}

View File

@@ -0,0 +1,264 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <kvm.h>
#include <paths.h>
#include <unistd.h>
#include <time.h>
#include <stdlib.h>
#include <fcntl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <uvm/uvm_extern.h>
#include <unistd.h>
#include <time.h>
/* NetBSD platform hook: the only backend state is the kqueue fd. */
int uv__platform_loop_init(uv_loop_t* loop) {
  return uv__kqueue_init(loop);
}
/* Nothing platform-specific to release; the kqueue fd is closed by the
 * generic loop teardown. */
void uv__platform_loop_delete(uv_loop_t* loop) {
}
/* Fill avg[] with the 1/5/15 minute load averages via the CTL_VM/
 * VM_LOADAVG sysctl. On failure the output array is left untouched.
 */
void uv_loadavg(double avg[3]) {
  struct loadavg info;
  int mib[] = {CTL_VM, VM_LOADAVG};
  size_t len;
  int i;

  len = sizeof(info);
  if (sysctl(mib, ARRAY_SIZE(mib), &info, &len, NULL, 0) == -1)
    return;

  /* Kernel reports fixed-point values scaled by fscale. */
  for (i = 0; i < 3; i++)
    avg[i] = (double) info.ldavg[i] / info.fscale;
}
/* Copy the path of the running executable into `buffer` (at most *size
 * bytes including the terminator) and store the resulting string length
 * back into *size. Returns 0 or a UV_* error.
 */
int uv_exepath(char* buffer, size_t* size) {
  /* Staging buffer: retrieving a partial path name does not work.
   * As of NetBSD-8(beta), the vnode->path translator does not handle
   * files with names longer than 31 characters.
   */
  char staging[PATH_MAX];
  size_t staging_size;
  int mib[4];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_ARGS;
  mib[2] = -1;  /* -1 selects the current process. */
  mib[3] = KERN_PROC_PATHNAME;

  staging_size = ARRAY_SIZE(staging);
  if (sysctl(mib, 4, staging, &staging_size, NULL, 0))
    return UV__ERR(errno);

  /* Copy into the caller's buffer, truncating if it is smaller. */
  /* TODO(bnoordhuis) Check uv__strscpy() return value. */
  uv__strscpy(buffer, staging, *size);

  /* Report the (possibly truncated) string length. */
  *size = strlen(buffer);
  return 0;
}
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
/* Total physical memory in bytes; 0 on failure. Uses the 64-bit
 * HW_PHYSMEM64 node when the platform provides it, the (possibly
 * truncating) 32-bit HW_PHYSMEM otherwise.
 */
uint64_t uv_get_total_memory(void) {
#if defined(HW_PHYSMEM64)
  uint64_t mem;
  int mib[] = {CTL_HW, HW_PHYSMEM64};
#else
  unsigned int mem;
  int mib[] = {CTL_HW, HW_PHYSMEM};
#endif
  size_t len;

  len = sizeof(mem);
  if (sysctl(mib, ARRAY_SIZE(mib), &mem, &len, NULL, 0) != 0)
    return 0;

  return (uint64_t) mem;
}
/* NetBSD has no cgroup-style memory limits to report; 0 means "unknown". */
uint64_t uv_get_constrained_memory(void) {
  return 0;
}
/* No better estimate is available on this platform; fall back to the
 * free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* Resident set size of the current process in bytes, via libkvm.
 * Returns 0 on success; UV_EPERM if kvm access or the proc lookup fails.
 */
int uv_resident_set_memory(size_t* rss) {
  struct kinfo_proc2* ki;
  kvm_t* kd;
  int nentries;

  kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
  if (kd == NULL)
    return UV_EPERM;

  ki = kvm_getproc2(kd, KERN_PROC_PID, getpid(),
                    sizeof(struct kinfo_proc2), &nentries);
  if (ki == NULL) {
    kvm_close(kd);
    return UV_EPERM;
  }

  /* p_vm_rssize is a page count. */
  *rss = ki->p_vm_rssize * getpagesize();

  kvm_close(kd);
  return 0;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return 0;
}
/* Populate *cpu_infos with one entry per CPU (model, speed, and per-state
 * tick counts converted to milliseconds). Returns 0 or a UV_* error; on
 * error no memory is leaked. The caller frees the array with
 * uv_free_cpu_info().
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
  unsigned int multiplier = ((uint64_t)1000L / ticks);  /* ticks -> msecs */
  unsigned int cur = 0;
  uv_cpu_info_t* cpu_info;
  u_int64_t* cp_times;
  char model[512];
  u_int64_t cpuspeed;
  int numcpus;
  size_t size;
  int err;
  int i;

  size = sizeof(model);
  if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
      sysctlbyname("hw.model", &model, &size, NULL, 0)) {
    return UV__ERR(errno);
  }

  size = sizeof(numcpus);
  if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
    return UV__ERR(errno);
  *count = numcpus;

  /* Only i386 and amd64 have machdep.tsc_freq */
  size = sizeof(cpuspeed);
  if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
    cpuspeed = 0;

  size = numcpus * CPUSTATES * sizeof(*cp_times);
  cp_times = uv__malloc(size);
  if (cp_times == NULL)
    return UV_ENOMEM;

  if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0)) {
    /* Bug fix: capture the error first, then free - the original code
     * returned here without releasing cp_times (memory leak).
     */
    err = UV__ERR(errno);
    uv__free(cp_times);
    return err;
  }

  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    /* (Removed a redundant uv__free(*cpu_infos) on this path: the
     * pointer is NULL when the allocation failed.)
     */
    uv__free(cp_times);
    return UV_ENOMEM;
  }

  for (i = 0; i < numcpus; i++) {
    cpu_info = &(*cpu_infos)[i];
    cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
    cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
    cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
    cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
    cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
    cpu_info->model = uv__strdup(model);
    cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);  /* Hz -> MHz */
    cur += CPUSTATES;
  }

  uv__free(cp_times);
  return 0;
}
/* Fill `buf` with `len` random bytes using the KERN_ARND sysctl, which
 * hands out at most 32 bytes per call, so request in 32-byte chunks.
 * Returns 0, a translated errno, or UV_EIO on a short (impossible) read.
 */
int uv__random_sysctl(void* buf, size_t len) {
  static int name[] = {CTL_KERN, KERN_ARND};
  unsigned char* out;
  size_t chunk;
  size_t got;

  out = buf;
  while (len > 0) {
    chunk = len < 32 ? len : 32;
    got = chunk;

    if (sysctl(name, ARRAY_SIZE(name), out, &got, NULL, 0) == -1)
      return UV__ERR(errno);

    if (got != chunk)
      return UV_EIO;  /* Can't happen. */

    out += got;
    len -= got;
  }

  return 0;
}

View File

@@ -0,0 +1,42 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
/* fs-event stub for platforms without a file watching backend. */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
/* Stub: file watching is not supported on this platform. */
int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
                      const char* filename, unsigned int flags) {
  return UV_ENOSYS;
}
/* Stub: there is never an active watcher to stop on this platform. */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
/* Since uv_fs_event_start() always fails here, no fs-event handle can
 * ever reach the close path. */
void uv__fs_event_close(uv_fs_event_t* handle) {
  UNREACHABLE();
}

View File

@@ -0,0 +1,45 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <stddef.h>
/* No process-title support on this platform: argv is returned untouched
 * and no state is recorded. */
char** uv_setup_args(int argc, char** argv) {
  return argv;
}
/* Nothing was allocated by uv_setup_args(), so nothing to release. */
void uv__process_title_cleanup(void) {
}
/* Setting the title is silently a no-op on this platform. */
int uv_set_process_title(const char* title) {
  return 0;
}
/* There is no tracked title on this platform; report the empty string.
 * Returns UV_EINVAL on a NULL or zero-length buffer.
 */
int uv_get_process_title(char* buffer, size_t size) {
  if (size == 0 || buffer == NULL)
    return UV_EINVAL;

  *buffer = '\0';
  return 0;
}

View File

@@ -0,0 +1,253 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* OpenBSD platform hook: the only backend state is the kqueue fd. */
int uv__platform_loop_init(uv_loop_t* loop) {
  return uv__kqueue_init(loop);
}
/* Nothing platform-specific to release; the kqueue fd is closed by the
 * generic loop teardown. */
void uv__platform_loop_delete(uv_loop_t* loop) {
}
/* Fill avg[] with the 1/5/15 minute load averages. Leaves the output
 * untouched when the sysctl fails.
 */
void uv_loadavg(double avg[3]) {
  struct loadavg info;
  int mib[] = {CTL_VM, VM_LOADAVG};
  size_t len;
  int i;

  len = sizeof(info);
  if (sysctl(mib, ARRAY_SIZE(mib), &info, &len, NULL, 0) < 0)
    return;

  /* ldavg[] holds fixed-point values scaled by fscale. */
  for (i = 0; i < 3; i++)
    avg[i] = (double) info.ldavg[i] / info.fscale;
}
/* Best-effort executable path on OpenBSD: there is no direct "path of
 * this binary" sysctl, so fetch the process's argv via KERN_PROC_ARGV
 * (growing the buffer until it fits) and return argv[0], truncated to
 * the caller's buffer. Stores the resulting string length in *size.
 */
int uv_exepath(char* buffer, size_t* size) {
  int mib[4];
  char **argsbuf = NULL;
  size_t argsbuf_size = 100U;
  size_t exepath_size;
  pid_t mypid;
  int err;
  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;
  mypid = getpid();
  /* Retry with a doubled buffer until the kernel stops saying ENOMEM. */
  for (;;) {
    err = UV_ENOMEM;
    argsbuf = uv__reallocf(argsbuf, argsbuf_size);
    if (argsbuf == NULL)
      goto out;
    mib[0] = CTL_KERN;
    mib[1] = KERN_PROC_ARGS;
    mib[2] = mypid;
    mib[3] = KERN_PROC_ARGV;
    if (sysctl(mib, ARRAY_SIZE(mib), argsbuf, &argsbuf_size, NULL, 0) == 0) {
      break;
    }
    if (errno != ENOMEM) {
      err = UV__ERR(errno);
      goto out;
    }
    argsbuf_size *= 2U;
  }
  if (argsbuf[0] == NULL) {
    err = UV_EINVAL; /* FIXME(bnoordhuis) More appropriate error. */
    goto out;
  }
  /* Reserve one byte for the terminator, then copy at most *size bytes
   * of argv[0] and report the copied length.
   */
  *size -= 1;
  exepath_size = strlen(argsbuf[0]);
  if (*size > exepath_size)
    *size = exepath_size;
  memcpy(buffer, argsbuf[0], *size);
  buffer[*size] = '\0';
  err = 0;
out:
  /* uv__free(NULL) is a no-op, so this is safe on every path. */
  uv__free(argsbuf);
  return err;
}
uint64_t uv_get_free_memory(void) {
struct uvmexp info;
size_t size = sizeof(info);
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
/* Total physical memory in bytes from HW_PHYSMEM64; 0 on failure. */
uint64_t uv_get_total_memory(void) {
  uint64_t mem;
  int mib[] = {CTL_HW, HW_PHYSMEM64};
  size_t len;

  len = sizeof(mem);
  if (sysctl(mib, ARRAY_SIZE(mib), &mem, &len, NULL, 0) != 0)
    return 0;

  return (uint64_t) mem;
}
/* OpenBSD has no cgroup-style memory limits to report; 0 means "unknown". */
uint64_t uv_get_constrained_memory(void) {
  return 0;
}
/* No better estimate is available on this platform; fall back to the
 * free-memory figure. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* Resident set size of the current process in bytes, read from the
 * KERN_PROC sysctl. Returns 0 or a translated errno.
 */
int uv_resident_set_memory(size_t* rss) {
  struct kinfo_proc kinfo;
  size_t len;
  int mib[6];

  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC;
  mib[2] = KERN_PROC_PID;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_proc);  /* record size */
  mib[5] = 1;                          /* record count */

  len = sizeof(kinfo);
  if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &len, NULL, 0) < 0)
    return UV__ERR(errno);

  /* p_vm_rssize is a page count. */
  *rss = kinfo.p_vm_rssize * (size_t) getpagesize();
  return 0;
}
int uv_uptime(double* uptime) {
time_t now;
struct timeval info;
size_t size = sizeof(info);
static int which[] = {CTL_KERN, KERN_BOOTTIME};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
now = time(NULL);
*uptime = (double)(now - info.tv_sec);
return 0;
}
/* Populate *cpu_infos with one entry per online CPU (model string, clock
 * speed in MHz, and per-state tick counts converted to milliseconds).
 * Returns 0 or a UV_* error; on error all partial allocations are freed
 * and *cpu_infos is NULL. The caller frees with uv_free_cpu_info().
 *
 * Fixes over the previous version:
 *  - the error code is captured at the failure site instead of reading
 *    errno after the cleanup calls, so cleanup cannot disturb it;
 *  - uv__strdup() is now checked, so a failed copy can no longer leave a
 *    NULL model pointer in a "successful" result.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
               multiplier = ((uint64_t)1000L / ticks), cpuspeed;
  uint64_t info[CPUSTATES];
  char model[512];
  int numcpus = 1;
  int which[] = {CTL_HW,HW_MODEL};
  int percpu[] = {CTL_KERN,KERN_CPTIME2,0};
  size_t size;
  int err;
  int i, j;
  uv_cpu_info_t* cpu_info;

  size = sizeof(model);
  if (sysctl(which, ARRAY_SIZE(which), &model, &size, NULL, 0))
    return UV__ERR(errno);

  which[1] = HW_NCPUONLINE;
  size = sizeof(numcpus);
  if (sysctl(which, ARRAY_SIZE(which), &numcpus, &size, NULL, 0))
    return UV__ERR(errno);

  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
  if (!(*cpu_infos))
    return UV_ENOMEM;

  i = 0;
  *count = numcpus;

  which[1] = HW_CPUSPEED;
  size = sizeof(cpuspeed);
  cpuspeed = 0;
  /*
   * HW_CPUSPEED can return EOPNOTSUPP if cpuspeed is 0,
   * so ignore that and continue the flow, because we
   * still care about the rest of the CPU info.
   */
  if (sysctl(which, ARRAY_SIZE(which), &cpuspeed, &size, NULL, 0) &&
      (errno != EOPNOTSUPP)) {
    err = UV__ERR(errno);
    goto error;
  }

  size = sizeof(info);
  for (i = 0; i < numcpus; i++) {
    percpu[2] = i;
    if (sysctl(percpu, ARRAY_SIZE(percpu), &info, &size, NULL, 0)) {
      err = UV__ERR(errno);
      goto error;
    }

    cpu_info = &(*cpu_infos)[i];
    cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
    cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
    cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
    cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
    cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;

    cpu_info->model = uv__strdup(model);
    if (cpu_info->model == NULL) {
      /* Models [0, i) are allocated; this slot is not. */
      err = UV_ENOMEM;
      goto error;
    }
    cpu_info->speed = cpuspeed;
  }

  return 0;

error:
  *count = 0;
  for (j = 0; j < i; j++)
    uv__free((*cpu_infos)[j].model);

  uv__free(*cpu_infos);
  *cpu_infos = NULL;
  return err;
}

View File

@@ -0,0 +1,136 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <string.h>
/* Guards process_title against concurrent get/set. Lazily initialized via
 * uv_once the first time it is needed. */
static uv_mutex_t process_title_mutex;
static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
/* Heap copy of the current title; NULL until uv_setup_args() succeeds. */
static char* process_title = NULL;
/* Single allocation holding the cloned argv pointer table + strings. */
static void* args_mem = NULL;

/* uv_once() initializer for the title mutex. */
static void init_process_title_mutex_once(void) {
  uv_mutex_init(&process_title_mutex);
}
/* Clone argv into one private heap block (a pointer table followed by the
 * flattened strings) so later title changes cannot corrupt the original
 * argv memory. Also remembers argv[0] as the initial process title.
 * Returns the clone, or the original argv when argc <= 0 or allocation
 * fails.
 */
char** uv_setup_args(int argc, char** argv) {
  char** cloned;
  char* dst;
  size_t total;
  size_t len;
  int idx;

  if (argc <= 0)
    return argv;

  /* Pointer table (argc + 1 entries, NULL-terminated) ... */
  total = (argc + 1) * sizeof(char*);
  /* ... followed by every argv string including its terminator. */
  for (idx = 0; idx < argc; idx++)
    total += strlen(argv[idx]) + 1;

  cloned = uv__malloc(total);
  if (cloned == NULL)
    return argv;

  /* Strings start right after the pointer table. */
  dst = (char*) &cloned[argc + 1];
  for (idx = 0; idx < argc; idx++) {
    len = strlen(argv[idx]) + 1;
    memcpy(dst, argv[idx], len);
    cloned[idx] = dst;
    dst += len;
  }
  cloned[argc] = NULL;

  args_mem = cloned;
  process_title = uv__strdup(argv[0]);
  return cloned;
}
/* Replace the cached process title with a copy of |title|.
 *
 * Returns 0 on success, UV_ENOBUFS when uv_setup_args() was never called
 * (or failed), UV_ENOMEM when the copy cannot be allocated.
 *
 * NOTE(review): this POSIX fallback only updates the cached string; it does
 * not appear to write the title anywhere the OS can see it. */
int uv_set_process_title(const char* title) {
char* new_title;
/* If uv_setup_args wasn't called or failed, we can't continue. */
if (args_mem == NULL)
return UV_ENOBUFS;
/* We cannot free this pointer when libuv shuts down,
* the process may still be using it.
*/
new_title = uv__strdup(title);
if (new_title == NULL)
return UV_ENOMEM;
/* Duplicate first, then swap under the lock so the mutex is never held
 * across an allocation. */
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
if (process_title != NULL)
uv__free(process_title);
process_title = new_title;
uv_mutex_unlock(&process_title_mutex);
return 0;
}
/* Copy the cached process title into |buffer| (capacity |size|, including
 * room for the nul terminator).
 *
 * Returns 0 on success, UV_EINVAL for a NULL/empty buffer, UV_ENOBUFS when
 * there is no cached title or the buffer is too small. */
int uv_get_process_title(char* buffer, size_t size) {
size_t len;
if (buffer == NULL || size == 0)
return UV_EINVAL;
/* If uv_setup_args wasn't called or failed, we can't continue. */
if (args_mem == NULL || process_title == NULL)
return UV_ENOBUFS;
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
/* Length is measured under the lock so a concurrent
 * uv_set_process_title() cannot change the string under us. */
len = strlen(process_title);
if (size <= len) {
uv_mutex_unlock(&process_title_mutex);
return UV_ENOBUFS;
}
strcpy(buffer, process_title);
uv_mutex_unlock(&process_title_mutex);
return 0;
}
/* Library-teardown hook: release the argv copy made by uv_setup_args().
 * The cached process_title itself is deliberately not freed here (see the
 * comment in uv_set_process_title). */
void uv__process_title_cleanup(void) {
uv__free(args_mem); /* Keep valgrind happy. */
args_mem = NULL;
}

View File

@@ -0,0 +1,536 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "os390-syscalls.h"
#include <errno.h>
#include <stdlib.h>
#include <search.h>
#include <termios.h>
#include <sys/msg.h>
static struct uv__queue global_epoll_queue;
static uv_mutex_t global_epoll_lock;
static uv_once_t once = UV_ONCE_INIT;
/* z/OS replacement for POSIX scandir(3): collect the entries of |maindir|
 * that pass |filter| (a NULL filter accepts everything), sort them with
 * |compar| when one is supplied, and hand the malloc'd array back through
 * |namelist| (caller frees each entry and the array).
 *
 * Returns the entry count on success, or -1 with errno set on failure
 * (ENOMEM for allocation failures, or whatever opendir() set).
 *
 * Fix vs. previous version: qsort() was called unconditionally, which is
 * undefined behavior when |compar| is NULL; POSIX/glibc sort only when a
 * comparison function is given.  Also drops the unused `allocated` local. */
int scandir(const char* maindir, struct dirent*** namelist,
            int (*filter)(const struct dirent*),
            int (*compar)(const struct dirent**,
            const struct dirent **)) {
  struct dirent** nl;
  struct dirent** nl_copy;
  struct dirent* dirent;
  unsigned count;
  DIR* mdir;

  nl = NULL;
  count = 0;

  mdir = opendir(maindir);
  if (!mdir)
    return -1;

  for (;;) {
    dirent = readdir(mdir);
    if (!dirent)
      break;
    if (!filter || filter(dirent)) {
      struct dirent* copy;
      copy = uv__malloc(sizeof(*copy));
      if (!copy)
        goto error;
      memcpy(copy, dirent, sizeof(*copy));

      /* Grow one slot at a time; on failure the old array is still valid
       * and is released on the error path. */
      nl_copy = uv__realloc(nl, sizeof(*copy) * (count + 1));
      if (nl_copy == NULL) {
        uv__free(copy);
        goto error;
      }

      nl = nl_copy;
      nl[count++] = copy;
    }
  }

  /* Sorting is optional: a NULL compar must not reach qsort() (UB). */
  if (compar != NULL)
    qsort(nl, count, sizeof(struct dirent *),
          (int (*)(const void *, const void *)) compar);

  closedir(mdir);

  *namelist = nl;
  return count;

error:
  /* Unwind every entry collected so far, then the array itself. */
  while (count > 0) {
    dirent = nl[--count];
    uv__free(dirent);
  }
  uv__free(nl);
  closedir(mdir);
  errno = ENOMEM;
  return -1;
}
/* Round |val| up to the nearest power of two; a value that already is a
 * power of two is returned unchanged.  Classic bit-smearing trick: after
 * the shifts every bit below the highest set bit of (val - 1) is set, so
 * adding one yields the next power of two. */
static unsigned int next_power_of_two(unsigned int val) {
  unsigned int v;
  unsigned int shift;

  v = val - 1;
  for (shift = 1; shift < 32; shift <<= 1)
    v |= v >> shift;
  return v + 1;
}
/* Grow lst->items so it can hold at least |len| pollfd slots, rounding the
 * new capacity up to a power of two.  The final slot is reserved for the
 * message-queue descriptor, so that entry is carefully extracted before the
 * realloc and restored at the new end afterwards.  Aborts on OOM. */
static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
unsigned int newsize;
unsigned int i;
struct pollfd* newlst;
struct pollfd event;
if (len <= lst->size)
return;
if (lst->size == 0)
event.fd = -1;
else {
/* Extract the message queue at the end. */
event = lst->items[lst->size - 1];
lst->items[lst->size - 1].fd = -1;
}
newsize = next_power_of_two(len);
newlst = uv__reallocf(lst->items, newsize * sizeof(lst->items[0]));
if (newlst == NULL)
abort();
/* Mark all freshly added slots as unused. */
for (i = lst->size; i < newsize; ++i)
newlst[i].fd = -1;
/* Restore the message queue at the end */
newlst[newsize - 1] = event;
lst->items = newlst;
lst->size = newsize;
}
/* Library-teardown hook: remove the System V message queue associated with
 * the default loop's backend.  NOTE(review): assumes uv_backend_fd() of the
 * default loop is that queue's id on z/OS — confirm against loop init. */
void uv__os390_cleanup(void) {
msgctl(uv_backend_fd(uv_default_loop()), IPC_RMID, NULL);
}
/* Create the private System V message queue that backs this epoll emulation
 * instance and affiliate it with the calling process by sending and then
 * draining one dummy message.  Aborts on any msgget/msgsnd/msgrcv failure. */
static void init_message_queue(uv__os390_epoll* lst) {
struct {
long int header;
char body;
} msg;
/* initialize message queue */
lst->msg_queue = msgget(IPC_PRIVATE, 0600 | IPC_CREAT);
if (lst->msg_queue == -1)
abort();
/*
On z/OS, the message queue will be affiliated with the process only
when a send is performed on it. Once this is done, the system
can be queried for all message queues belonging to our process id.
*/
msg.header = 1;
if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
abort();
/* Clean up the dummy message sent above */
if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
abort();
}
/* pthread_atfork pre-fork hook: hold the global epoll lock across fork() so
 * no thread is mid-mutation of the epoll list when the child is created. */
static void before_fork(void) {
uv_mutex_lock(&global_epoll_lock);
}
/* pthread_atfork parent hook: release the lock taken in before_fork(). */
static void after_fork(void) {
uv_mutex_unlock(&global_epoll_lock);
}
/* pthread_atfork child hook: the child inherits the epoll list but not the
 * kernel resources backing it, so drop every instance's items and reset the
 * uv_once guard so epoll_init() can run again in the child. */
static void child_fork(void) {
struct uv__queue* q;
uv_once_t child_once = UV_ONCE_INIT;
/* reset once */
memcpy(&once, &child_once, sizeof(child_once));
/* reset epoll list */
while (!uv__queue_empty(&global_epoll_queue)) {
uv__os390_epoll* lst;
q = uv__queue_head(&global_epoll_queue);
uv__queue_remove(q);
lst = uv__queue_data(q, uv__os390_epoll, member);
uv__free(lst->items);
lst->items = NULL;
lst->size = 0;
}
/* The lock was acquired in before_fork() and is still held here. */
uv_mutex_unlock(&global_epoll_lock);
uv_mutex_destroy(&global_epoll_lock);
}
/* One-time global setup: the epoll instance list, its lock, and the fork
 * handlers above.  Aborts if either registration fails. */
static void epoll_init(void) {
uv__queue_init(&global_epoll_queue);
if (uv_mutex_init(&global_epoll_lock))
abort();
if (pthread_atfork(&before_fork, &after_fork, &child_fork))
abort();
}
/* z/OS epoll_create1() emulation: allocate an instance, create its backing
 * message queue, install that queue's descriptor in the reserved last slot,
 * and register the instance on the global list.  Returns NULL on OOM.
 * |flags| is accepted for API compatibility but not used here. */
uv__os390_epoll* epoll_create1(int flags) {
uv__os390_epoll* lst;
lst = uv__malloc(sizeof(*lst));
if (lst != NULL) {
/* initialize list */
lst->size = 0;
lst->items = NULL;
init_message_queue(lst);
/* maybe_resize(lst, 1) guarantees one slot; it becomes the reserved
 * message-queue entry at the tail of the pollfd array. */
maybe_resize(lst, 1);
lst->items[lst->size - 1].fd = lst->msg_queue;
lst->items[lst->size - 1].events = POLLIN;
lst->items[lst->size - 1].revents = 0;
uv_once(&once, epoll_init);
uv_mutex_lock(&global_epoll_lock);
uv__queue_insert_tail(&global_epoll_queue, &lst->member);
uv_mutex_unlock(&global_epoll_lock);
}
return lst;
}
/* z/OS epoll_ctl() emulation.  The pollfd array is indexed directly by fd,
 * so ADD/MOD/DEL address slot |fd|; a slot with .fd == -1 means "not
 * watched".  Returns 0 on success, -1 with errno set (ENOENT, EEXIST) on
 * failure; aborts on an unknown |op|. */
int epoll_ctl(uv__os390_epoll* lst,
int op,
int fd,
struct epoll_event *event) {
uv_mutex_lock(&global_epoll_lock);
if (op == EPOLL_CTL_DEL) {
if (fd >= lst->size || lst->items[fd].fd == -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = ENOENT;
return -1;
}
lst->items[fd].fd = -1;
} else if (op == EPOLL_CTL_ADD) {
/* Resizing to 'fd + 1' would expand the list to contain at least
* 'fd'. But we need to guarantee that the last index on the list
* is reserved for the message queue. So specify 'fd + 2' instead.
*/
maybe_resize(lst, fd + 2);
if (lst->items[fd].fd != -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = EEXIST;
return -1;
}
lst->items[fd].fd = fd;
lst->items[fd].events = event->events;
lst->items[fd].revents = 0;
} else if (op == EPOLL_CTL_MOD) {
/* 'size - 1' keeps MOD away from the reserved message-queue slot. */
if (fd >= lst->size - 1 || lst->items[fd].fd == -1) {
uv_mutex_unlock(&global_epoll_lock);
errno = ENOENT;
return -1;
}
lst->items[fd].events = event->events;
lst->items[fd].revents = 0;
} else
abort();
uv_mutex_unlock(&global_epoll_lock);
return 0;
}
/* Upper bounds that keep the poll()/epoll size arithmetic from overflowing. */
#define EP_MAX_PFDS (ULONG_MAX / sizeof(struct pollfd))
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
/* z/OS epoll_wait() emulation built on poll(), which on z/OS can wait on
 * file descriptors and message queues simultaneously (nmsgsfds_t packs both
 * counts).  Fills |events| with up to |maxevents| ready entries; message
 * queue readiness is reported with is_msg = 1.  Returns the event count,
 * 0 on timeout, or -1 with errno set. */
int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
int maxevents, int timeout) {
nmsgsfds_t size;
struct pollfd* pfds;
int pollret;
int pollfdret;
int pollmsgret;
int reventcount;
int nevents;
struct pollfd msg_fd;
int i;
if (!lst || !lst->items || !events) {
errno = EFAULT;
return -1;
}
if (lst->size > EP_MAX_PFDS) {
errno = EINVAL;
return -1;
}
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) {
errno = EINVAL;
return -1;
}
assert(lst->size > 0);
/* One message queue plus (size - 1) regular descriptors. */
_SET_FDS_MSGS(size, 1, lst->size - 1);
pfds = lst->items;
pollret = poll(pfds, size, timeout);
if (pollret <= 0)
return pollret;
/* Split poll()'s packed return into fd readiness and msg readiness. */
pollfdret = _NFDS(pollret);
pollmsgret = _NMSGS(pollret);
reventcount = 0;
nevents = 0;
msg_fd = pfds[lst->size - 1]; /* message queue is always last entry */
maxevents = maxevents - pollmsgret; /* allow spot for message queue */
for (i = 0;
i < lst->size - 1 &&
nevents < maxevents &&
reventcount < pollfdret; ++i) {
struct epoll_event ev;
struct pollfd* pfd;
pfd = &pfds[i];
if (pfd->fd == -1 || pfd->revents == 0)
continue;
ev.fd = pfd->fd;
ev.events = pfd->revents;
ev.is_msg = 0;
reventcount++;
events[nevents++] = ev;
}
/* Report the message queue last, flagged so the caller can tell it apart. */
if (pollmsgret > 0 && msg_fd.revents != 0 && msg_fd.fd != -1) {
struct epoll_event ev;
ev.fd = msg_fd.fd;
ev.events = msg_fd.revents;
ev.is_msg = 1;
events[nevents++] = ev;
}
return nevents;
}
/* Notify every epoll instance that |fd| was closed: mark its slot unused in
 * each instance's pollfd array so a recycled descriptor number is not
 * mistaken for the old watch.  Always returns 0. */
int epoll_file_close(int fd) {
struct uv__queue* q;
uv_once(&once, epoll_init);
uv_mutex_lock(&global_epoll_lock);
uv__queue_foreach(q, &global_epoll_queue) {
uv__os390_epoll* lst;
lst = uv__queue_data(q, uv__os390_epoll, member);
if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
lst->items[fd].fd = -1;
}
uv_mutex_unlock(&global_epoll_lock);
return 0;
}
/* Tear down one epoll instance: unlink it from the global list, destroy its
 * message queue, and free its pollfd array.  The uv__os390_epoll struct
 * itself is owned (and freed) by the caller. */
void epoll_queue_close(uv__os390_epoll* lst) {
/* Remove epoll instance from global queue */
uv_mutex_lock(&global_epoll_lock);
uv__queue_remove(&lst->member);
uv_mutex_unlock(&global_epoll_lock);
/* Free resources */
msgctl(lst->msg_queue, IPC_RMID, NULL);
lst->msg_queue = -1;
uv__free(lst->items);
lst->items = NULL;
}
/* mkdtemp(3) replacement for platforms that lack it: |path| must end in
 * "XXXXXX"; those six characters are replaced in place with random
 * alphanumerics from /dev/urandom and the directory is created with mode
 * 0700.  Retries up to TMP_MAX times on EEXIST.  Returns |path| on success
 * or NULL with errno set (EINVAL on a bad template, EEXIST when all tries
 * collide, otherwise the failing call's errno). */
char* mkdtemp(char* path) {
static const char* tempchars =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const size_t num_chars = 62;
static const size_t num_x = 6;
char *ep, *cp;
unsigned int tries, i;
size_t len;
uint64_t v;
int fd;
int retval;
int saved_errno;
len = strlen(path);
ep = path + len;
if (len < num_x || strncmp(ep - num_x, "XXXXXX", num_x)) {
errno = EINVAL;
return NULL;
}
fd = open("/dev/urandom", O_RDONLY);
if (fd == -1)
return NULL;
tries = TMP_MAX;
retval = -1;
do {
if (read(fd, &v, sizeof(v)) != sizeof(v))
break;
/* Spend the 64 random bits six base-62 digits at a time. */
cp = ep - num_x;
for (i = 0; i < num_x; i++) {
*cp++ = tempchars[v % num_chars];
v /= num_chars;
}
if (mkdir(path, S_IRWXU) == 0) {
retval = 0;
break;
}
else if (errno != EEXIST)
break;
} while (--tries);
/* Preserve the failing errno across uv__close(), which may clobber it. */
saved_errno = errno;
uv__close(fd);
if (tries == 0) {
errno = EEXIST;
return NULL;
}
if (retval == -1) {
errno = saved_errno;
return NULL;
}
return path;
}
/* readlink(2) wrapper for z/OS that additionally expands symlink targets of
 * the form "/$PARMLIB_VAR/rest": the leading variable component is resolved
 * through realpath() and recombined with the remainder.  Like readlink, the
 * result placed in |buf| is NOT nul-terminated.  Returns the result length
 * or -1 with errno set (ENOMEM, ENAMETOOLONG, or the syscall's errno). */
ssize_t os390_readlink(const char* path, char* buf, size_t len) {
ssize_t rlen;
ssize_t vlen;
ssize_t plen;
char* delimiter;
char old_delim;
char* tmpbuf;
char realpathstr[PATH_MAX + 1];
/* +1 so the raw link target can be nul-terminated for string handling. */
tmpbuf = uv__malloc(len + 1);
if (tmpbuf == NULL) {
errno = ENOMEM;
return -1;
}
rlen = readlink(path, tmpbuf, len);
if (rlen < 0) {
uv__free(tmpbuf);
return rlen;
}
if (rlen < 3 || strncmp("/$", tmpbuf, 2) != 0) {
/* Straightforward readlink. */
memcpy(buf, tmpbuf, rlen);
uv__free(tmpbuf);
return rlen;
}
/*
* There is a parmlib variable at the beginning
* which needs interpretation.
*/
tmpbuf[rlen] = '\0';
delimiter = strchr(tmpbuf + 2, '/');
if (delimiter == NULL)
/* No slash at the end */
delimiter = strchr(tmpbuf + 2, '\0');
/* Read real path of the variable. */
old_delim = *delimiter;
*delimiter = '\0';
if (realpath(tmpbuf, realpathstr) == NULL) {
uv__free(tmpbuf);
return -1;
}
/* realpathstr is not guaranteed to end with null byte.*/
realpathstr[PATH_MAX] = '\0';
/* Reset the delimiter and fill up the buffer. */
*delimiter = old_delim;
plen = strlen(delimiter);
vlen = strlen(realpathstr);
rlen = plen + vlen;
if (rlen > len) {
uv__free(tmpbuf);
errno = ENAMETOOLONG;
return -1;
}
/* Resolved variable first, then the untouched remainder of the link. */
memcpy(buf, realpathstr, vlen);
memcpy(buf + vlen, delimiter, plen);
/* Done using temporary buffer. */
uv__free(tmpbuf);
return rlen;
}
/* POSIX semaphore stubs: on this platform libuv's semaphores are implemented
 * elsewhere (see UV_PLATFORM_SEM_T), so reaching any of these entry points
 * indicates a logic error and traps via UNREACHABLE(). */
int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
UNREACHABLE();
}
int sem_destroy(UV_PLATFORM_SEM_T* semid) {
UNREACHABLE();
}
int sem_post(UV_PLATFORM_SEM_T* semid) {
UNREACHABLE();
}
int sem_trywait(UV_PLATFORM_SEM_T* semid) {
UNREACHABLE();
}
int sem_wait(UV_PLATFORM_SEM_T* semid) {
UNREACHABLE();
}

View File

@@ -0,0 +1,75 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_OS390_SYSCALL_H_
#define UV_OS390_SYSCALL_H_
#include "uv.h"
#include "internal.h"
#include <dirent.h>
#include <poll.h>
#include <pthread.h>
#include "zos-base.h"
/* epoll_ctl operation codes, mirroring the Linux API values. */
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
#define MAX_EPOLL_INSTANCES 256
#define MAX_ITEMS_PER_EPOLL 1024
#define UV__O_CLOEXEC 0x80000
/* Event record produced by epoll_wait(); is_msg distinguishes message-queue
 * readiness from regular file-descriptor readiness. */
struct epoll_event {
int events;
int fd;
int is_msg;
};
/* One emulated epoll instance: a pollfd array indexed by fd, whose last slot
 * is reserved for the backing System V message queue, linked into the global
 * instance list via |member|. */
typedef struct {
struct uv__queue member;
struct pollfd* items;
unsigned long size;
int msg_queue;
} uv__os390_epoll;
/* epoll api */
uv__os390_epoll* epoll_create1(int flags);
int epoll_ctl(uv__os390_epoll* ep, int op, int fd, struct epoll_event *event);
int epoll_wait(uv__os390_epoll* ep, struct epoll_event *events, int maxevents, int timeout);
int epoll_file_close(int fd);
/* utility functions */
int scandir(const char* maindir, struct dirent*** namelist,
int (*filter)(const struct dirent *),
int (*compar)(const struct dirent **,
const struct dirent **));
char *mkdtemp(char* path);
ssize_t os390_readlink(const char* path, char* buf, size_t len);
size_t strnlen(const char* str, size_t maxlen);
int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value);
int sem_destroy(UV_PLATFORM_SEM_T* semid);
int sem_post(UV_PLATFORM_SEM_T* semid);
int sem_trywait(UV_PLATFORM_SEM_T* semid);
int sem_wait(UV_PLATFORM_SEM_T* semid);
void uv__os390_cleanup(void);
#endif /* UV_OS390_SYSCALL_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,555 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/un.h>
#include <unistd.h>
#include <stdlib.h>
/* Does the file path contain embedded nul bytes? */
/* Report whether the first |n| bytes of |s| contain a nul byte that would
 * make the pipe path invalid.  On Linux a path whose FIRST byte is nul
 * addresses the abstract socket namespace, where embedded nuls carry no
 * special meaning ("\0foo\0bar"), so such paths are accepted wholesale. */
static int includes_invalid_nul(const char *s, size_t n) {
  if (n == 0)
    return 0;
#ifdef __linux__
  if (s[0] == '\0')
    return 0;  /* abstract namespace path: nuls are legal throughout */
#endif
  return memchr(s, '\0', n) != NULL;
}
/* Initialize a pipe handle on |loop|.  |ipc| enables handle passing over
 * the pipe.  No file descriptor is created yet; that happens in bind,
 * connect, or open.  Always returns 0. */
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
handle->shutdown_req = NULL;
handle->connect_req = NULL;
handle->pipe_fname = NULL;
handle->ipc = ipc;
return 0;
}
/* Legacy entry point: bind to a nul-terminated path with default flags.
 * Thin wrapper over uv_pipe_bind2(). */
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
return uv_pipe_bind2(handle, name, strlen(name), 0);
}
/* Bind |handle| to a AF_UNIX address of |namelen| bytes.  Supports Linux
 * abstract-namespace names (leading nul byte, not nul-terminated) as well as
 * filesystem paths.  With UV_PIPE_NO_TRUNCATE an over-long name fails with
 * UV_EINVAL; otherwise it is silently truncated (documented behavior).
 * Returns 0 or a negative UV_* error code. */
int uv_pipe_bind2(uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags) {
struct sockaddr_un saddr;
char* pipe_fname;
int sockfd;
int err;
socklen_t addrlen;
pipe_fname = NULL;
if (flags & ~UV_PIPE_NO_TRUNCATE)
return UV_EINVAL;
if (name == NULL)
return UV_EINVAL;
/* namelen==0 on Linux means autobind the listen socket in the abstract
* socket namespace, see `man 7 unix` for details.
*/
#if !defined(__linux__)
if (namelen == 0)
return UV_EINVAL;
#endif
if (includes_invalid_nul(name, namelen))
return UV_EINVAL;
if (flags & UV_PIPE_NO_TRUNCATE)
if (namelen > sizeof(saddr.sun_path))
return UV_EINVAL;
/* Truncate long paths. Documented behavior. */
if (namelen > sizeof(saddr.sun_path))
namelen = sizeof(saddr.sun_path);
/* Already bound? */
if (uv__stream_fd(handle) >= 0)
return UV_EINVAL;
if (uv__is_closing(handle))
return UV_EINVAL;
/* Make a copy of the file path unless it is an abstract socket.
* We unlink the file later but abstract sockets disappear
* automatically since they're not real file system entities.
*/
if (*name == '\0') {
addrlen = offsetof(struct sockaddr_un, sun_path) + namelen;
} else {
pipe_fname = uv__malloc(namelen + 1);
if (pipe_fname == NULL)
return UV_ENOMEM;
memcpy(pipe_fname, name, namelen);
pipe_fname[namelen] = '\0';
addrlen = sizeof saddr;
}
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
goto err_socket;
sockfd = err;
memset(&saddr, 0, sizeof saddr);
memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
if (bind(sockfd, (struct sockaddr*)&saddr, addrlen)) {
err = UV__ERR(errno);
/* Convert ENOENT to EACCES for compatibility with Windows. */
if (err == UV_ENOENT)
err = UV_EACCES;
uv__close(sockfd);
goto err_socket;
}
/* Success. */
handle->flags |= UV_HANDLE_BOUND;
handle->pipe_fname = pipe_fname; /* NULL or a copy of |name| */
handle->io_watcher.fd = sockfd;
return 0;
err_socket:
/* pipe_fname is NULL (free is a no-op) or the copy made above. */
uv__free(pipe_fname);
return err;
}
/* Start listening for connections on a bound pipe handle.  IPC pipes cannot
 * listen.  Returns 0 or a negative UV_* error code. */
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
if (uv__stream_fd(handle) == -1)
return UV_EINVAL;
if (handle->ipc)
return UV_EINVAL;
#if defined(__MVS__) || defined(__PASE__)
/* On zOS, backlog=0 has undefined behaviour */
/* On IBMi PASE, backlog=0 leads to "Connection refused" error */
if (backlog == 0)
backlog = 1;
else if (backlog < 0)
backlog = SOMAXCONN;
#endif
if (listen(uv__stream_fd(handle), backlog))
return UV__ERR(errno);
handle->connection_cb = cb;
/* Incoming connections are surfaced through the generic server watcher. */
handle->io_watcher.cb = uv__server_io;
uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
return 0;
}
/* Internal close path for pipe handles: remove the filesystem socket entry
 * (if we created one) and then close the underlying stream. */
void uv__pipe_close(uv_pipe_t* handle) {
if (handle->pipe_fname) {
/*
* Unlink the file system entity before closing the file descriptor.
* Doing it the other way around introduces a race where our process
* unlinks a socket with the same name that's just been created by
* another thread or process.
*/
unlink(handle->pipe_fname);
uv__free((void*)handle->pipe_fname);
handle->pipe_fname = NULL;
}
uv__stream_close((uv_stream_t*)handle);
}
/* Adopt an existing file descriptor into |handle|.  The fd is switched to
 * non-blocking mode, and readability/writability flags are derived from its
 * open mode (O_ACCMODE).  Returns 0 or a negative UV_* error code. */
int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
int flags;
int mode;
int err;
flags = 0;
if (uv__fd_exists(handle->loop, fd))
return UV_EEXIST;
/* F_GETFL can be interrupted; retry on EINTR. */
do
mode = fcntl(fd, F_GETFL);
while (mode == -1 && errno == EINTR);
if (mode == -1)
return UV__ERR(errno); /* according to docs, must be EBADF */
err = uv__nonblock(fd, 1);
if (err)
return err;
#if defined(__APPLE__)
err = uv__stream_try_select((uv_stream_t*) handle, &fd);
if (err)
return err;
#endif /* defined(__APPLE__) */
/* O_RDONLY/O_WRONLY/O_RDWR decide which directions the stream exposes. */
mode &= O_ACCMODE;
if (mode != O_WRONLY)
flags |= UV_HANDLE_READABLE;
if (mode != O_RDONLY)
flags |= UV_HANDLE_WRITABLE;
return uv__stream_open((uv_stream_t*)handle, fd, flags);
}
/* Legacy connect entry point (void return).  Delegates to uv_pipe_connect2;
 * on synchronous failure it still registers the request and arranges for
 * the callback to fire with the error on the next loop tick, preserving the
 * historical always-asynchronous contract. */
void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
int err;
err = uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
if (err) {
handle->delayed_error = err;
handle->connect_req = req;
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
uv__queue_init(&req->queue);
/* Force callback to run on next tick in case of error. */
uv__io_feed(handle->loop, &handle->io_watcher);
}
}
/* Connect |handle| to a AF_UNIX address of |namelen| bytes (filesystem path
 * or Linux abstract-namespace name).  Validation failures return a negative
 * UV_* code immediately; connect-time errors are delivered asynchronously
 * through |cb| via delayed_error.  Returns 0 once the request is queued. */
int uv_pipe_connect2(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags,
uv_connect_cb cb) {
struct sockaddr_un saddr;
int new_sock;
int err;
int r;
socklen_t addrlen;
if (flags & ~UV_PIPE_NO_TRUNCATE)
return UV_EINVAL;
if (name == NULL)
return UV_EINVAL;
if (namelen == 0)
return UV_EINVAL;
if (includes_invalid_nul(name, namelen))
return UV_EINVAL;
if (flags & UV_PIPE_NO_TRUNCATE)
if (namelen > sizeof(saddr.sun_path))
return UV_EINVAL;
/* Truncate long paths. Documented behavior. */
if (namelen > sizeof(saddr.sun_path))
namelen = sizeof(saddr.sun_path);
/* Create a socket only if the handle doesn't already own one. */
new_sock = (uv__stream_fd(handle) == -1);
if (new_sock) {
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
goto out;
handle->io_watcher.fd = err;
}
memset(&saddr, 0, sizeof saddr);
memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
/* Abstract-namespace addresses use the exact byte length, not the full
 * struct, so trailing bytes are not mistaken for part of the name. */
if (*name == '\0')
addrlen = offsetof(struct sockaddr_un, sun_path) + namelen;
else
addrlen = sizeof saddr;
do {
r = connect(uv__stream_fd(handle), (struct sockaddr*)&saddr, addrlen);
}
while (r == -1 && errno == EINTR);
if (r == -1 && errno != EINPROGRESS) {
err = UV__ERR(errno);
#if defined(__CYGWIN__) || defined(__MSYS__)
/* EBADF is supposed to mean that the socket fd is bad, but
Cygwin reports EBADF instead of ENOTSOCK when the file is
not a socket. We do not expect to see a bad fd here
(e.g. due to new_sock), so translate the error. */
if (err == UV_EBADF)
err = UV_ENOTSOCK;
#endif
goto out;
}
err = 0;
if (new_sock) {
err = uv__stream_open((uv_stream_t*)handle,
uv__stream_fd(handle),
UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
}
/* Success or EINPROGRESS: wait for writability to complete the connect. */
if (err == 0)
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
out:
handle->delayed_error = err;
handle->connect_req = req;
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
uv__queue_init(&req->queue);
/* Force callback to run on next tick in case of error. */
if (err)
uv__io_feed(handle->loop, &handle->io_watcher);
return 0;
}
/* Shared implementation for uv_pipe_getsockname/getpeername: fetch the
 * AF_UNIX address via |func| (getsockname or getpeername) and copy the path
 * into |buffer|.  *size is in/out: capacity on entry, path length on exit
 * (required length when UV_ENOBUFS is returned).  Linux abstract-namespace
 * names keep their leading nul and get no terminator; regular paths do. */
static int uv__pipe_getsockpeername(const uv_pipe_t* handle,
uv__peersockfunc func,
char* buffer,
size_t* size) {
#if defined(__linux__)
static const int is_linux = 1;
#else
static const int is_linux = 0;
#endif
struct sockaddr_un sa;
socklen_t addrlen;
size_t slop;
char* p;
int err;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
addrlen = sizeof(sa);
memset(&sa, 0, addrlen);
err = uv__getsockpeername((const uv_handle_t*) handle,
func,
(struct sockaddr*) &sa,
(int*) &addrlen);
if (err < 0) {
*size = 0;
return err;
}
/* slop = extra byte needed for the nul terminator, when one is added. */
slop = 1;
if (is_linux && sa.sun_path[0] == '\0') {
/* Linux abstract namespace. Not zero-terminated. */
slop = 0;
addrlen -= offsetof(struct sockaddr_un, sun_path);
} else {
p = memchr(sa.sun_path, '\0', sizeof(sa.sun_path));
if (p == NULL)
p = ARRAY_END(sa.sun_path);
addrlen = p - sa.sun_path;
}
if ((size_t)addrlen + slop > *size) {
*size = addrlen + slop;
return UV_ENOBUFS;
}
memcpy(buffer, sa.sun_path, addrlen);
*size = addrlen;
/* only null-terminate if it's not an abstract socket */
if (buffer[0] != '\0')
buffer[addrlen] = '\0';
return 0;
}
/* Local address of the pipe socket; see uv__pipe_getsockpeername. */
int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
return uv__pipe_getsockpeername(handle, getsockname, buffer, size);
}
/* Remote address of the pipe socket; see uv__pipe_getsockpeername. */
int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
return uv__pipe_getsockpeername(handle, getpeername, buffer, size);
}
/* No-op on Unix; exists for API parity with the Windows implementation,
 * where it tunes the number of pre-created pipe server instances. */
void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
}
/* Number of file descriptors received over an IPC pipe that are waiting to
 * be accepted: the one in accepted_fd plus any queued behind it. */
int uv_pipe_pending_count(uv_pipe_t* handle) {
uv__stream_queued_fds_t* queued_fds;
if (!handle->ipc)
return 0;
if (handle->accepted_fd == -1)
return 0;
if (handle->queued_fds == NULL)
return 1;
queued_fds = handle->queued_fds;
return queued_fds->offset + 1;
}
/* Guess the handle type of the next pending fd on an IPC pipe.  Yields
 * UV_UNKNOWN_HANDLE when the pipe is not an IPC pipe or nothing is
 * currently pending. */
uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
  if (handle->ipc && handle->accepted_fd != -1)
    return uv_guess_handle(handle->accepted_fd);
  return UV_UNKNOWN_HANDLE;
}
/* Change the filesystem permissions of the pipe's socket file so other
 * users can read and/or write it.  |mode| is UV_READABLE, UV_WRITABLE, or
 * their combination.  Tries fchmod() first and falls back to chmod() on the
 * socket's path.  Returns 0 or a negative UV_* error code. */
int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
char name_buffer[1 + UV__PATH_MAX];
int desired_mode;
size_t name_len;
const char* name;
int fd;
int r;
if (handle == NULL)
return UV_EBADF;
fd = uv__stream_fd(handle);
if (fd == -1)
return UV_EBADF;
if (mode != UV_READABLE &&
mode != UV_WRITABLE &&
mode != (UV_WRITABLE | UV_READABLE))
return UV_EINVAL;
/* Map the UV_* direction flags onto rwx bits for user/group/other. */
desired_mode = 0;
if (mode & UV_READABLE)
desired_mode |= S_IRUSR | S_IRGRP | S_IROTH;
if (mode & UV_WRITABLE)
desired_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
/* fchmod on macOS and (Free|Net|Open)BSD does not support UNIX sockets. */
if (fchmod(fd, desired_mode))
if (errno != EINVAL && errno != EOPNOTSUPP)
return UV__ERR(errno);
/* Fall back to chmod. */
name_len = sizeof(name_buffer);
r = uv_pipe_getsockname(handle, name_buffer, &name_len);
if (r != 0)
return r;
name = name_buffer;
/* On some platforms, getsockname returns an empty string, and we try with pipe_fname. */
if (name_len == 0 && handle->pipe_fname != NULL)
name = handle->pipe_fname;
if (chmod(name, desired_mode))
return UV__ERR(errno);
return 0;
}
/* Create an anonymous pipe pair with close-on-exec set on both ends.
 * UV_NONBLOCK_PIPE in read_flags/write_flags makes the respective end
 * non-blocking.  Uses pipe2() where available to set flags atomically;
 * otherwise falls back to pipe() + fcntl.  Returns 0 or a UV_* error. */
int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) {
uv_os_fd_t temp[2];
int err;
#if defined(__linux__) || \
defined(__FreeBSD__) || \
defined(__OpenBSD__) || \
defined(__DragonFly__) || \
defined(__NetBSD__) || \
defined(__illumos__) || \
(defined(UV__SOLARIS_11_4) && UV__SOLARIS_11_4)
int flags = O_CLOEXEC;
/* O_NONBLOCK can only be applied to both ends at once via pipe2(). */
if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE))
flags |= UV_FS_O_NONBLOCK;
if (pipe2(temp, flags))
return UV__ERR(errno);
if (flags & UV_FS_O_NONBLOCK) {
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
}
#else
if (pipe(temp))
return UV__ERR(errno);
if ((err = uv__cloexec(temp[0], 1)))
goto fail;
if ((err = uv__cloexec(temp[1], 1)))
goto fail;
#endif
/* Reached when only one end (or neither) should be non-blocking. */
if (read_flags & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[0], 1)))
goto fail;
if (write_flags & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[1], 1)))
goto fail;
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
fail:
/* Don't leak the half-configured pair on any error path. */
uv__close(temp[0]);
uv__close(temp[1]);
return err;
}
/* Internal convenience wrapper: apply the same flag set to both ends. */
int uv__make_pipe(int fds[2], int flags) {
return uv_pipe(fds,
flags & UV_NONBLOCK_PIPE,
flags & UV_NONBLOCK_PIPE);
}

View File

@@ -0,0 +1,159 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <unistd.h>
#include <assert.h>
/* I/O watcher callback for uv_poll_t: translate raw poll() revents into
 * UV_READABLE/UV_WRITABLE/UV_PRIORITIZED/UV_DISCONNECT and invoke the user
 * callback.  A bare POLLERR (without POLLPRI) is treated as a dead fd: the
 * watcher is stopped and the callback gets UV_EBADF. */
static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_poll_t* handle;
int pevents;
handle = container_of(w, uv_poll_t, io_watcher);
/*
* As documented in the kernel source fs/kernfs/file.c #780
* poll will return POLLERR|POLLPRI in case of sysfs
* polling. This does not happen in case of out-of-band
* TCP messages.
*
* The above is the case on (at least) FreeBSD and Linux.
*
* So to properly determine a POLLPRI or a POLLERR we need
* to check for both.
*/
if ((events & POLLERR) && !(events & UV__POLLPRI)) {
uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
uv__handle_stop(handle);
handle->poll_cb(handle, UV_EBADF, 0);
return;
}
pevents = 0;
if (events & POLLIN)
pevents |= UV_READABLE;
if (events & UV__POLLPRI)
pevents |= UV_PRIORITIZED;
if (events & POLLOUT)
pevents |= UV_WRITABLE;
if (events & UV__POLLRDHUP)
pevents |= UV_DISCONNECT;
handle->poll_cb(handle, 0, pevents);
}
/* Initialize a poll handle for |fd|.  The fd must not already be watched by
 * this loop and must be pollable; it is switched to non-blocking mode.
 * Returns 0 or a negative UV_* error code. */
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
int err;
if (uv__fd_exists(loop, fd))
return UV_EEXIST;
err = uv__io_check_fd(loop, fd);
if (err)
return err;
/* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL).
* Workaround for e.g. kqueue fds not supporting ioctls.
*/
err = uv__nonblock(fd, 1);
#if UV__NONBLOCK_IS_IOCTL
if (err == UV_ENOTTY)
err = uv__nonblock_fcntl(fd, 1);
#endif
if (err)
return err;
uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
uv__io_init(&handle->io_watcher, uv__poll_io, fd);
handle->poll_cb = NULL;
return 0;
}
/* On Unix a socket is just a file descriptor, so delegate to uv_poll_init.
 * (This entry point exists because uv_os_sock_t differs on Windows.) */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
uv_os_sock_t socket) {
return uv_poll_init(loop, handle, socket);
}
/* Internal stop: deregister all event interest, deactivate the handle, and
 * invalidate any already-collected backend events for this fd so a stale
 * event cannot fire after the stop. */
static void uv__poll_stop(uv_poll_t* handle) {
uv__io_stop(handle->loop,
&handle->io_watcher,
POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
uv__handle_stop(handle);
uv__platform_invalidate_fd(handle->loop, handle->io_watcher.fd);
}
/* Public stop entry point; must not be called on a closing handle. */
int uv_poll_stop(uv_poll_t* handle) {
assert(!uv__is_closing(handle));
uv__poll_stop(handle);
return 0;
}
/* Start (or restart) watching the handle's fd for |pevents|, a mask of
 * UV_READABLE | UV_WRITABLE | UV_DISCONNECT | UV_PRIORITIZED.  Any previous
 * watch is stopped first; pevents == 0 leaves the handle stopped.  Returns
 * 0, or UV_EEXIST if another watcher already owns the fd in this loop. */
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
uv__io_t** watchers;
uv__io_t* w;
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
/* The fd may be watched, but only by this handle's own io watcher. */
watchers = handle->loop->watchers;
w = &handle->io_watcher;
if (uv__fd_exists(handle->loop, w->fd))
if (watchers[w->fd] != w)
return UV_EEXIST;
uv__poll_stop(handle);
if (pevents == 0)
return 0;
/* Translate UV_* interest flags into poll() event bits. */
events = 0;
if (pevents & UV_READABLE)
events |= POLLIN;
if (pevents & UV_PRIORITIZED)
events |= UV__POLLPRI;
if (pevents & UV_WRITABLE)
events |= POLLOUT;
if (pevents & UV_DISCONNECT)
events |= UV__POLLRDHUP;
uv__io_start(handle->loop, &handle->io_watcher, events);
uv__handle_start(handle);
handle->poll_cb = poll_cb;
return 0;
}
/* Close path for poll handles: stopping is all the teardown required. */
void uv__poll_close(uv_poll_t* handle) {
uv__poll_stop(handle);
}

View File

@@ -0,0 +1,36 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
/* High-resolution monotonic clock in nanoseconds. The clock type argument
 * is unused on this platform; CLOCK_MONOTONIC serves both purposes.
 * Aborts if the clock is unavailable (can't happen on supported systems).
 */
uint64_t uv__hrtime(uv_clocktype_t type) {
  struct timespec ts;

  if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
    abort();

  return (uint64_t) ts.tv_sec * 1000000000 + (uint64_t) ts.tv_nsec;
}

View File

@@ -0,0 +1,385 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
/* POSIX defines poll() as a portable way to wait on file descriptors.
* Here we maintain a dynamically sized array of file descriptors and
* events to pass as the first argument to poll().
*/
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
/* Set up the poll() backend state for a new loop. The pollfd array is
 * allocated lazily on first use (see uv__pollfds_maybe_resize). */
int uv__platform_loop_init(uv_loop_t* loop) {
  loop->poll_fds = NULL;
  loop->poll_fds_used = 0;
  loop->poll_fds_size = 0;
  loop->poll_fds_iterating = 0;
  return 0;
}
/* Release the pollfd array when the loop is closed. */
void uv__platform_loop_delete(uv_loop_t* loop) {
  uv__free(loop->poll_fds);
  loop->poll_fds = NULL;
}
/* Called in the child after fork(): discard the parent's pollfd array and
 * start from a clean slate. */
int uv__io_fork(uv_loop_t* loop) {
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}
/* Grow the poll fds array when it is full; no-op while room remains.
 * Capacity doubles each time (first allocation: 64 slots). Aborts on OOM.
 */
static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
  struct pollfd* fds;
  size_t new_size;
  size_t slot;

  if (loop->poll_fds_used < loop->poll_fds_size)
    return;

  new_size = loop->poll_fds_size != 0 ? 2 * loop->poll_fds_size : 64;
  fds = uv__reallocf(loop->poll_fds, new_size * sizeof(*fds));
  if (fds == NULL)
    abort();

  loop->poll_fds = fds;

  /* Mark every newly added slot as unused. */
  for (slot = loop->poll_fds_size; slot < new_size; slot++) {
    fds[slot].fd = -1;
    fds[slot].events = 0;
    fds[slot].revents = 0;
  }

  loop->poll_fds_size = new_size;
}
/* Primitive swap operation on poll fds array elements.
 * Used by uv__pollfds_del() for its swap-with-last removal scheme. */
static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
  struct pollfd pfd;

  pfd = loop->poll_fds[l];
  loop->poll_fds[l] = loop->poll_fds[r];
  loop->poll_fds[r] = pfd;
}
/* Register a watcher's fd (with its pending event mask) in the poll fds
 * array. If the fd is already present only its event mask is refreshed.
 * Must not run while uv__io_poll() is iterating over the array.
 */
static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
  struct pollfd* slot;
  size_t idx;

  assert(!loop->poll_fds_iterating);

  /* Already tracked?  Just update the event mask. */
  for (idx = 0; idx < loop->poll_fds_used; idx++) {
    slot = &loop->poll_fds[idx];
    if (slot->fd == w->fd) {
      slot->events = w->pevents;
      return;
    }
  }

  /* New fd: append it to the set, growing the array if necessary. */
  uv__pollfds_maybe_resize(loop);
  slot = &loop->poll_fds[loop->poll_fds_used];
  loop->poll_fds_used++;
  slot->fd = w->fd;
  slot->events = w->pevents;
}
/* Remove a watcher's fd from our poll fds array using swap-with-last, so
 * removal is O(1) per match. Calling with fd == -1 purges every
 * invalidated slot in one pass. Must not run during iteration. */
static void uv__pollfds_del(uv_loop_t* loop, int fd) {
  size_t i;

  assert(!loop->poll_fds_iterating);

  for (i = 0; i < loop->poll_fds_used;) {
    if (loop->poll_fds[i].fd == fd) {
      /* swap to last position and remove */
      --loop->poll_fds_used;
      uv__pollfds_swap(loop, i, loop->poll_fds_used);
      loop->poll_fds[loop->poll_fds_used].fd = -1;
      loop->poll_fds[loop->poll_fds_used].events = 0;
      loop->poll_fds[loop->poll_fds_used].revents = 0;

      /* This method is called with an fd of -1 to purge the invalidated fds,
       * so we may possibly have multiples to remove.
       */
      if (-1 != fd)
        return;
    } else {
      /* We must only increment the loop counter when the fds do not match.
       * Otherwise, when we are purging an invalidated fd, the value just
       * swapped here from the previous end of the array will be skipped.
       */
      ++i;
    }
  }
}
/* The loop's I/O step for the poll(2) backend: flush queued watchers into
 * the pollfd set, call poll(), and dispatch callbacks for fds with pending
 * events. `timeout` is in milliseconds; -1 means block indefinitely.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  uv__loop_internal_fields_t* lfields;
  sigset_t* pset;
  sigset_t set;
  uint64_t time_base;
  uint64_t time_diff;
  struct uv__queue* q;
  uv__io_t* w;
  size_t i;
  unsigned int nevents;
  int nfds;
  int have_signals;
  struct pollfd* pe;
  int fd;
  int user_timeout;
  int reset_timeout;

  /* Nothing to watch; the pending-watcher queue must be empty too. */
  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }

  lfields = uv__get_internal_fields(loop);

  /* Take queued watchers and add their fds to our poll fds array. */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);

    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    uv__pollfds_add(loop, w);

    w->events = w->pevents;
  }

  /* Prepare a set of signals to block around poll(), if any. */
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  time_base = loop->time;

  /* When idle-time metrics are on, do a non-blocking first pass so waiting
   * time can be attributed accurately; the caller's timeout is restored
   * afterwards via reset_timeout/user_timeout. */
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  /* Loop calls to poll() and processing of results. If we get some
   * results from poll() but they turn out not to be interesting to
   * our caller then we need to loop around and poll() again.
   */
  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    if (pset != NULL)
      if (pthread_sigmask(SIG_BLOCK, pset, NULL))
        abort();

    nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);

    if (pset != NULL)
      if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      /* Timed out. If this was only the metrics pre-pass, go around again
       * with the caller's real timeout. */
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      /* EINTR is the only tolerable poll() failure. */
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    /* Tell uv__platform_invalidate_fd not to manipulate our array
     * while we are iterating over it.
     */
    loop->poll_fds_iterating = 1;

    /* Initialize a count of events that we care about. */
    nevents = 0;
    have_signals = 0;

    /* Loop over the entire poll fds array looking for returned events. */
    for (i = 0; i < loop->poll_fds_used; i++) {
      pe = loop->poll_fds + i;
      fd = pe->fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd. */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, ignore. */
        uv__platform_invalidate_fd(loop, fd);
        continue;
      }

      /* Filter out events that user has not requested us to watch
       * (e.g. POLLNVAL).
       */
      pe->revents &= w->pevents | POLLERR | POLLHUP;

      if (pe->revents != 0) {
        /* Run signal watchers last. */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->revents);
        }

        nevents++;
      }
    }

    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->poll_fds_iterating = 0;

    /* Purge invalidated fds from our poll fds array. */
    uv__pollfds_del(loop, -1);

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0)
      return;

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    /* Deduct the time already spent waiting from the remaining timeout. */
    time_diff = loop->time - time_base;
    if (time_diff >= (uint64_t) timeout)
      return;

    timeout -= time_diff;
  }
}
/* Drop `fd` from the poll fds set because no one is interested in its
 * events anymore. If uv__io_poll() is mid-iteration the matching slots are
 * only marked invalid (fd = -1) and purged after the iteration completes.
 */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  size_t i;

  assert(fd >= 0);

  if (!loop->poll_fds_iterating) {
    /* Safe to mutate the array: remove the fd outright. */
    uv__pollfds_del(loop, fd);
    return;
  }

  /* Iteration in progress; just invalidate matching slots. */
  for (i = 0; i < loop->poll_fds_used; i++) {
    if (loop->poll_fds[i].fd != fd)
      continue;
    loop->poll_fds[i].fd = -1;
    loop->poll_fds[i].events = 0;
    loop->poll_fds[i].revents = 0;
  }
}
/* Probe whether `fd` can be watched with poll(2). A POLLNVAL result means
 * the descriptor is not pollable (UV_EINVAL); a syscall failure maps to
 * the corresponding UV_ error; otherwise 0.
 */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct pollfd pfd;
  int rc;

  pfd.fd = fd;
  pfd.events = POLLIN;

  do
    rc = poll(&pfd, 1, 0);
  while (rc == -1 && (errno == EINTR || errno == EAGAIN));

  if (rc == -1)
    return UV__ERR(errno);

  if (pfd.revents & POLLNVAL)
    return UV_EINVAL;

  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,45 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stddef.h>
#include <unistd.h>
/* Resolve the absolute path of the running executable via the
 * /proc/self/exe symlink. On success `buffer` holds the NUL-terminated
 * path and *size its length (possibly truncated to the buffer size).
 */
int uv_exepath(char* buffer, size_t* size) {
  ssize_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* Reserve one byte for the terminating NUL; readlink() does not add it. */
  len = *size - 1;
  if (len > 0)
    len = readlink("/proc/self/exe", buffer, len);

  if (len == -1)
    return UV__ERR(errno);

  buffer[len] = '\0';
  *size = len;

  return 0;
}

View File

@@ -0,0 +1,157 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <string.h>
/* Snapshot of the writable argv[0] area that backs the process title. */
struct uv__process_title {
  char* str;   /* Points into the original argv[0] storage. */
  size_t len;  /* Length of the current process title. */
  size_t cap;  /* Maximum capacity. Computed once in uv_setup_args(). */
};

/* Platform-specific hook that actually publishes the new title. */
extern void uv__set_process_title(const char* title);

static uv_mutex_t process_title_mutex;
static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static struct uv__process_title process_title;
static void* args_mem;  /* Private argv copy made in uv_setup_args(). */

/* uv_once initializer for the title mutex. */
static void init_process_title_mutex_once(void) {
  uv_mutex_init(&process_title_mutex);
}
/* Make a private copy of argv so uv_set_process_title() can later scribble
 * over the original argv storage. Returns the copy (which callers should
 * use instead of the original), or the original argv unchanged when
 * argc <= 0 or allocation fails.
 */
char** uv_setup_args(int argc, char** argv) {
  struct uv__process_title pt;
  char** new_argv;
  size_t size;
  char* s;
  int i;

  if (argc <= 0)
    return argv;

  pt.str = argv[0];
  pt.len = strlen(argv[0]);
  pt.cap = pt.len + 1;

  /* Calculate how much memory we need for the argv strings. */
  size = pt.cap;
  for (i = 1; i < argc; i++)
    size += strlen(argv[i]) + 1;

  /* Add space for the argv pointers. */
  size += (argc + 1) * sizeof(char*);

  new_argv = uv__malloc(size);
  if (new_argv == NULL)
    return argv;

  /* Copy over the strings and set up the pointer table.
   * The `goto loop` enters the for loop mid-body on the first iteration so
   * the size of argv[0], already computed above, is reused. */
  i = 0;
  s = (char*) &new_argv[argc + 1];
  size = pt.cap;
  goto loop;

  for (/* empty */; i < argc; i++) {
    size = strlen(argv[i]) + 1;
  loop:
    memcpy(s, argv[i], size);
    new_argv[i] = s;
    s += size;
  }
  new_argv[i] = NULL;

  /* Title capacity is the span of the original argv strings.
   * NOTE(review): assumes the platform lays the argv strings out
   * contiguously (true on Linux) -- confirm for other targets. */
  pt.cap = argv[i - 1] + size - argv[0];

  args_mem = new_argv;
  process_title = pt;

  return new_argv;
}
/* Replace the process title with `title`, truncated to the capacity that
 * uv_setup_args() measured. Returns 0, or UV_ENOBUFS if uv_setup_args()
 * was never called (no argv buffer to write into).
 */
int uv_set_process_title(const char* title) {
  struct uv__process_title* pt;
  size_t len;

  /* If uv_setup_args wasn't called or failed, we can't continue. */
  if (args_mem == NULL)
    return UV_ENOBUFS;

  pt = &process_title;
  len = strlen(title);

  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);

  /* Truncate to capacity, keeping room for the terminating NUL. */
  if (len >= pt->cap) {
    len = 0;
    if (pt->cap > 0)
      len = pt->cap - 1;
  }

  memcpy(pt->str, title, len);
  /* NUL-fill the remainder so stale title bytes don't linger in argv. */
  memset(pt->str + len, '\0', pt->cap - len);
  pt->len = len;

  uv__set_process_title(pt->str);

  uv_mutex_unlock(&process_title_mutex);

  return 0;
}
/* Copy the current process title (NUL-terminated) into `buffer`. Returns
 * UV_EINVAL for bad arguments and UV_ENOBUFS when the buffer is too small
 * or uv_setup_args() was never called.
 */
int uv_get_process_title(char* buffer, size_t size) {
  if (buffer == NULL || size == 0)
    return UV_EINVAL;

  /* If uv_setup_args wasn't called or failed, we can't continue. */
  if (args_mem == NULL)
    return UV_ENOBUFS;

  uv_once(&process_title_mutex_once, init_process_title_mutex_once);
  uv_mutex_lock(&process_title_mutex);

  if (size <= process_title.len) {
    uv_mutex_unlock(&process_title_mutex);
    return UV_ENOBUFS;
  }

  /* len + 1 also copies the terminating NUL; the explicit store below is
   * defensive and covers the len == 0 case. */
  if (process_title.len != 0)
    memcpy(buffer, process_title.str, process_title.len + 1);

  buffer[process_title.len] = '\0';

  uv_mutex_unlock(&process_title_mutex);

  return 0;
}
/* Release the private argv copy made by uv_setup_args(). */
void uv__process_title_cleanup(void) {
  uv__free(args_mem);  /* Keep valgrind happy. */
  args_mem = NULL;
}

View File

@@ -0,0 +1,142 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <string.h>
#include <sys/process.h>
#include <sys/neutrino.h>
#include <sys/memmsg.h>
#include <sys/syspage.h>
#include <sys/procfs.h>
/* Query the QNX memory manager for total and free system memory in bytes.
 * Both outputs are set to 0 when the MsgSend() query fails.
 */
static void
get_mem_info(uint64_t* totalmem, uint64_t* freemem) {
  mem_info_t msg;

  memset(&msg, 0, sizeof(msg));
  msg.i.type = _MEM_INFO;
  msg.i.fd = -1;  /* -1 queries the system-wide memory object. */

  if (MsgSend(MEMMGR_COID, &msg.i, sizeof(msg.i), &msg.o, sizeof(msg.o))
      != -1) {
    *totalmem = msg.o.info.__posix_tmi_total;
    *freemem = msg.o.info.posix_tmi_length;
  } else {
    *totalmem = 0;
    *freemem = 0;
  }
}
/* Load averages are not available on QNX; report zero load. */
void uv_loadavg(double avg[3]) {
  avg[0] = avg[1] = avg[2] = 0.0;
}
/* Resolve the absolute path of the running executable (QNX: via
 * _cmdname() + realpath()). On success `buffer` holds the NUL-terminated,
 * possibly truncated path and *size its length.
 *
 * Fix: the realpath() result was previously ignored; on failure `path`
 * stayed uninitialized and was copied out (undefined behavior). Now the
 * error is propagated instead.
 */
int uv_exepath(char* buffer, size_t* size) {
  char path[PATH_MAX];

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* realpath() returns NULL and sets errno on failure (also covers a NULL
   * _cmdname() result, which fails with EINVAL). */
  if (realpath(_cmdname(NULL), path) == NULL)
    return UV__ERR(errno);

  strlcpy(buffer, path, *size);
  *size = strlen(buffer);
  return 0;
}
/* Bytes of free system memory per the QNX memory manager; 0 on failure. */
uint64_t uv_get_free_memory(void) {
  uint64_t total;
  uint64_t avail;

  get_mem_info(&total, &avail);
  return avail;
}
/* Total bytes of system memory per the QNX memory manager; 0 on failure. */
uint64_t uv_get_total_memory(void) {
  uint64_t total;
  uint64_t avail;

  get_mem_info(&total, &avail);
  return total;
}
/* QNX has no cgroup-style memory limit; 0 means "no constraint known". */
uint64_t uv_get_constrained_memory(void) {
  return 0;
}
/* With no memory constraint to subtract, available == free memory. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* Resident set size of the current process, obtained from the
 * DCMD_PROC_ASINFO devctl on /proc/self/ctl. Returns 0 or a UV_ error.
 */
int uv_resident_set_memory(size_t* rss) {
  int fd;
  procfs_asinfo asinfo;

  fd = uv__open_cloexec("/proc/self/ctl", O_RDONLY);
  if (fd == -1)
    return UV__ERR(errno);

  if (devctl(fd, DCMD_PROC_ASINFO, &asinfo, sizeof(asinfo), 0) == -1) {
    uv__close(fd);
    return UV__ERR(errno);
  }

  uv__close(fd);
  *rss = asinfo.rss;
  return 0;
}
/* System uptime in seconds, from the syspage qtime entry (nanoseconds
 * since boot). Always succeeds.
 */
int uv_uptime(double* uptime) {
  struct qtime_entry* qtime = _SYSPAGE_ENTRY(_syspage_ptr, qtime);
  *uptime = (qtime->nsec / 1000000000.0);
  return 0;
}
/* Enumerate CPUs from the QNX syspage, reporting model and speed for each.
 * Returns 0 on success or UV_ENOMEM. The caller frees the result with
 * uv_free_cpu_info().
 *
 * Fixes: the uv_cpu_times_t fields were left uninitialized (callers read
 * garbage) -- the array is now zero-filled; the strdup() result was
 * unchecked, and plain strdup() mismatches the uv__free() used by
 * uv_free_cpu_info() -- use uv__strdup() and unwind on failure.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  struct cpuinfo_entry* cpuinfo =
      (struct cpuinfo_entry*)_SYSPAGE_ENTRY(_syspage_ptr, new_cpuinfo);
  size_t cpuinfo_size = _SYSPAGE_ELEMENT_SIZE(_syspage_ptr, cpuinfo);
  struct strings_entry* strings = _SYSPAGE_ENTRY(_syspage_ptr, strings);
  int num_cpus = _syspage_ptr->num_cpu;
  int i;
  int j;

  *cpu_infos = uv__malloc(num_cpus * sizeof(**cpu_infos));
  if (*cpu_infos == NULL)
    return UV_ENOMEM;

  /* Zero-fill so the cpu_times fields, which QNX does not report, read as
   * 0 instead of uninitialized memory. */
  memset(*cpu_infos, 0, num_cpus * sizeof(**cpu_infos));

  for (i = 0; i < num_cpus; i++) {
    (*cpu_infos)[i].model = uv__strdup(&strings->data[cpuinfo->name]);
    if ((*cpu_infos)[i].model == NULL) {
      /* Unwind the models allocated so far. */
      for (j = 0; j < i; j++)
        uv__free((*cpu_infos)[j].model);
      uv__free(*cpu_infos);
      *cpu_infos = NULL;
      return UV_ENOMEM;
    }
    (*cpu_infos)[i].speed = cpuinfo->speed;
    SYSPAGE_ARRAY_ADJ_OFFSET(cpuinfo, cpuinfo, cpuinfo_size);
  }

  *count = num_cpus;
  return 0;
}

View File

@@ -0,0 +1,93 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <sys/stat.h>
#include <unistd.h>
static uv_once_t once = UV_ONCE_INIT;
static int status;
/* Read exactly `buflen` random bytes from the character device at `path`.
 * Returns 0 on success, the open error, UV_EIO for a non-device path or
 * premature EOF, or a UV_ error from fstat/read.
 */
int uv__random_readpath(const char* path, void* buf, size_t buflen) {
  struct stat s;
  size_t off;
  ssize_t r;
  int fd;
  int rc;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return fd;

  if (uv__fstat(fd, &s)) {
    rc = UV__ERR(errno);
    goto done;
  }

  /* Refuse regular files etc.; only a character device is a real RNG. */
  if (!S_ISCHR(s.st_mode)) {
    rc = UV_EIO;
    goto done;
  }

  rc = 0;
  off = 0;
  while (off != buflen) {
    do
      r = read(fd, (char*) buf + off, buflen - off);
    while (r == -1 && errno == EINTR);

    if (r == -1) {
      rc = UV__ERR(errno);
      break;
    }

    if (r == 0) {  /* Unexpected EOF from a random device. */
      rc = UV_EIO;
      break;
    }

    off += r;
  }

done:
  uv__close(fd);
  return rc;
}
/* One-time initializer for uv__random_devurandom(): performs a priming
 * read from /dev/random and records the outcome in the file-scope
 * `status`. */
static void uv__random_devurandom_init(void) {
  char c;

  /* Linux's random(4) man page suggests applications should read at least
   * once from /dev/random before switching to /dev/urandom in order to seed
   * the system RNG. Reads from /dev/random can of course block indefinitely
   * until entropy is available but that's the point.
   */
  status = uv__random_readpath("/dev/random", &c, 1);
}
/* Fill `buf` with `buflen` random bytes from /dev/urandom. The first call
 * (via uv_once) primes the RNG from /dev/random; if that priming read
 * failed, its error is returned on every subsequent call. */
int uv__random_devurandom(void* buf, size_t buflen) {
  uv_once(&once, uv__random_devurandom_init);

  if (status != 0)
    return status;

  return uv__random_readpath("/dev/urandom", buf, buflen);
}

View File

@@ -0,0 +1,57 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stddef.h>
#include <dlfcn.h>
typedef int (*uv__getentropy_cb)(void *, size_t);
static uv__getentropy_cb uv__getentropy;
static uv_once_t once = UV_ONCE_INIT;
/* Resolve getentropy() at runtime with dlsym(); not every supported libc
 * provides it. uv__getentropy stays NULL when it is unavailable. */
static void uv__random_getentropy_init(void) {
  uv__getentropy = (uv__getentropy_cb) dlsym(RTLD_DEFAULT, "getentropy");
}
/* Fill `buf` with `buflen` random bytes via getentropy(). Returns 0,
 * UV_ENOSYS when getentropy() could not be resolved, or a UV_ error.
 */
int uv__random_getentropy(void* buf, size_t buflen) {
  char* p;
  size_t remaining;
  size_t chunk;

  uv_once(&once, uv__random_getentropy_init);

  if (uv__getentropy == NULL)
    return UV_ENOSYS;

  /* getentropy() rejects requests larger than 256 bytes; split them up.
   * do-while so a zero-length request still makes one (no-op) call. */
  p = buf;
  remaining = buflen;
  do {
    chunk = remaining < 256 ? remaining : 256;
    if (uv__getentropy(p, chunk))
      return UV__ERR(errno);
    p += chunk;
    remaining -= chunk;
  } while (remaining > 0);

  return 0;
}

View File

@@ -0,0 +1,86 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#ifdef __linux__
#define uv__random_getrandom_init() 0
#else /* !__linux__ */
#include <stddef.h>
#include <dlfcn.h>
typedef ssize_t (*uv__getrandom_cb)(void *, size_t, unsigned);
static uv__getrandom_cb uv__getrandom;
static uv_once_t once = UV_ONCE_INIT;
/* Resolve getrandom() with dlsym() so this also links on platforms whose
 * libc does not declare it. */
static void uv__random_getrandom_init_once(void) {
  uv__getrandom = (uv__getrandom_cb) dlsym(RTLD_DEFAULT, "getrandom");
}
/* Returns 0 when a getrandom() implementation was found, UV_ENOSYS
 * otherwise. Safe to call repeatedly; resolution happens once. */
static int uv__random_getrandom_init(void) {
  uv_once(&once, uv__random_getrandom_init_once);

  if (uv__getrandom == NULL)
    return UV_ENOSYS;

  return 0;
}
#endif /* !__linux__ */
/* Fill `buf` with `buflen` random bytes via getrandom(2). Returns 0,
 * UV_ENOSYS when getrandom() is unavailable, UV_EIO on a zero-length
 * read, or a UV_ error mapped from errno.
 */
int uv__random_getrandom(void* buf, size_t buflen) {
  ssize_t n;
  size_t pos;
  int rc;

  rc = uv__random_getrandom_init();
  if (rc != 0)
    return rc;

  for (pos = 0; pos != buflen; pos += n) {
    do {
      n = buflen - pos;

      /* Most getrandom() implementations promise that reads <= 256 bytes
       * will always succeed and won't be interrupted by signals.
       * It's therefore useful to split it up in smaller reads because
       * one big read may, in theory, continuously fail with EINTR.
       */
      if (n > 256)
        n = 256;

      n = uv__getrandom((char *) buf + pos, n, 0);
    } while (n == -1 && errno == EINTR);

    if (n == -1)
      return UV__ERR(errno);

    if (n == 0)
      return UV_EIO;
  }

  return 0;
}

View File

@@ -0,0 +1,99 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <string.h>
#include <syscall.h>
#include <unistd.h>
/* Argument block for the legacy _sysctl(2) system call; layout mirrors the
 * kernel's struct __sysctl_args. */
struct uv__sysctl_args {
  int* name;        /* Integer path of the sysctl entry. */
  int nlen;         /* Number of elements in `name`. */
  void* oldval;     /* Out-buffer for the current value. */
  size_t* oldlenp;  /* In/out: buffer size / bytes written. */
  void* newval;     /* Unused here: we only read. */
  size_t newlen;
  unsigned long unused[4];
};
/* Last-resort RNG for old Linux kernels: repeatedly reads the kernel's
 * random-UUID sysctl (kernel.random.uuid) and harvests its entropy bytes
 * (14 usable bytes per UUID). Only used when getrandom(2) and
 * /dev/urandom are both unavailable.
 */
int uv__random_sysctl(void* buf, size_t buflen) {
  static int name[] = {1 /*CTL_KERN*/, 40 /*KERN_RANDOM*/, 6 /*RANDOM_UUID*/};
  struct uv__sysctl_args args;
  char uuid[16];
  char* p;
  char* pe;
  size_t n;

  p = buf;
  pe = p + buflen;

  while (p < pe) {
    memset(&args, 0, sizeof(args));

    args.name = name;
    args.nlen = ARRAY_SIZE(name);
    args.oldval = uuid;
    args.oldlenp = &n;
    n = sizeof(uuid);

    /* Emits a deprecation warning with some kernels but that seems like
     * an okay trade-off for the fallback of the fallback: this function is
     * only called when neither getrandom(2) nor /dev/urandom are available.
     * Fails with ENOSYS on kernels configured without CONFIG_SYSCTL_SYSCALL.
     * At least arm64 never had a _sysctl system call and therefore doesn't
     * have a SYS__sysctl define either.
     */
#ifdef SYS__sysctl
    if (syscall(SYS__sysctl, &args) == -1)
      return UV__ERR(errno);
#else
    {
      (void) &args;
      return UV_ENOSYS;
    }
#endif

    if (n != sizeof(uuid))
      return UV_EIO;  /* Can't happen. */

    /* uuid[] is now a type 4 UUID. Bytes 6 and 8 (counting from zero) contain
     * 4 and 5 bits of entropy, respectively. For ease of use, we skip those
     * and only use 14 of the 16 bytes.
     */
    uuid[6] = uuid[14];
    uuid[8] = uuid[15];

    n = pe - p;
    if (n > 14)
      n = 14;

    memcpy(p, uuid, n);
    p += n;
  }

  return 0;
}

View File

@@ -0,0 +1,581 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifndef SA_RESTART
# define SA_RESTART 0
#endif
/* Message written to a loop's signal pipe by the signal handler; the loop
 * side reads and dispatches these. */
typedef struct {
  uv_signal_t* handle;
  int signum;
} uv__signal_msg_t;

RB_HEAD(uv__signal_tree_s, uv_signal_s);

static int uv__signal_unlock(void);
static int uv__signal_start(uv_signal_t* handle,
                            uv_signal_cb signal_cb,
                            int signum,
                            int oneshot);
static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
static void uv__signal_stop(uv_signal_t* handle);
static void uv__signal_unregister_handler(int signum);

static uv_once_t uv__signal_global_init_guard = UV_ONCE_INIT;
/* Process-global red-black tree of active signal handles, ordered by
 * uv__signal_compare. Protected by the pipe-based lock below. */
static struct uv__signal_tree_s uv__signal_tree =
    RB_INITIALIZER(uv__signal_tree);
/* Async-signal-safe lock: a token byte in the pipe means "unlocked". */
static int uv__signal_lock_pipefd[2] = { -1, -1 };

RB_GENERATE_STATIC(uv__signal_tree_s,
                   uv_signal_s, tree_entry,
                   uv__signal_compare)

static void uv__signal_global_reinit(void);
/* One-time process-global setup: registers the post-fork reinit hook and
 * (re)creates the lock pipe. */
static void uv__signal_global_init(void) {
  if (uv__signal_lock_pipefd[0] == -1)
    /* pthread_atfork can register before and after handlers, one
     * for each child. This only registers one for the child. That
     * state is both persistent and cumulative, so if we keep doing
     * it the handler functions will be called multiple times. Thus
     * we only want to do it once.
     */
    if (pthread_atfork(NULL, NULL, &uv__signal_global_reinit))
      abort();

  uv__signal_global_reinit();
}
/* Close the global lock pipe (post-fork child / teardown path).
 * We can only use signal-safe functions here.
 * That includes read/write and close, fortunately.
 * We do all of this directly here instead of resetting
 * uv__signal_global_init_guard because
 * uv__signal_global_once_init is only called from uv_loop_init
 * and this needs to function in existing loops.
 */
void uv__signal_cleanup(void) {
  if (uv__signal_lock_pipefd[0] != -1) {
    uv__close(uv__signal_lock_pipefd[0]);
    uv__signal_lock_pipefd[0] = -1;
  }

  if (uv__signal_lock_pipefd[1] != -1) {
    uv__close(uv__signal_lock_pipefd[1]);
    uv__signal_lock_pipefd[1] = -1;
  }
}
/* (Re)create the lock pipe and leave it in the unlocked state. Runs at
 * first init and again in the child after fork(). */
static void uv__signal_global_reinit(void) {
  uv__signal_cleanup();

  if (uv__make_pipe(uv__signal_lock_pipefd, 0))
    abort();

  /* Deposit the unlock token so the first uv__signal_lock() succeeds. */
  if (uv__signal_unlock())
    abort();
}
/* Run the process-wide signal machinery setup exactly once. */
void uv__signal_global_once_init(void) {
  uv_once(&uv__signal_global_init_guard, uv__signal_global_init);
}
/* Acquire the global signal lock by draining the token byte from the lock
 * pipe; blocks until the token is available. Async-signal-safe. Returns 0
 * on success, -1 on read failure. */
static int uv__signal_lock(void) {
  int r;
  char data;

  do {
    r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
  } while (r < 0 && errno == EINTR);

  return (r < 0) ? -1 : 0;
}
/* Release the global signal lock by writing the token byte back into the
 * lock pipe. Async-signal-safe. Returns 0 on success, -1 on failure. */
static int uv__signal_unlock(void) {
  int r;
  char data = 42;  /* Arbitrary token value. */

  do {
    r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
  } while (r < 0 && errno == EINTR);

  return (r < 0) ? -1 : 0;
}
/* Block all signals for the calling thread and take the global signal
 * lock; the previous mask is returned through `saved_sigmask` for the
 * matching uv__signal_unlock_and_unblock(). Aborts on failure. */
static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
  sigset_t new_mask;

  if (sigfillset(&new_mask))
    abort();

  /* to shut up valgrind */
  sigemptyset(saved_sigmask);

  if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
    abort();

  if (uv__signal_lock())
    abort();
}
/* Counterpart of uv__signal_block_and_lock(): release the signal lock, then
 * restore the saved signal mask. Unlocking before unblocking means a signal
 * delivered right after unblocking can take the lock without deadlocking.
 */
static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
  if (uv__signal_unlock())
    abort();
  if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
    abort();
}
static uv_signal_t* uv__signal_first_handle(int signum) {
  /* Caller must hold the signal lock. Looks up the first watcher for
   * `signum` in the shared RB tree; the sort order (signum, one-shot flag,
   * loop, address) makes { signum, flags=0, loop=NULL } the lowest possible
   * key for that signal. Returns NULL when no watcher is registered.
   */
  uv_signal_t key;
  uv_signal_t* found;

  key.signum = signum;
  key.flags = 0;
  key.loop = NULL;

  found = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &key);
  if (found == NULL || found->signum != signum)
    return NULL;
  return found;
}
/* The process-wide signal handler shared by all watched signals. Only
 * async-signal-safe operations are used (read/write via the lock pipe,
 * memset, errno save/restore). For every active watcher of `signum` it
 * writes a uv__signal_msg_t to the watcher's loop's self-pipe; the loop
 * thread dispatches it in uv__signal_event().
 */
static void uv__signal_handler(int signum) {
  uv__signal_msg_t msg;
  uv_signal_t* handle;
  int saved_errno;
  /* Preserve errno so the interrupted code doesn't observe changes. */
  saved_errno = errno;
  memset(&msg, 0, sizeof msg);
  if (uv__signal_lock()) {
    errno = saved_errno;
    return;
  }
  for (handle = uv__signal_first_handle(signum);
       handle != NULL && handle->signum == signum;
       handle = RB_NEXT(uv__signal_tree_s, handle)) {
    int r;
    msg.signum = signum;
    msg.handle = handle;
    /* write() should be atomic for small data chunks, so the entire message
     * should be written at once. In theory the pipe could become full, in
     * which case the user is out of luck.
     */
    do {
      r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
    } while (r == -1 && errno == EINTR);
    assert(r == sizeof msg ||
           (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
    if (r != -1)
      handle->caught_signals++;
  }
  uv__signal_unlock();
  errno = saved_errno;
}
static int uv__signal_register_handler(int signum, int oneshot) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
/* XXX use a separate signal stack? */
memset(&sa, 0, sizeof(sa));
if (sigfillset(&sa.sa_mask))
abort();
sa.sa_handler = uv__signal_handler;
sa.sa_flags = SA_RESTART;
if (oneshot)
sa.sa_flags |= SA_RESETHAND;
/* XXX save old action so we can restore it later on? */
if (sigaction(signum, &sa, NULL))
return UV__ERR(errno);
return 0;
}
static void uv__signal_unregister_handler(int signum) {
/* When this function is called, the signal lock must be held. */
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
/* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
* signal implies that it was successfully registered earlier, so EINVAL
* should never happen.
*/
if (sigaction(signum, &sa, NULL))
abort();
}
/* Lazy per-loop setup: create the nonblocking self-pipe the signal handler
 * writes to and start watching its read end. Idempotent — returns 0 when
 * the pipe already exists. On watcher-init failure the pipe is rolled back
 * so a later call can retry. Returns 0 or a negative error code.
 */
static int uv__signal_loop_once_init(uv_loop_t* loop) {
  int* pipefd;
  int err;
  /* Return if already initialized. */
  pipefd = loop->signal_pipefd;
  if (pipefd[0] != -1)
    return 0;
  err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
  if (err)
    return err;
  err = uv__io_init_start(loop, &loop->signal_io_watcher, uv__signal_event,
                          pipefd[0], POLLIN);
  if (err) {
    /* Roll back: close both ends and mark the loop uninitialized again. */
    uv__close(pipefd[0]);
    uv__close(pipefd[1]);
    pipefd[0] = -1;
    pipefd[1] = -1;
  }
  return err;
}
/* Called in the child after fork(): discard the self-pipe inherited from
 * the parent, zero each signal handle's counters, and re-create the loop's
 * signal machinery. Returns 0 or a negative error code.
 */
int uv__signal_loop_fork(uv_loop_t* loop) {
  struct uv__queue* q;
  /* Nothing to do when this loop never initialized its signal pipe. */
  if (loop->signal_pipefd[0] == -1)
    return 0;
  uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
  uv__close(loop->signal_pipefd[0]);
  uv__close(loop->signal_pipefd[1]);
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  uv__queue_foreach(q, &loop->handle_queue) {
    uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue);
    uv_signal_t* sh;
    if (handle->type != UV_SIGNAL)
      continue;
    sh = (uv_signal_t*) handle;
    sh->caught_signals = 0;
    sh->dispatched_signals = 0;
  }
  return uv__signal_loop_once_init(loop);
}
/* Tear down a loop's signal machinery: stop every signal watcher still
 * attached to the loop, then close the self-pipe.
 */
void uv__signal_loop_cleanup(uv_loop_t* loop) {
  struct uv__queue* q;
  /* Stop all the signal watchers that are still attached to this loop. This
   * ensures that the (shared) signal tree doesn't contain any invalid
   * entries, and that signal handlers are removed when appropriate.
   * It's safe to use uv__queue_foreach here because the handles and the handle
   * queue are not modified by uv__signal_stop().
   */
  uv__queue_foreach(q, &loop->handle_queue) {
    uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue);
    if (handle->type == UV_SIGNAL)
      uv__signal_stop((uv_signal_t*) handle);
  }
  if (loop->signal_pipefd[0] != -1) {
    uv__close(loop->signal_pipefd[0]);
    loop->signal_pipefd[0] = -1;
  }
  if (loop->signal_pipefd[1] != -1) {
    uv__close(loop->signal_pipefd[1]);
    loop->signal_pipefd[1] = -1;
  }
}
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
  /* Public initializer: lazily set up the loop's signal pipe and watcher,
   * then initialize `handle` as a stopped signal watcher with cleared
   * counters. Returns 0 or the error from the per-loop setup.
   */
  int err;

  if ((err = uv__signal_loop_once_init(loop)) != 0)
    return err;

  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
  handle->signum = 0;
  handle->caught_signals = 0;
  handle->dispatched_signals = 0;

  return 0;
}
/* Close hook: stopping the watcher removes it from the shared tree and,
 * when it was the last watcher for its signal, restores the default
 * disposition (see uv__signal_stop).
 */
void uv__signal_close(uv_signal_t* handle) {
  uv__signal_stop(handle);
}
/* Start watching `signum`; the callback fires on every delivery. */
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
  return uv__signal_start(handle, signal_cb, signum, 0);
}
/* Start watching `signum` in one-shot mode: the watcher stops itself after
 * the first delivery (see uv__signal_event).
 */
int uv_signal_start_oneshot(uv_signal_t* handle,
                            uv_signal_cb signal_cb,
                            int signum) {
  return uv__signal_start(handle, signal_cb, signum, 1);
}
/* Shared implementation of uv_signal_start()/uv_signal_start_oneshot().
 * Inserts `handle` into the shared tree and installs (or upgrades) the
 * process-wide OS handler, all while signals are blocked and the signal
 * lock is held. Returns 0, UV_EINVAL for signum == 0, or the sigaction
 * error for an invalid signal.
 */
static int uv__signal_start(uv_signal_t* handle,
                            uv_signal_cb signal_cb,
                            int signum,
                            int oneshot) {
  sigset_t saved_sigmask;
  int err;
  uv_signal_t* first_handle;
  assert(!uv__is_closing(handle));
  /* If the user supplies signum == 0, then return an error already. If the
   * signum is otherwise invalid then uv__signal_register will find out
   * eventually.
   */
  if (signum == 0)
    return UV_EINVAL;
  /* Short circuit: if the signal watcher is already watching {signum} don't
   * go through the process of deregistering and registering the handler.
   * Additionally, this avoids pending signals getting lost in the small
   * time frame that handle->signum == 0.
   */
  if (signum == handle->signum) {
    handle->signal_cb = signal_cb;
    return 0;
  }
  /* If the signal handler was already active, stop it first. */
  if (handle->signum != 0) {
    uv__signal_stop(handle);
  }
  uv__signal_block_and_lock(&saved_sigmask);
  /* If at this point there are no active signal watchers for this signum (in
   * any of the loops), it's time to try and register a handler for it here.
   * Also in case there's only one-shot handlers and a regular handler comes in.
   */
  first_handle = uv__signal_first_handle(signum);
  if (first_handle == NULL ||
      (!oneshot && (first_handle->flags & UV_SIGNAL_ONE_SHOT))) {
    err = uv__signal_register_handler(signum, oneshot);
    if (err) {
      /* Registering the signal handler failed. Must be an invalid signal. */
      uv__signal_unlock_and_unblock(&saved_sigmask);
      return err;
    }
  }
  handle->signum = signum;
  if (oneshot)
    handle->flags |= UV_SIGNAL_ONE_SHOT;
  RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
  uv__signal_unlock_and_unblock(&saved_sigmask);
  handle->signal_cb = signal_cb;
  uv__handle_start(handle);
  return 0;
}
/* Loop-thread half of signal dispatch: drains the uv__signal_msg_t records
 * that uv__signal_handler() wrote to the self-pipe and runs the watcher
 * callbacks. read() may stop mid-record; any partial message is kept at the
 * front of `buf` until the rest arrives. One-shot watchers are stopped
 * after their callback has run.
 */
static void uv__signal_event(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int events) {
  uv__signal_msg_t* msg;
  uv_signal_t* handle;
  char buf[sizeof(uv__signal_msg_t) * 32];
  size_t bytes, end, i;
  int r;
  bytes = 0;
  end = 0;
  do {
    r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
    if (r == -1 && errno == EINTR)
      continue;
    if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
      /* If there are bytes in the buffer already (which really is extremely
       * unlikely if possible at all) we can't exit the function here. We'll
       * spin until more bytes are read instead.
       */
      if (bytes > 0)
        continue;
      /* Otherwise, there was nothing there. */
      return;
    }
    /* Other errors really should never happen. */
    if (r == -1)
      abort();
    bytes += r;
    /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
    end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
    for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
      msg = (uv__signal_msg_t*) (buf + i);
      handle = msg->handle;
      /* Skip stale messages: the watcher may have been re-targeted to a
       * different signal since the message was queued.
       */
      if (msg->signum == handle->signum) {
        assert(!(handle->flags & UV_HANDLE_CLOSING));
        handle->signal_cb(handle, handle->signum);
      }
      handle->dispatched_signals++;
      if (handle->flags & UV_SIGNAL_ONE_SHOT)
        uv__signal_stop(handle);
    }
    bytes -= end;
    /* If there are any "partial" messages left, move them to the start of
     * the buffer, and spin. This should not happen.
     */
    if (bytes) {
      memmove(buf, buf + end, bytes);
      continue;
    }
  } while (end == sizeof buf);
}
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
  /* Total order for the shared RB tree of signal watchers. */
  int w1_oneshot;
  int w2_oneshot;

  /* Primary key: signum, so all watchers with the same signum end up
   * adjacent in the tree.
   */
  if (w1->signum != w2->signum)
    return w1->signum < w2->signum ? -1 : 1;

  /* Secondary key: the one-shot flag. Watchers without UV_SIGNAL_ONE_SHOT
   * sort first, so when the first watcher found for a signal is one-shot,
   * every following one is too.
   */
  w1_oneshot = w1->flags & UV_SIGNAL_ONE_SHOT;
  w2_oneshot = w2->flags & UV_SIGNAL_ONE_SHOT;
  if (w1_oneshot != w2_oneshot)
    return w1_oneshot < w2_oneshot ? -1 : 1;

  /* Tertiary key: the owning loop, which makes it possible to look up the
   * first entry after { .signum = x, .loop = NULL }.
   */
  if (w1->loop != w2->loop)
    return w1->loop < w2->loop ? -1 : 1;

  /* Finally disambiguate distinct watchers by address. */
  if (w1 != w2)
    return w1 < w2 ? -1 : 1;

  return 0;
}
/* Public stop: no-op when the watcher isn't started; always returns 0. */
int uv_signal_stop(uv_signal_t* handle) {
  assert(!uv__is_closing(handle));
  uv__signal_stop(handle);
  return 0;
}
/* Remove `handle` from the shared tree under the signal lock. When it was
 * the last watcher for its signal the default disposition is restored; when
 * a regular (non-one-shot) watcher is removed and only one-shot watchers
 * remain, the OS handler is re-registered with one-shot semantics.
 */
static void uv__signal_stop(uv_signal_t* handle) {
  uv_signal_t* removed_handle;
  sigset_t saved_sigmask;
  uv_signal_t* first_handle;
  int rem_oneshot;
  int first_oneshot;
  int ret;
  /* If the watcher wasn't started, this is a no-op. */
  if (handle->signum == 0)
    return;
  uv__signal_block_and_lock(&saved_sigmask);
  removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
  assert(removed_handle == handle);
  (void) removed_handle;
  /* Check if there are other active signal watchers observing this signal. If
   * not, unregister the signal handler.
   */
  first_handle = uv__signal_first_handle(handle->signum);
  if (first_handle == NULL) {
    uv__signal_unregister_handler(handle->signum);
  } else {
    rem_oneshot = handle->flags & UV_SIGNAL_ONE_SHOT;
    first_oneshot = first_handle->flags & UV_SIGNAL_ONE_SHOT;
    if (first_oneshot && !rem_oneshot) {
      ret = uv__signal_register_handler(handle->signum, 1);
      assert(ret == 0);
      (void)ret;
    }
  }
  uv__signal_unlock_and_unblock(&saved_sigmask);
  handle->signum = 0;
  uv__handle_stop(handle);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,915 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifndef SUNOS_NO_IFADDRS
# include <ifaddrs.h>
#endif
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_arp.h>
#include <sys/sockio.h>
#include <sys/loadavg.h>
#include <sys/time.h>
#include <unistd.h>
#include <kstat.h>
#include <fcntl.h>
#include <sys/port.h>
#include <port.h>
#define PORT_FIRED 0x69
#define PORT_UNUSED 0x0
#define PORT_LOADED 0x99
#define PORT_DELETED -1
#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
#define PROCFS_FILE_OFFSET_BITS_HACK 1
#undef _FILE_OFFSET_BITS
#else
#define PROCFS_FILE_OFFSET_BITS_HACK 0
#endif
#include <procfs.h>
#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
#define _FILE_OFFSET_BITS 64
#endif
int uv__platform_loop_init(uv_loop_t* loop) {
  /* SunOS backend setup: create the event port used for fd polling and mark
   * the fs-events port as not-yet-created. Returns 0 or a negative errno.
   */
  int port;
  int err;

  loop->fs_fd = -1;
  loop->backend_fd = -1;

  port = port_create();
  if (port == -1)
    return UV__ERR(errno);

  err = uv__cloexec(port, 1);
  if (err != 0) {
    uv__close(port);
    return err;
  }

  loop->backend_fd = port;
  return 0;
}
void uv__platform_loop_delete(uv_loop_t* loop) {
  /* Release the backend event port and the lazily-created fs-events port,
   * if present.
   */
  if (loop->backend_fd != -1) {
    uv__close(loop->backend_fd);
    loop->backend_fd = -1;
  }

  if (loop->fs_fd != -1) {
    uv__close(loop->fs_fd);
    loop->fs_fd = -1;
  }
}
/* Child-side fork handler for the SunOS backend: stop the fs-event watcher
 * (if any) before its fd is closed, then tear down and re-create the ports.
 */
int uv__io_fork(uv_loop_t* loop) {
#if defined(PORT_SOURCE_FILE)
  if (loop->fs_fd != -1) {
    /* stop the watcher before we blow away its fileno */
    uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
  }
#endif
  uv__platform_loop_delete(loop);
  return uv__platform_loop_init(loop);
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  /* Called when `fd` is being closed while uv__io_poll() may still have
   * events for it in flight. The poll loop stashes its current event array
   * and length in the two spare watcher slots; mark any pending event for
   * `fd` so the dispatch loop skips it.
   */
  struct port_event* pending;
  uintptr_t count;
  uintptr_t idx;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  pending = (struct port_event*) loop->watchers[loop->nwatchers];
  count = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (pending == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (idx = 0; idx < count; idx++)
    if ((int) pending[idx].portev_object == fd)
      pending[idx].portev_object = -1;
}
/* Probe whether `fd` can be registered with the event port: associate it,
 * then immediately dissociate. Returns 0 when pollable, negative errno
 * otherwise.
 */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
    return UV__ERR(errno);
  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
    perror("(libuv) port_dissociate()");
    abort();
  }
  return 0;
}
/* Poll the event port for up to `timeout` ms (-1 = block indefinitely) and
 * dispatch watcher callbacks. Event ports are one-shot: every watcher that
 * fires and stays active is pushed back onto loop->watcher_queue to be
 * re-associated on the next pass. Signal watcher callbacks are deferred
 * until after all other fds have been handled.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  struct uv__queue* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int have_signals;
  int nevents;
  int count;
  int err;
  int fd;
  int user_timeout;
  int reset_timeout;
  if (loop->nfds == 0) {
    assert(uv__queue_empty(&loop->watcher_queue));
    return;
  }
  /* (Re-)associate every pending watcher with the event port. */
  while (!uv__queue_empty(&loop->watcher_queue)) {
    q = uv__queue_head(&loop->watcher_queue);
    uv__queue_remove(q);
    uv__queue_init(q);
    w = uv__queue_data(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    if (port_associate(loop->backend_fd,
                       PORT_SOURCE_FD,
                       w->fd,
                       w->pevents,
                       0)) {
      perror("(libuv) port_associate()");
      abort();
    }
    w->events = w->pevents;
  }
  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }
  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  /* With idle-time metrics enabled, poll non-blocking first so idle time can
   * be attributed accurately; the user timeout is restored per iteration.
   */
  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }
  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);
    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }
    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;
    nfds = 1;
    saved_errno = 0;
    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);
    err = port_getn(loop->backend_fd,
                    events,
                    ARRAY_SIZE(events),
                    &nfds,
                    timeout == -1 ? NULL : &spec);
    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
    if (err) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME) {
        saved_errno = errno;
      } else {
        perror("(libuv) port_getn()");
        abort();
      }
    }
    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));
    /* portev_source still 0 means the kernel reported no events. */
    if (events[0].portev_source == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }
      if (timeout == 0)
        return;
      if (timeout == -1)
        continue;
      goto update_timeout;
    }
    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }
    have_signals = 0;
    nevents = 0;
    /* Expose the event array to uv__platform_invalidate_fd() in case a
     * callback closes an fd with pending events.
     */
    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;
      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);
      w = loop->watchers[fd];
      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;
      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, pe->portev_events);
      }
      nevents++;
      if (w != loop->watchers[fd])
        continue; /* Disabled by callback. */
      /* Events Ports operates in oneshot mode, rearm timer on next run. */
      if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue))
        uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
    }
    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }
    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;
    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */
    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }
    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }
    if (timeout == 0)
      return;
    if (timeout == -1)
      continue;
update_timeout:
    assert(timeout > 0);
    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;
    timeout -= diff;
  }
}
/* High-resolution monotonic clock in nanoseconds; `type` is unused because
 * gethrtime() is used for every clock type on SunOS.
 */
uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}
/*
* We could use a static buffer for the path manipulations that we need outside
* of the function, but this function could be called by multiple consumers and
* we don't want to potentially create a race condition in the use of snprintf.
*/
int uv_exepath(char* buffer, size_t* size) {
  /* Resolve the running executable's path by reading the /proc symlink.
   * On success the result is NUL-terminated, *size is set to its length
   * (possibly truncated to *size - 1), and 0 is returned.
   */
  char procpath[128];
  ssize_t n;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  snprintf(procpath,
           sizeof(procpath),
           "/proc/%lu/path/a.out",
           (unsigned long) getpid());

  /* Leave room for the terminating NUL; a 1-byte buffer yields "". */
  n = *size - 1;
  if (n > 0)
    n = readlink(procpath, buffer, n);

  if (n == -1)
    return UV__ERR(errno);

  buffer[n] = '\0';
  *size = n;
  return 0;
}
/* Free physical memory in bytes, from sysconf page-size/page-count. */
uint64_t uv_get_free_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
}
/* Total physical memory in bytes, from sysconf page-size/page-count. */
uint64_t uv_get_total_memory(void) {
  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
}
/* No cgroup-style memory limits on this platform; 0 means "unknown". */
uint64_t uv_get_constrained_memory(void) {
  return 0; /* Memory constraints are unknown. */
}
/* Without constraint information, "available" is simply free memory. */
uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}
/* 1/5/15-minute load averages; avg is left untouched if getloadavg fails. */
void uv_loadavg(double avg[3]) {
  (void) getloadavg(avg, 3);
}
#if defined(PORT_SOURCE_FILE)
/* (Re-)associate the watched file object with the loop's fs event port.
 * Event-port file watches are one-shot, so this runs after every delivered
 * event. Returns UV_EBADF when the watcher was already stopped, negative
 * errno on port_associate failure, 0 on success.
 */
static int uv__fs_event_rearm(uv_fs_event_t *handle) {
  if (handle->fd == PORT_DELETED)
    return UV_EBADF;
  if (port_associate(handle->loop->fs_fd,
                     PORT_SOURCE_FILE,
                     (uintptr_t) &handle->fo,
                     FILE_ATTRIB | FILE_MODIFIED,
                     handle) == -1) {
    return UV__ERR(errno);
  }
  handle->fd = PORT_LOADED;
  return 0;
}
/* Drain file events from loop->fs_fd and invoke the watcher callbacks.
 * Each watch is one-shot: the handle is re-armed after its callback unless
 * the callback stopped it (fd == PORT_DELETED). Handles that are closing
 * get their deferred close completed here (see uv__fs_event_close).
 */
static void uv__fs_event_read(uv_loop_t* loop,
                              uv__io_t* w,
                              unsigned int revents) {
  uv_fs_event_t *handle = NULL;
  timespec_t timeout;
  port_event_t pe;
  int events;
  int r;
  (void) w;
  (void) revents;
  do {
    uint_t n = 1;
    /*
     * Note that our use of port_getn() here (and not port_get()) is deliberate:
     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
     * causes port_get() to return success instead of ETIME when there aren't
     * actually any events (!); by using port_getn() in lieu of port_get(),
     * we can at least workaround the bug by checking for zero returned events
     * and treating it as we would ETIME.
     */
    do {
      memset(&timeout, 0, sizeof timeout);
      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
    }
    while (r == -1 && errno == EINTR);
    if ((r == -1 && errno == ETIME) || n == 0)
      break;
    handle = (uv_fs_event_t*) pe.portev_user;
    assert((r == 0) && "unexpected port_get() error");
    /* Complete a close that was deferred because an event was queued. */
    if (uv__is_closing(handle)) {
      uv__handle_stop(handle);
      uv__make_close_pending((uv_handle_t*) handle);
      break;
    }
    /* Attribute/modify events map to UV_CHANGE; everything else (delete,
     * rename, ...) maps to UV_RENAME.
     */
    events = 0;
    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_CHANGE;
    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_RENAME;
    assert(events != 0);
    handle->fd = PORT_FIRED;
    handle->cb(handle, NULL, events, 0);
    /* Re-arm unless the callback stopped the watcher. */
    if (handle->fd != PORT_DELETED) {
      r = uv__fs_event_rearm(handle);
      if (r != 0)
        handle->cb(handle, NULL, 0, r);
    }
  }
  while (handle->fd != PORT_DELETED);
}
/* Initialize an fs-event handle; all real setup happens in start(). */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}
/* Start watching `path` for attribute/modify/rename events. Creates the
 * loop's shared fs event port on first use and starts polling it. Returns
 * UV_EINVAL when already active, UV_ENOMEM when the path copy fails, or a
 * negative errno from port setup; 0 on success. `flags` is unused here.
 */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return UV_EINVAL;

  /* Lazily create the event port shared by all fs-event watchers of this
   * loop; it stays open for the loop's lifetime.
   */
  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return UV__ERR(errno);
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->path = uv__strdup(path);
  if (handle->path == NULL) {
    /* BUGFIX: the strdup result was previously used unchecked; a failed
     * allocation would have left fo_name NULL and the handle half-started.
     */
    uv__handle_stop(handle);
    return UV_ENOMEM;
  }
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->path;
  err = uv__fs_event_rearm(handle);
  if (err != 0) {
    uv_fs_event_stop(handle);
    return err;
  }

  if (first_run) {
    /* First watcher on this loop: start polling the shared event port. */
    err = uv__io_init_start(handle->loop,
                            &handle->loop->fs_event_watcher,
                            uv__fs_event_read,
                            portfd,
                            POLLIN);
    if (err)
      uv__handle_stop(handle);
    return err;
  }

  return 0;
}
/* Internal stop: dissociate from the event port when currently loaded and
 * release the path. Returns non-zero when port_dissociate() fails — which
 * indicates an event for this handle is already queued — so callers can
 * defer destruction (see uv__fs_event_close); the handle is then NOT marked
 * stopped here.
 */
static int uv__fs_event_stop(uv_fs_event_t* handle) {
  int ret = 0;
  if (!uv__is_active(handle))
    return 0;
  if (handle->fd == PORT_LOADED) {
    ret = port_dissociate(handle->loop->fs_fd,
                          PORT_SOURCE_FILE,
                          (uintptr_t) &handle->fo);
  }
  handle->fd = PORT_DELETED;
  uv__free(handle->path);
  handle->path = NULL;
  handle->fo.fo_name = NULL;
  if (ret == 0)
    uv__handle_stop(handle);
  return ret;
}
/* Public stop; always reports success even when the dissociate was
 * deferred (the pending event is consumed later by uv__fs_event_read).
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  (void) uv__fs_event_stop(handle);
  return 0;
}
/* Close hook: completes immediately unless an event is still queued. */
void uv__fs_event_close(uv_fs_event_t* handle) {
  /*
   * If we were unable to dissociate the port here, then it is most likely
   * that there is a pending queued event. When this happens, we don't want
   * to complete the close as it will free the underlying memory for the
   * handle, causing a use-after-free problem when the event is processed.
   * We defer the final cleanup until after the event is consumed in
   * uv__fs_event_read().
   */
  if (uv__fs_event_stop(handle) == 0)
    uv__make_close_pending((uv_handle_t*) handle);
}
#else /* !defined(PORT_SOURCE_FILE) */
/* Fallback stubs for SunOS builds without PORT_SOURCE_FILE: fs events are
 * not supported, so every entry point reports UV_ENOSYS.
 */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
  return UV_ENOSYS;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  return UV_ENOSYS;
}
/* Close can never be reached because init always fails. */
void uv__fs_event_close(uv_fs_event_t* handle) {
  UNREACHABLE();
}
#endif /* defined(PORT_SOURCE_FILE) */
/* Resident set size in bytes, read from /proc/self/psinfo. Returns 0 on
 * success, negative errno when the file can't be opened, UV_EINVAL on a
 * short or failed read.
 */
int uv_resident_set_memory(size_t* rss) {
  psinfo_t psinfo;
  ssize_t n;
  int err;
  int fd;

  fd = open("/proc/self/psinfo", O_RDONLY);
  if (fd == -1)
    return UV__ERR(errno);

  /* BUGFIX: retry on EINTR (this was a standing FIXME); a signal during the
   * read previously caused a spurious UV_EINVAL.
   */
  do
    n = read(fd, &psinfo, sizeof(psinfo));
  while (n == -1 && errno == EINTR);

  err = UV_EINVAL;
  if (n == (ssize_t) sizeof(psinfo)) {
    /* pr_rssize is expressed in kilobytes. */
    *rss = (size_t)psinfo.pr_rssize * 1024;
    err = 0;
  }

  uv__close(fd);
  return err;
}
/* System uptime in seconds, derived from the unix:system_misc kstat's
 * clk_intr tick counter divided by the clock rate. On any kstat lookup or
 * read failure *uptime is set to -1 (matching the pre-existing read-failure
 * behavior) and 0 is still returned; UV_EPERM when kstat_open() fails.
 */
int uv_uptime(double* uptime) {
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *knp;
  long hz = sysconf(_SC_CLK_TCK);

  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;

  /* BUGFIX: kstat_lookup()/kstat_data_lookup() can return NULL; the results
   * were previously dereferenced unchecked.
   */
  ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
  if (ksp == NULL || kstat_read(kc, ksp, NULL) == -1) {
    *uptime = -1;
  } else {
    knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
    if (knp == NULL)
      *uptime = -1;
    else
      *uptime = knp->value.ul / hz;
  }

  kstat_close(kc);
  return 0;
}
/* Enumerate CPUs via kstat: one pass counts "cpu_info" instances, a second
 * fills in model and clock speed, and a third reads per-CPU tick counters
 * from the "cpu:sys" kstats. Returns UV_EPERM when kstat_open() fails,
 * UV_ENOMEM on allocation failure, 0 on success.
 */
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  int lookup_instance;
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *knp;
  uv_cpu_info_t* cpu_info;
  kc = kstat_open();
  if (kc == NULL)
    return UV_EPERM;
  /* Get count of cpus */
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    lookup_instance++;
  }
  *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
  if (!(*cpu_infos)) {
    kstat_close(kc);
    return UV_ENOMEM;
  }
  *count = lookup_instance;
  /* Second pass: model name and clock speed per CPU. */
  cpu_info = *cpu_infos;
  lookup_instance = 0;
  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->speed = 0;
      cpu_info->model = NULL;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
      assert(knp->data_type == KSTAT_DATA_INT32 ||
             knp->data_type == KSTAT_DATA_INT64);
      cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
                                                             : knp->value.i64;
      knp = kstat_data_lookup(ksp, (char*) "brand");
      assert(knp->data_type == KSTAT_DATA_STRING);
      cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
    }
    lookup_instance++;
    cpu_info++;
  }
  /* Third pass: cumulative tick counters per CPU (zeroed on read failure). */
  cpu_info = *cpu_infos;
  lookup_instance = 0;
  for (;;) {
    ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
    if (ksp == NULL)
      break;
    if (kstat_read(kc, ksp, NULL) == -1) {
      cpu_info->cpu_times.user = 0;
      cpu_info->cpu_times.nice = 0;
      cpu_info->cpu_times.sys = 0;
      cpu_info->cpu_times.idle = 0;
      cpu_info->cpu_times.irq = 0;
    } else {
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.user = knp->value.ui64;
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.sys = knp->value.ui64;
      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.idle = knp->value.ui64;
      knp = kstat_data_lookup(ksp, (char*) "intr");
      assert(knp->data_type == KSTAT_DATA_UINT64);
      cpu_info->cpu_times.irq = knp->value.ui64;
      /* SunOS does not track "nice" time separately. */
      cpu_info->cpu_times.nice = 0;
    }
    lookup_instance++;
    cpu_info++;
  }
  kstat_close(kc);
  return 0;
}
#ifdef SUNOS_NO_IFADDRS
/* Stub for builds without <ifaddrs.h>: interface enumeration unsupported. */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
}
#else /* SUNOS_NO_IFADDRS */
/*
* Inspired By:
* https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
* http://www.pauliesworld.org/project/getmac.c
*/
/* Fill address->phys_addr (the MAC address). First copy the link-layer
 * address from the ifaddrs entry; when that comes back all zeroes (see the
 * root-only note below), fall back to an ARP lookup via SIOCGARP using the
 * interface's IP address. Returns 0 when done (including the "nothing to
 * look up" case), negative errno on socket/ioctl failure.
 */
static int uv__set_phys_addr(uv_interface_address_t* address,
                             struct ifaddrs* ent) {
  struct sockaddr_dl* sa_addr;
  int sockfd;
  size_t i;
  struct arpreq arpreq;
  /* This appears to only work as root */
  sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
  memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
  for (i = 0; i < sizeof(address->phys_addr); i++) {
    /* Check that all bytes of phys_addr are zero. */
    if (address->phys_addr[i] != 0)
      return 0;
  }
  /* Build the ARP request from the interface address (v4 or v6). */
  memset(&arpreq, 0, sizeof(arpreq));
  if (address->address.address4.sin_family == AF_INET) {
    struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
    sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
  } else if (address->address.address4.sin_family == AF_INET6) {
    struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
    memcpy(sin->sin6_addr.s6_addr,
           address->address.address6.sin6_addr.s6_addr,
           sizeof(address->address.address6.sin6_addr.s6_addr));
  } else {
    return 0;
  }
  sockfd = socket(AF_INET, SOCK_DGRAM, 0);
  if (sockfd < 0)
    return UV__ERR(errno);
  if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
    uv__close(sockfd);
    return UV__ERR(errno);
  }
  memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
  uv__close(sockfd);
  return 0;
}
static int uv__ifaddr_exclude(struct ifaddrs *ent) {
  /* Decide whether a getifaddrs() entry should be skipped. Returns 1 for
   * interfaces that are not both up and running, that carry no address, or
   * whose address family is neither IPv4 nor IPv6; 0 otherwise.
   */
  const unsigned long required = IFF_UP | IFF_RUNNING;

  if ((ent->ifa_flags & required) != required)
    return 1;

  if (ent->ifa_addr == NULL)
    return 1;

  switch (ent->ifa_addr->sa_family) {
  case AF_INET:
  case AF_INET6:
    return 0;
  default:
    return 1;
  }
}
/* List the AF_INET/AF_INET6 interfaces that are up and running. A single
 * allocation holds the address array followed by all interface-name
 * strings, so uv_free_interface_addresses() can release everything with one
 * free. Returns 0, negative errno from getifaddrs(), or UV_ENOMEM.
 */
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
  uv_interface_address_t* address;
  struct ifaddrs* addrs;
  struct ifaddrs* ent;
  size_t namelen;
  char* name;
  *count = 0;
  *addresses = NULL;
  if (getifaddrs(&addrs))
    return UV__ERR(errno);
  /* Count the number of interfaces */
  namelen = 0;
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;
    namelen += strlen(ent->ifa_name) + 1;
    (*count)++;
  }
  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }
  /* One block: the address structs, then the packed name strings. */
  *addresses = uv__calloc(1, *count * sizeof(**addresses) + namelen);
  if (*addresses == NULL) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }
  /* Names start right after the last array element. */
  name = (char*) &(*addresses)[*count];
  address = *addresses;
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent))
      continue;
    namelen = strlen(ent->ifa_name) + 1;
    address->name = memcpy(name, ent->ifa_name, namelen);
    name += namelen;
    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }
    if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }
    address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
                              (ent->ifa_flags & IFF_LOOPBACK));
    uv__set_phys_addr(address, ent);
    address++;
  }
  freeifaddrs(addrs);
  return 0;
}
#endif /* SUNOS_NO_IFADDRS */
/* Release the combined array+names block allocated by
 * uv_interface_addresses(); `count` is unused because it is one allocation.
 */
void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  uv__free(addresses);
}
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen) {
  /* Fallback for platforms without POSIX.1-2008 strnlen(): length of `s`,
   * capped at `maxlen` when no NUL is found in the first `maxlen` bytes.
   */
  const char* nul = memchr(s, '\0', maxlen);
  return nul == NULL ? maxlen : (size_t) (nul - s);
}
#endif

View File

@@ -0,0 +1,36 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <sys/sysinfo.h>
void uv_loadavg(double avg[3]) {
  /* 1/5/15-minute load averages via sysinfo(); the kernel reports them as
   * fixed-point values scaled by 2^16. avg is left untouched on failure.
   */
  struct sysinfo info;
  int i;

  if (sysinfo(&info) < 0)
    return;

  for (i = 0; i < 3; i++)
    avg[i] = (double) info.loads[i] / 65536.0;
}

View File

@@ -0,0 +1,42 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdint.h>
#include <sys/sysinfo.h>
/* Return the amount of free physical memory in bytes, or 0 on error. */
uint64_t uv_get_free_memory(void) {
  struct sysinfo info;

  if (sysinfo(&info) != 0)
    return 0;

  /* mem_unit converts sysinfo's internal units to bytes. */
  return (uint64_t) info.freeram * info.mem_unit;
}
/* Return the total physical memory in bytes, or 0 on error. */
uint64_t uv_get_total_memory(void) {
  struct sysinfo info;

  if (sysinfo(&info) != 0)
    return 0;

  /* mem_unit converts sysinfo's internal units to bytes. */
  return (uint64_t) info.totalram * info.mem_unit;
}

View File

@@ -0,0 +1,673 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
/* ifaddrs is not implemented on AIX and IBM i PASE */
#if !defined(_AIX)
#include <ifaddrs.h>
#endif
/* Bind `fd` to an ephemeral port unless it already has one assigned.
 * Returns 0 on success or a negative UV_* error code.
 */
static int maybe_bind_socket(int fd) {
  union uv__sockaddr s;
  socklen_t slen;

  memset(&s, 0, sizeof(s));
  slen = sizeof(s);

  if (getsockname(fd, &s.addr, &slen))
    return UV__ERR(errno);

  switch (s.addr.sa_family) {
  case AF_INET:
    if (s.in.sin_port != 0)
      return 0;  /* Already bound to a port. */
    break;
  case AF_INET6:
    if (s.in6.sin6_port != 0)
      return 0;  /* Already bound to a port. */
    break;
  }

  /* Bind to an arbitrary port. */
  if (bind(fd, &s.addr, slen))
    return UV__ERR(errno);

  return 0;
}
/* Create a SOCK_STREAM socket for `domain`, attach it to the stream handle,
 * and — when UV_HANDLE_BOUND is requested — bind it to an arbitrary port.
 */
static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
  int fd;
  int err;

  fd = uv__socket(domain, SOCK_STREAM, 0);
  if (fd < 0)
    return fd;  /* uv__socket() already returns a UV_* error code. */

  err = uv__stream_open((uv_stream_t*) handle, fd, flags);
  if (err != 0) {
    uv__close(fd);
    return err;
  }

  if (!(flags & UV_HANDLE_BOUND))
    return 0;

  return maybe_bind_socket(fd);
}
/* Ensure the handle has a socket (when `domain` != AF_UNSPEC) and, if
 * UV_HANDLE_BOUND is requested, that it is bound to a port. On success the
 * requested flags are OR-ed into handle->flags.
 */
static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
  int err;
  int fd;

  if (domain != AF_UNSPEC) {
    fd = uv__stream_fd(handle);
    if (fd == -1)
      return new_socket(handle, domain, flags);

    /* Socket exists; bind it now if the caller wants a bound socket and it
     * has not been bound before.
     */
    if ((flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_BOUND)) {
      err = maybe_bind_socket(fd);
      if (err)
        return err;
    }
  }

  handle->flags |= flags;
  return 0;
}
/* Initialize a TCP handle. The lower 8 bits of `flags` carry the address
 * family; all other bits must be zero. With AF_UNSPEC, socket creation is
 * deferred until the first bind/connect/listen.
 */
int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
  int domain;
  int err;

  if (flags & ~0xFF)
    return UV_EINVAL;

  /* Use the lower 8 bits for the domain. */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  uv__stream_init(loop, (uv_stream_t*) tcp, UV_TCP);

  if (domain == AF_UNSPEC)
    return 0;

  err = new_socket(tcp, domain, 0);
  if (err != 0) {
    /* If anything fails beyond this point we need to remove the handle from
     * the handle queue, since it was added by uv__handle_init in
     * uv_stream_init.
     */
    uv__queue_remove(&tcp->handle_queue);
    if (tcp->io_watcher.fd != -1)
      uv__close(tcp->io_watcher.fd);
    tcp->io_watcher.fd = -1;
  }

  return err;
}
/* Initialize a TCP handle without creating a socket yet; AF_UNSPEC defers
 * socket creation to the first bind/connect/listen.
 */
int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
  return uv_tcp_init_ex(loop, tcp, AF_UNSPEC);
}
/* Bind the handle's socket (created on demand) to `addr`.
 * `flags` honors UV_TCP_IPV6ONLY and UV_TCP_REUSEPORT. EADDRINUSE is not
 * reported here; it is stored in tcp->delayed_error and surfaced by a later
 * listen/connect/getsockname call.
 */
int uv__tcp_bind(uv_tcp_t* tcp,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;
  int on;
  /* Cannot set IPv6-only mode on non-IPv6 socket. */
  if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
    return UV_EINVAL;
  err = maybe_new_socket(tcp, addr->sa_family, 0);
  if (err)
    return err;
  on = 1;
  if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
    return UV__ERR(errno);
  if (flags & UV_TCP_REUSEPORT) {
    err = uv__sock_reuseport(tcp->io_watcher.fd);
    if (err)
      return err;
  }
#ifndef __OpenBSD__
#ifdef IPV6_V6ONLY
  if (addr->sa_family == AF_INET6) {
    on = (flags & UV_TCP_IPV6ONLY) != 0;
    if (setsockopt(tcp->io_watcher.fd,
                   IPPROTO_IPV6,
                   IPV6_V6ONLY,
                   &on,
                   sizeof on) == -1) {
#if defined(__MVS__)
      if (errno == EOPNOTSUPP)
        return UV_EINVAL;
#endif
      return UV__ERR(errno);
    }
  }
#endif
#endif
  /* Clear errno first: a successful bind() leaves it untouched and the
   * delayed_error computation below reads it.
   */
  errno = 0;
  err = bind(tcp->io_watcher.fd, addr, addrlen);
  if (err == -1 && errno != EADDRINUSE) {
    if (errno == EAFNOSUPPORT)
      /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a
       * socket created with AF_INET to an AF_INET6 address or vice versa. */
      return UV_EINVAL;
    return UV__ERR(errno);
  }
  /* Only EADDRINUSE reaches this point with err == -1; report it later. */
  tcp->delayed_error = (err == -1) ? UV__ERR(errno) : 0;
  tcp->flags |= UV_HANDLE_BOUND;
  if (addr->sa_family == AF_INET6)
    tcp->flags |= UV_HANDLE_IPV6;
  return 0;
}
/* Return non-zero when `addr` is an IPv6 link-local address. The check
 * matches the first two address bytes against fe80 exactly.
 */
static int uv__is_ipv6_link_local(const struct sockaddr* addr) {
  const struct sockaddr_in6* sa6;
  uint8_t prefix[2];

  if (addr->sa_family != AF_INET6)
    return 0;

  sa6 = (const struct sockaddr_in6*) addr;
  memcpy(prefix, &sa6->sin6_addr, sizeof(prefix));

  return prefix[0] == 0xFE && prefix[1] == 0x80;
}
/* Return the scope id of the first link-local IPv6 interface address found,
 * or 0 when none exists or interface enumeration fails.
 */
static int uv__ipv6_link_local_scope_id(void) {
  struct sockaddr_in6* a6;
  int rv;
#if defined(_AIX)
  /* AIX & IBM i do not have ifaddrs
   * so fallback to use uv_interface_addresses */
  uv_interface_address_t* interfaces;
  uv_interface_address_t* ifa;
  int count, i;
  if (uv_interface_addresses(&interfaces, &count))
    return 0;
  rv = 0;
  for (ifa = interfaces; ifa != &interfaces[count]; ifa++) {
    if (uv__is_ipv6_link_local((struct sockaddr*) &ifa->address)) {
      rv = ifa->address.address6.sin6_scope_id;
      break;
    }
  }
  uv_free_interface_addresses(interfaces, count);
#else
  struct ifaddrs* ifa;
  struct ifaddrs* p;
  if (getifaddrs(&ifa))
    return 0;
  /* Entries may carry a NULL ifa_addr (interfaces with no address). */
  for (p = ifa; p != NULL; p = p->ifa_next)
    if (p->ifa_addr != NULL)
      if (uv__is_ipv6_link_local(p->ifa_addr))
        break;
  rv = 0;
  if (p != NULL) {
    a6 = (struct sockaddr_in6*) p->ifa_addr;
    rv = a6->sin6_scope_id;
  }
  freeifaddrs(ifa);
#endif /* defined(_AIX) */
  return rv;
}
/* Start connecting `handle` to `addr`; `cb` fires once the connection is
 * established or fails. If a previous bind stored a delayed error, the
 * connect syscall is skipped and the error is delivered asynchronously via
 * the connect callback on the next loop iteration.
 */
int uv__tcp_connect(uv_connect_t* req,
                    uv_tcp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_connect_cb cb) {
  struct sockaddr_in6 tmp6;
  int err;
  int r;
  assert(handle->type == UV_TCP);
  if (handle->connect_req != NULL)
    return UV_EALREADY;  /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
  if (handle->delayed_error != 0)
    goto out;
  err = maybe_new_socket(handle,
                         addr->sa_family,
                         UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  if (err)
    return err;
  /* Link-local IPv6 destinations need a scope id; pick a default one when
   * the caller left sin6_scope_id zero.
   */
  if (uv__is_ipv6_link_local(addr)) {
    memcpy(&tmp6, addr, sizeof(tmp6));
    if (tmp6.sin6_scope_id == 0) {
      tmp6.sin6_scope_id = uv__ipv6_link_local_scope_id();
      addr = (void*) &tmp6;
    }
  }
  do {
    errno = 0;
    r = connect(uv__stream_fd(handle), addr, addrlen);
  } while (r == -1 && errno == EINTR);
  /* We not only check the return value, but also check the errno != 0.
   * Because in rare cases connect() will return -1 but the errno
   * is 0 (for example, on Android 4.3, OnePlus phone A0001_12_150227)
   * and actually the tcp three-way handshake is completed.
   */
  if (r == -1 && errno != 0) {
    if (errno == EINPROGRESS)
      ; /* not an error */
    else if (errno == ECONNREFUSED
#if defined(__OpenBSD__)
      || errno == EINVAL
#endif
      )
      /* If we get ECONNREFUSED (Solaris) or EINVAL (OpenBSD) wait until the
       * next tick to report the error. Solaris and OpenBSD wants to report
       * immediately -- other unixes want to wait.
       */
      handle->delayed_error = UV__ERR(ECONNREFUSED);
    else
      return UV__ERR(errno);
  }
out:
  uv__req_init(handle->loop, req, UV_CONNECT);
  req->cb = cb;
  req->handle = (uv_stream_t*) handle;
  uv__queue_init(&req->queue);
  handle->connect_req = req;
  uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  /* Force the watcher to fire so a stored delayed error is reported soon. */
  if (handle->delayed_error)
    uv__io_feed(handle->loop, &handle->io_watcher);
  return 0;
}
/* Adopt an existing socket fd into the handle. The fd is switched to
 * non-blocking mode; fds already registered with the loop are rejected.
 */
int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
  int err;

  if (uv__fd_exists(handle->loop, sock))
    return UV_EEXIST;

  err = uv__nonblock(sock, 1);
  if (err != 0)
    return err;

  return uv__stream_open((uv_stream_t*) handle,
                         sock,
                         UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
}
/* Fetch the locally bound address; reports a deferred bind error, if any. */
int uv_tcp_getsockname(const uv_tcp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {
  if (handle->delayed_error)
    return handle->delayed_error;
  return uv__getsockpeername((const uv_handle_t*) handle,
                             getsockname,
                             name,
                             namelen);
}
/* Fetch the peer's address; reports a deferred bind error, if any. */
int uv_tcp_getpeername(const uv_tcp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {
  if (handle->delayed_error)
    return handle->delayed_error;
  return uv__getsockpeername((const uv_handle_t*) handle,
                             getpeername,
                             name,
                             namelen);
}
/* Close the handle so the peer sees a RST instead of FIN: enable SO_LINGER
 * with a zero timeout before uv_close(). Not allowed on a handle that is
 * already shutting down.
 */
int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
  int fd;
  struct linger l = { 1, 0 };
  /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
  if (uv__is_stream_shutting(handle))
    return UV_EINVAL;
  fd = uv__stream_fd(handle);
  if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) {
    if (errno == EINVAL) {
      /* Open Group Specifications Issue 7, 2018 edition states that
       * EINVAL may mean the socket has been shut down already.
       * Behavior observed on Solaris, illumos and macOS. */
      errno = 0;
    } else {
      return UV__ERR(errno);
    }
  }
  uv_close((uv_handle_t*) handle, close_cb);
  return 0;
}
/* Start listening for incoming connections. Creates the socket on demand
 * (defaulting to AF_INET) and surfaces any deferred bind error first.
 */
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
  unsigned int flags;
  int err;
  if (tcp->delayed_error)
    return tcp->delayed_error;
  flags = 0;
#if defined(__MVS__)
  /* on zOS the listen call does not bind automatically
     if the socket is unbound. Hence the manual binding to
     an arbitrary port is required to be done manually
  */
  flags |= UV_HANDLE_BOUND;
#endif
  err = maybe_new_socket(tcp, AF_INET, flags);
  if (err)
    return err;
  if (listen(tcp->io_watcher.fd, backlog))
    return UV__ERR(errno);
  tcp->connection_cb = cb;
  tcp->flags |= UV_HANDLE_BOUND;
  /* Start listening for connections. */
  tcp->io_watcher.cb = uv__server_io;
  uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN);
  return 0;
}
/* Toggle TCP_NODELAY (Nagle's algorithm) on `fd`. */
int uv__tcp_nodelay(int fd, int on) {
  int rc;

  rc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
  return rc == 0 ? 0 : UV__ERR(errno);
}
#if (defined(UV__SOLARIS_11_4) && !UV__SOLARIS_11_4) || \
    (defined(__DragonFly__) && __DragonFly_version < 500702)
/* DragonFlyBSD <500702 and Solaris <11.4 require millisecond units
 * for TCP keepalive options. */
#define UV_KEEPALIVE_FACTOR(x) (x *= 1000)
#else
#define UV_KEEPALIVE_FACTOR(x)
#endif
/* Enable/disable SO_KEEPALIVE and, where supported, tune the probe timing.
 * `delay` is the idle time (seconds) before the first probe; it must be >= 1
 * when enabling. Returns 0 or a negative UV_* error code.
 */
int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
  int idle;
  int intvl;
  int cnt;
  /* Suppress unused-variable warnings on platforms whose preprocessor
   * branches below do not touch all three locals.
   */
  (void) &idle;
  (void) &intvl;
  (void) &cnt;
  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
    return UV__ERR(errno);
  if (!on)
    return 0;
  if (delay < 1)
    return UV_EINVAL;
#ifdef __sun
  /* The implementation of TCP keep-alive on Solaris/SmartOS is a bit unusual
   * compared to other Unix-like systems.
   * Thus, we need to specialize it on Solaris.
   *
   * There are two keep-alive mechanisms on Solaris:
   * - By default, the first keep-alive probe is sent out after a TCP connection is idle for two hours.
   * If the peer does not respond to the probe within eight minutes, the TCP connection is aborted.
   * You can alter the interval for sending out the first probe using the socket option TCP_KEEPALIVE_THRESHOLD
   * in milliseconds or TCP_KEEPIDLE in seconds.
   * The system default is controlled by the TCP ndd parameter tcp_keepalive_interval. The minimum value is ten seconds.
   * The maximum is ten days, while the default is two hours. If you receive no response to the probe,
   * you can use the TCP_KEEPALIVE_ABORT_THRESHOLD socket option to change the time threshold for aborting a TCP connection.
   * The option value is an unsigned integer in milliseconds. The value zero indicates that TCP should never time out and
   * abort the connection when probing. The system default is controlled by the TCP ndd parameter tcp_keepalive_abort_interval.
   * The default is eight minutes.
   *
   * - The second implementation is activated if socket option TCP_KEEPINTVL and/or TCP_KEEPCNT are set.
   * The time between each consequent probes is set by TCP_KEEPINTVL in seconds.
   * The minimum value is ten seconds. The maximum is ten days, while the default is two hours.
   * The TCP connection will be aborted after certain amount of probes, which is set by TCP_KEEPCNT, without receiving response.
   */
  idle = delay;
  /* Kernel expects at least 10 seconds. */
  if (idle < 10)
    idle = 10;
  /* Kernel expects at most 10 days. */
  if (idle > 10*24*60*60)
    idle = 10*24*60*60;
  UV_KEEPALIVE_FACTOR(idle);
  /* `TCP_KEEPIDLE`, `TCP_KEEPINTVL`, and `TCP_KEEPCNT` were not available on Solaris
   * until version 11.4, but let's take a chance here. */
#if defined(TCP_KEEPIDLE) && defined(TCP_KEEPINTVL) && defined(TCP_KEEPCNT)
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
    return UV__ERR(errno);
  intvl = 10; /* required at least 10 seconds */
  UV_KEEPALIVE_FACTOR(intvl);
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
    return UV__ERR(errno);
  cnt = 1; /* 1 retry, ensure (TCP_KEEPINTVL * TCP_KEEPCNT) is 10 seconds */
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
    return UV__ERR(errno);
#else
  /* Fall back to the first implementation of tcp-alive mechanism for older Solaris,
   * simulate the tcp-alive mechanism on other platforms via `TCP_KEEPALIVE_THRESHOLD` + `TCP_KEEPALIVE_ABORT_THRESHOLD`.
   */
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD, &idle, sizeof(idle)))
    return UV__ERR(errno);
  /* Note that the consequent probes will not be sent at equal intervals on Solaris,
   * but will be sent using the exponential backoff algorithm. */
  int time_to_abort = 10; /* 10 seconds */
  UV_KEEPALIVE_FACTOR(time_to_abort);
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD, &time_to_abort, sizeof(time_to_abort)))
    return UV__ERR(errno);
#endif
#else /* !defined(__sun) */
  idle = delay;
  UV_KEEPALIVE_FACTOR(idle);
#ifdef TCP_KEEPIDLE
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
    return UV__ERR(errno);
#elif defined(TCP_KEEPALIVE)
  /* Darwin/macOS uses TCP_KEEPALIVE in place of TCP_KEEPIDLE. */
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &idle, sizeof(idle)))
    return UV__ERR(errno);
#endif
#ifdef TCP_KEEPINTVL
  intvl = 1;  /* 1 second; same as default on Win32 */
  UV_KEEPALIVE_FACTOR(intvl);
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
    return UV__ERR(errno);
#endif
#ifdef TCP_KEEPCNT
  cnt = 10;  /* 10 retries; same as hardcoded on Win32 */
  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
    return UV__ERR(errno);
#endif
#endif /* !defined(__sun) */
  return 0;
}
/* Public TCP_NODELAY toggle. When the socket does not exist yet, only the
 * handle flag is recorded (applied once a socket is created).
 */
int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
  int fd;
  int err;

  fd = uv__stream_fd(handle);
  if (fd != -1) {
    err = uv__tcp_nodelay(fd, on);
    if (err)
      return err;
  }

  if (on)
    handle->flags |= UV_HANDLE_TCP_NODELAY;
  else
    handle->flags &= ~UV_HANDLE_TCP_NODELAY;

  return 0;
}
/* Public keep-alive toggle; `delay` is the idle time in seconds before the
 * first probe. When no socket exists yet, only the flag is recorded.
 */
int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
  int fd;
  int err;

  fd = uv__stream_fd(handle);
  if (fd != -1) {
    err = uv__tcp_keepalive(fd, on, delay);
    if (err)
      return err;
  }

  if (on)
    handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
  else
    handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;

  /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
   * uv_tcp_t with an int that's almost never used...
   */
  return 0;
}
/* Accepted for API compatibility; always succeeds and changes nothing in
 * this (Unix) implementation.
 */
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
  return 0;
}
/* Close the TCP handle via the generic stream teardown path. */
void uv__tcp_close(uv_tcp_t* handle) {
  uv__stream_close((uv_stream_t*)handle);
}
/* Create a connected AF_UNIX socket pair with close-on-exec set on both fds.
 * flags0/flags1 accept UV_NONBLOCK_PIPE to request non-blocking mode for the
 * respective fd. Returns 0 on success or a negative UV_* error code; on
 * failure no fds are leaked.
 */
int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
  uv_os_sock_t temp[2];
  int err;
#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  int flags;

  flags = type | SOCK_CLOEXEC;
  if ((flags0 & UV_NONBLOCK_PIPE) && (flags1 & UV_NONBLOCK_PIPE))
    flags |= SOCK_NONBLOCK;

  if (socketpair(AF_UNIX, flags, protocol, temp))
    return UV__ERR(errno);

  /* Fix: test the flag that may have been set above. The original tested
   * UV_FS_O_NONBLOCK (== O_NONBLOCK), which equals SOCK_NONBLOCK only on
   * some platforms (e.g. Linux); elsewhere the check was always false and
   * both fds were redundantly switched to non-blocking again below.
   */
  if (flags & SOCK_NONBLOCK) {
    fds[0] = temp[0];
    fds[1] = temp[1];
    return 0;
  }
#else
  if (socketpair(AF_UNIX, type, protocol, temp))
    return UV__ERR(errno);

  if ((err = uv__cloexec(temp[0], 1)))
    goto fail;
  if ((err = uv__cloexec(temp[1], 1)))
    goto fail;
#endif

  if (flags0 & UV_NONBLOCK_PIPE)
    if ((err = uv__nonblock(temp[0], 1)))
      goto fail;
  if (flags1 & UV_NONBLOCK_PIPE)
    if ((err = uv__nonblock(temp[1], 1)))
      goto fail;

  fds[0] = temp[0];
  fds[1] = temp[1];
  return 0;

fail:
  uv__close(temp[0]);
  uv__close(temp[1]);
  return err;
}

View File

@@ -0,0 +1,975 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <pthread.h>
#ifdef __OpenBSD__
#include <pthread_np.h>
#endif
#include <assert.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h> /* getrlimit() */
#include <unistd.h> /* getpagesize() */
#include <limits.h>
#ifdef __MVS__
#include <sys/ipc.h>
#include <sys/sem.h>
#endif
#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <gnu/libc-version.h> /* gnu_get_libc_version() */
#endif
#if defined(__linux__)
# include <sched.h>
# define uv__cpu_set_t cpu_set_t
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
# include <pthread_np.h>
# define uv__cpu_set_t cpuset_t
#endif
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
* Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
* the largest MINSIGSTKSZ of the architectures that musl supports) so
* let's use that as a lower bound.
*
* We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
* is between 28 and 133 KB when compiling against glibc, depending
* on the architecture.
*/
static size_t uv__min_stack_size(void) {
  size_t floor_size = 8192;  /* See rationale in the comment block above. */
#ifdef PTHREAD_STACK_MIN /* Not defined on NetBSD. */
  if ((size_t) PTHREAD_STACK_MIN > floor_size)
    return PTHREAD_STACK_MIN;
#endif /* PTHREAD_STACK_MIN */
  return floor_size;
}
/* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
static size_t uv__default_stack_size(void) {
#if defined(__linux__)
#if defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
  return 4 << 20; /* glibc default. */
#else
  return 2 << 20; /* glibc default. */
#endif
#else
  return 0; /* 0 means: use the platform's own default. */
#endif
}
/* On MacOS, threads other than the main thread are created with a reduced
* stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
*/
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
  struct rlimit lim;
  /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
   * the system call wrapper invokes the wrong system call. Don't treat
   * that as fatal, just use the default stack size instead.
   */
  if (getrlimit(RLIMIT_STACK, &lim))
    return uv__default_stack_size();
  if (lim.rlim_cur == RLIM_INFINITY)
    return uv__default_stack_size();
  /* pthread_attr_setstacksize() expects page-aligned values. */
  lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
  /* Fall back to the default when the limit is below the safe minimum. */
  if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
    return lim.rlim_cur;
#endif
  return uv__default_stack_size();
}
/* Create a thread with default options (platform default stack size). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t default_options;

  default_options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &default_options, entry, arg);
}

/* Detach the thread; its resources are released when it exits. */
int uv_thread_detach(uv_thread_t *tid) {
  int rc;

  rc = pthread_detach(*tid);
  return UV__ERR(rc);
}
/* Create a thread running `entry(arg)`. Honors UV_THREAD_HAS_STACK_SIZE in
 * params->flags; otherwise a platform default stack size is used. The
 * requested size is page-aligned and clamped to a safe minimum.
 */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  int err;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  size_t pagesize;
  size_t stack_size;
  size_t min_stack_size;
  /* Used to squelch a -Wcast-function-type warning. */
  union {
    void (*in)(void*);
    void* (*out)(void*);
  } f;
  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
  attr = NULL;
  if (stack_size == 0) {
    stack_size = uv__thread_stack_size();
  } else {
    pagesize = (size_t)getpagesize();
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
    min_stack_size = uv__min_stack_size();
    if (stack_size < min_stack_size)
      stack_size = min_stack_size;
  }
  if (stack_size > 0) {
    attr = &attr_storage;
    if (pthread_attr_init(attr))
      abort();
    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }
  f.in = entry;
  err = pthread_create(tid, attr, f.out, arg);
  /* Attribute objects can be destroyed as soon as the thread is created. */
  if (attr != NULL)
    pthread_attr_destroy(attr);
  return UV__ERR(err);
}
#if UV__CPU_AFFINITY_SUPPORTED
/* Set `tid`'s CPU affinity from `cpumask` (one byte per CPU; non-zero means
 * eligible). Optionally returns the previous mask in `oldmask`. `mask_size`
 * must be at least uv_cpumask_size() bytes.
 */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  int i;
  int r;
  uv__cpu_set_t cpuset;
  int cpumasksize;
  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;
  if (oldmask != NULL) {
    r = uv_thread_getaffinity(tid, oldmask, mask_size);
    if (r < 0)
      return r;
  }
  CPU_ZERO(&cpuset);
  for (i = 0; i < cpumasksize; i++)
    if (cpumask[i])
      CPU_SET(i, &cpuset);
#if defined(__ANDROID__) || defined(__OHOS__)
  /* No pthread_setaffinity_np here; use sched_setaffinity with the kernel
   * thread id instead.
   */
  if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
  return UV__ERR(r);
}
/* Fetch `tid`'s CPU affinity into `cpumask`, one 0/1 byte per CPU. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int r;
  int i;
  uv__cpu_set_t cpuset;
  int cpumasksize;
  cpumasksize = uv_cpumask_size();
  if (cpumasksize < 0)
    return cpumasksize;
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;
  CPU_ZERO(&cpuset);
#if defined(__ANDROID__) || defined(__OHOS__)
  if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
    r = errno;
  else
    r = 0;
#else
  r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
  if (r)
    return UV__ERR(r);
  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = !!CPU_ISSET(i, &cpuset);
  return 0;
}
#else
/* CPU affinity is not supported on this platform; both calls report
 * UV_ENOTSUP without touching their arguments.
 */
int uv_thread_setaffinity(uv_thread_t* tid,
                          char* cpumask,
                          char* oldmask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  return UV_ENOTSUP;
}
#endif /* defined(__linux__) || defined(UV_BSD_H) */
/* Return the CPU number the calling thread is currently running on, a
 * negative UV_* error code, or UV_ENOTSUP when affinity is unsupported.
 */
int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
  int cpu;
  cpu = sched_getcpu();
  if (cpu < 0)
    return UV__ERR(errno);
  return cpu;
#else
  return UV_ENOTSUP;
#endif
}
uv_thread_t uv_thread_self(void) {
  return pthread_self();
}

/* Wait for `tid` to finish; the thread's return value is discarded. */
int uv_thread_join(uv_thread_t *tid) {
  int rc;

  rc = pthread_join(*tid, NULL);
  return UV__ERR(rc);
}

int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}

/* Set the calling thread's name; rejects NULL. */
int uv_thread_setname(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  return uv__thread_setname(name);
}

/* Fetch `tid`'s name into `name` (capacity `size`); rejects NULL/zero. */
int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) {
  if (size == 0 || name == NULL)
    return UV_EINVAL;

  return uv__thread_getname(tid, name, size);
}
/* Initialize a mutex. Debug builds use PTHREAD_MUTEX_ERRORCHECK where
 * available so misuse (relocking, unlocking from the wrong thread) fails
 * loudly instead of deadlocking silently.
 */
int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return UV__ERR(pthread_mutex_init(mutex, NULL));
#else
  pthread_mutexattr_t attr;
  int err;
  if (pthread_mutexattr_init(&attr))
    abort();
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();
  err = pthread_mutex_init(mutex, &attr);
  /* The attribute object can be destroyed once the mutex is initialized. */
  if (pthread_mutexattr_destroy(&attr))
    abort();
  return UV__ERR(err);
#endif
}
/* Initialize a recursive mutex: the owning thread may relock it without
 * deadlocking.
 */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  pthread_mutexattr_t attr;
  int err;
  if (pthread_mutexattr_init(&attr))
    abort();
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
    abort();
  err = pthread_mutex_init(mutex, &attr);
  /* The attribute object can be destroyed once the mutex is initialized. */
  if (pthread_mutexattr_destroy(&attr))
    abort();
  return UV__ERR(err);
}
/* Destroy a mutex; any error (e.g. destroying a locked mutex) is fatal. */
void uv_mutex_destroy(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_destroy(mutex);
  if (rc != 0)
    abort();
}

/* Lock a mutex; errors are fatal. */
void uv_mutex_lock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_lock(mutex);
  if (rc != 0)
    abort();
}

/* Try to lock; returns 0 on success, UV_EBUSY when already held. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_trylock(mutex);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}

/* Unlock a mutex; errors are fatal. */
void uv_mutex_unlock(uv_mutex_t* mutex) {
  int rc;

  rc = pthread_mutex_unlock(mutex);
  if (rc != 0)
    abort();
}
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return UV__ERR(pthread_rwlock_init(rwlock, NULL));
}

void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_destroy(rwlock);
  if (rc != 0)
    abort();
}

/* Acquire a read (shared) lock; errors are fatal. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_rdlock(rwlock);
  if (rc != 0)
    abort();
}

/* Try to acquire a read lock; UV_EBUSY when unavailable. */
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_tryrdlock(rwlock);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}

void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}

/* Acquire the write (exclusive) lock; errors are fatal. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_wrlock(rwlock);
  if (rc != 0)
    abort();
}

/* Try to acquire the write lock; UV_EBUSY when contended. */
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_trywrlock(rwlock);
  if (rc == 0)
    return 0;

  if (rc != EBUSY && rc != EAGAIN)
    abort();

  return UV_EBUSY;
}

void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  int rc;

  rc = pthread_rwlock_unlock(rwlock);
  if (rc != 0)
    abort();
}

/* Run `callback` exactly once, across all threads. */
void uv_once(uv_once_t* guard, void (*callback)(void)) {
  int rc;

  rc = pthread_once(guard, callback);
  if (rc != 0)
    abort();
}
#if defined(__APPLE__) && defined(__MACH__)
/* Mach semaphore backend used on macOS. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;
  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return UV_EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return UV_ENOMEM;
  abort();
  return UV_EINVAL; /* Satisfy the compiler. */
}
void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}
void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}
/* Wait, retrying when the wait is aborted (e.g. by a signal). */
void uv_sem_wait(uv_sem_t* sem) {
  int r;
  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);
  if (r != KERN_SUCCESS)
    abort();
}
/* Non-blocking wait via a zero-length timed wait; UV_EAGAIN when the
 * semaphore is unavailable.
 */
int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;
  interval.tv_sec = 0;
  interval.tv_nsec = 0;
  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return UV_EAGAIN;
  abort();
  return UV_EINVAL; /* Satisfy the compiler. */
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
#if defined(__GLIBC__) && !defined(__UCLIBC__)
/* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
 * by providing a custom implementation for glibc < 2.21 in terms of other
 * concurrency primitives.
 * Refs: https://github.com/nodejs/node/issues/19903 */
/* To preserve ABI compatibility, we treat the uv_sem_t as storage for
 * a pointer to the actual struct we're using underneath. */
static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
static int platform_needs_custom_semaphore = 0;
/* Runs once: enable the workaround when the runtime glibc is 2.x, x < 21. */
static void glibc_version_check(void) {
  const char* version = gnu_get_libc_version();
  platform_needs_custom_semaphore =
      version[0] == '2' && version[1] == '.' &&
      atoi(version + 2) < 21;
}
#elif defined(__MVS__)
#define platform_needs_custom_semaphore 1
#else /* !defined(__GLIBC__) && !defined(__MVS__) */
#define platform_needs_custom_semaphore 0
#endif
/* Out-of-line semaphore representation backed by a mutex + condvar. */
typedef struct uv_semaphore_s {
  uv_mutex_t mutex;
  uv_cond_t cond;
  unsigned int value;
} uv_semaphore_t;
#if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
    platform_needs_custom_semaphore
STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
#endif
/* Allocate and initialize a mutex+condvar backed semaphore; the uv_sem_t
 * slot stores a pointer to it. Returns 0 or a negative UV_* error code.
 */
static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
  uv_semaphore_t* sem;
  int err;

  sem = uv__malloc(sizeof(*sem));
  if (sem == NULL)
    return UV_ENOMEM;

  err = uv_mutex_init(&sem->mutex);
  if (err != 0)
    goto fail_free;

  err = uv_cond_init(&sem->cond);
  if (err != 0)
    goto fail_mutex;

  sem->value = value;
  *(uv_semaphore_t**) sem_ = sem;
  return 0;

fail_mutex:
  uv_mutex_destroy(&sem->mutex);
fail_free:
  uv__free(sem);
  return err;
}
static void uv__custom_sem_destroy(uv_sem_t* sem_) {
  uv_semaphore_t* sem;
  sem = *(uv_semaphore_t**)sem_;
  uv_cond_destroy(&sem->cond);
  uv_mutex_destroy(&sem->mutex);
  uv__free(sem);
}
static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;
  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  /* Waiters only block while the count is zero, so a signal is needed only
   * on the 0 -> 1 transition.
   */
  if (sem->value == 1)
    uv_cond_signal(&sem->cond); /* Release one to replace us. */
  uv_mutex_unlock(&sem->mutex);
}
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;
  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}
/* Non-blocking: UV_EAGAIN when the count is zero or the lock is contended. */
static int uv__custom_sem_trywait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;
  sem = *(uv_semaphore_t**)sem_;
  if (uv_mutex_trylock(&sem->mutex) != 0)
    return UV_EAGAIN;
  if (sem->value == 0) {
    uv_mutex_unlock(&sem->mutex);
    return UV_EAGAIN;
  }
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
  return 0;
}
/* Native POSIX unnamed-semaphore backend. */

static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
  int rc;

  rc = sem_init(sem, 0, value);
  return rc == 0 ? 0 : UV__ERR(errno);
}

static void uv__sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem) != 0)
    abort();
}

static void uv__sem_post(uv_sem_t* sem) {
  if (sem_post(sem) != 0)
    abort();
}

/* Wait, retrying when interrupted by a signal; other failures are fatal. */
static void uv__sem_wait(uv_sem_t* sem) {
  int rc;

  for (;;) {
    rc = sem_wait(sem);
    if (rc == 0)
      return;
    if (errno != EINTR)
      abort();
  }
}

/* Non-blocking wait; UV_EAGAIN when the semaphore is at zero. */
static int uv__sem_trywait(uv_sem_t* sem) {
  int rc;

  for (;;) {
    rc = sem_trywait(sem);
    if (rc == 0)
      return 0;
    if (errno == EINTR)
      continue;
    if (errno == EAGAIN)
      return UV_EAGAIN;
    abort();
  }
}
/* Public semaphore API: dispatch to the native or custom backend. */

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  /* Decide once whether this glibc needs the workaround implementation. */
  uv_once(&glibc_version_check_once, glibc_version_check);
#endif
  if (!platform_needs_custom_semaphore)
    return uv__sem_init(sem, value);
  return uv__custom_sem_init(sem, value);
}

void uv_sem_destroy(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_destroy(sem);
  else
    uv__custom_sem_destroy(sem);
}

void uv_sem_post(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_post(sem);
  else
    uv__custom_sem_post(sem);
}

void uv_sem_wait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    uv__sem_wait(sem);
  else
    uv__custom_sem_wait(sem);
}

int uv_sem_trywait(uv_sem_t* sem) {
  if (!platform_needs_custom_semaphore)
    return uv__sem_trywait(sem);
  return uv__custom_sem_trywait(sem);
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
/* Default-clock condvar; the monotonic-clock setup of the generic version
 * is not used on these platforms.
 */
int uv_cond_init(uv_cond_t* cond) {
  return UV__ERR(pthread_cond_init(cond, NULL));
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
/* Initialize a condvar bound to CLOCK_MONOTONIC so timed waits are immune
 * to wall-clock adjustments.
 */
int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;
  err = pthread_condattr_init(&attr);
  if (err)
    return UV__ERR(err);
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;
  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;
  return 0;
error:
  /* attr destruction failed after the condvar was created: tear it down. */
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return UV__ERR(err);
}
#endif /* defined(__APPLE__) && defined(__MACH__) */
void uv_cond_destroy(uv_cond_t* cond) {
#if defined(__APPLE__) && defined(__MACH__)
  /* It has been reported that destroying condition variables that have been
   * signalled but not waited on can sometimes result in application crashes.
   * See https://codereview.chromium.org/1323293005.
   */
  pthread_mutex_t mutex;
  struct timespec ts;
  int err;
  if (pthread_mutex_init(&mutex, NULL))
    abort();
  if (pthread_mutex_lock(&mutex))
    abort();
  /* A 1 ns timed wait drains any pending signal before destruction. */
  ts.tv_sec = 0;
  ts.tv_nsec = 1;
  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
  if (err != 0 && err != ETIMEDOUT)
    abort();
  if (pthread_mutex_unlock(&mutex))
    abort();
  if (pthread_mutex_destroy(&mutex))
    abort();
#endif /* defined(__APPLE__) && defined(__MACH__) */
  if (pthread_cond_destroy(cond))
    abort();
}
/* Wake one waiter. A failure indicates a corrupt cond, which is fatal. */
void uv_cond_signal(uv_cond_t* cond) {
  int rc = pthread_cond_signal(cond);
  if (rc != 0)
    abort();
}
/* Wake every waiter. A failure indicates a corrupt cond, which is fatal. */
void uv_cond_broadcast(uv_cond_t* cond) {
  int rc = pthread_cond_broadcast(cond);
  if (rc != 0)
    abort();
}
#if defined(__APPLE__) && defined(__MACH__)
/* Wait on `cond` with `mutex` held. Aborts on real errors. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int r;

  /* Clear errno first so the EBUSY check below only sees a value set by
   * this call.
   */
  errno = 0;
  r = pthread_cond_wait(cond, mutex);

  /* Workaround for a bug in OS X at least up to 13.6
   * See https://github.com/libuv/libuv/issues/4165
   */
  if (r == EINVAL)
    if (errno == EBUSY)
      return; /* spurious EINVAL+EBUSY combination: treat as success */

  if (r)
    abort();
}
#else /* !(defined(__APPLE__) && defined(__MACH__)) */
/* Wait on `cond` with `mutex` held; any error is fatal. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  int rc;

  rc = pthread_cond_wait(cond, mutex);
  if (rc != 0)
    abort();
}
#endif
/* Wait on `cond` for at most `timeout` nanoseconds (a relative duration).
 * Returns 0 when signalled, UV_ETIMEDOUT when the timeout elapses; aborts
 * on any other error.
 */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;
#if defined(__MVS__)
  struct timeval tv;
#endif

#if defined(__APPLE__) && defined(__MACH__)
  /* macOS has a native relative-timeout wait; no deadline conversion. */
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
#if defined(__MVS__)
  /* z/OS: convert the relative timeout to an absolute wall-clock deadline. */
  if (gettimeofday(&tv, NULL))
    abort();
  timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
#else
  /* Absolute deadline on the clock selected in uv_cond_init()
   * (CLOCK_MONOTONIC where pthread_condattr_setclock() is available).
   */
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
#endif
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif

  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return UV_ETIMEDOUT;

  abort();
#ifndef __SUNPRO_C
  return UV_EINVAL; /* Satisfy the compiler. */
#endif
}
/* Create a thread-local storage key (no destructor callback). */
int uv_key_create(uv_key_t* key) {
  int rc = pthread_key_create(key, NULL);
  return UV__ERR(rc);
}
/* Delete a TLS key; failure means the key is invalid, which is fatal. */
void uv_key_delete(uv_key_t* key) {
  int rc = pthread_key_delete(*key);
  if (rc != 0)
    abort();
}
/* Fetch this thread's value for `key`; NULL when none has been set. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = pthread_getspecific(*key);
  return value;
}
/* Store `value` under `key` for the calling thread; failure is fatal. */
void uv_key_set(uv_key_t* key, void* value) {
  int rc = pthread_setspecific(*key, value);
  if (rc != 0)
    abort();
}
#if defined(_AIX) || defined(__MVS__) || defined(__PASE__)
/* Thread naming is not supported on AIX, z/OS or IBM i PASE. */
int uv__thread_setname(const char* name) {
  return UV_ENOSYS;
}
#elif defined(__APPLE__)
/* Set the calling thread's name (macOS can only name the current thread).
 * The name is truncated to the platform limit. Returns 0 or a negative
 * UV_* error code.
 */
int uv__thread_setname(const char* name) {
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  int err;

  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';

  err = pthread_setname_np(namebuf);
  if (err)
    /* pthread_setname_np() returns the error number directly; it is not
     * documented to set errno, so translate the return value.
     */
    return UV__ERR(err);

  return 0;
}
#elif defined(__NetBSD__)
int uv__thread_setname(const char* name) {
char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
strncpy(namebuf, name, sizeof(namebuf) - 1);
namebuf[sizeof(namebuf) - 1] = '\0';
return UV__ERR(pthread_setname_np(pthread_self(), "%s", namebuf));
}
#elif defined(__OpenBSD__)
int uv__thread_setname(const char* name) {
char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
strncpy(namebuf, name, sizeof(namebuf) - 1);
namebuf[sizeof(namebuf) - 1] = '\0';
pthread_set_name_np(pthread_self(), namebuf);
return 0;
}
#else
int uv__thread_setname(const char* name) {
char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
strncpy(namebuf, name, sizeof(namebuf) - 1);
namebuf[sizeof(namebuf) - 1] = '\0';
return UV__ERR(pthread_setname_np(pthread_self(), namebuf));
}
#endif
#if (defined(__ANDROID_API__) && __ANDROID_API__ < 26) || \
defined(_AIX) || \
defined(__MVS__) || \
defined(__PASE__)
/* Reading thread names is unsupported on these platforms (and on Android
 * before API level 26, which lacks pthread_getname_np()).
 */
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
  return UV_ENOSYS;
}
#elif defined(__OpenBSD__)
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
pthread_get_name_np(*tid, thread_name, sizeof(thread_name));
strncpy(name, thread_name, size - 1);
name[size - 1] = '\0';
return 0;
}
#elif defined(__APPLE__)
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
if (pthread_getname_np(*tid, thread_name, sizeof(thread_name)) != 0)
return UV__ERR(errno);
strncpy(name, thread_name, size - 1);
name[size - 1] = '\0';
return 0;
}
#else
int uv__thread_getname(uv_thread_t* tid, char* name, size_t size) {
int r;
char thread_name[UV_PTHREAD_MAX_NAMELEN_NP];
r = pthread_getname_np(*tid, thread_name, sizeof(thread_name));
if (r != 0)
return UV__ERR(r);
strncpy(name, thread_name, size - 1);
name[size - 1] = '\0';
return 0;
}
#endif

View File

@@ -0,0 +1,510 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <termios.h>
#include <errno.h>
#include <sys/ioctl.h>
#if defined(__MVS__) && !defined(IMAXBEL)
#define IMAXBEL 0
#endif
#if defined(__PASE__)
/* On IBM i PASE, for better compatibility with running interactive programs in
* a 5250 environment, isatty() will return true for the stdin/stdout/stderr
* streams created by QSH/QP2TERM.
*
* For more, see docs on PASE_STDIO_ISATTY in
* https://www.ibm.com/support/knowledgecenter/ssw_ibm_i_74/apis/pase_environ.htm
*
* This behavior causes problems for Node as it expects that if isatty() returns
* true that TTY ioctls will be supported by that fd (which is not an
* unreasonable expectation) and when they don't it crashes with assertion
* errors.
*
* Here, we create our own version of isatty() that uses ioctl() to identify
* whether the fd is *really* a TTY or not.
*/
/* PASE replacement for isatty(): probe with a TTY-only ioctl so that fds
 * QSH/QP2TERM fake as TTYs are not misreported. Returns non-zero for a
 * real TTY.
 */
static int isreallyatty(int file) {
  int is_tty = (ioctl(file, TXISATTY + 0x81, NULL) == 0);

  /* Preserve EBADF; normalize every other failure to "not a typewriter". */
  if (!is_tty && errno != EBADF)
    errno = ENOTTY;

  return is_tty;
}
#define isatty(fd) isreallyatty(fd)
#endif
static int orig_termios_fd = -1;
static struct termios orig_termios;
static _Atomic int termios_spinlock;
/* EINTR-safe wrapper around tcsetattr(). Returns 0 on success or a
 * negative UV_* error code.
 */
int uv__tcsetattr(int fd, int how, const struct termios *term) {
  int rc;

  do
    rc = tcsetattr(fd, how, term);
  while (rc == -1 && errno == EINTR);

  return rc == -1 ? UV__ERR(errno) : 0;
}
/* Heuristically decide whether `fd` refers to the slave end of a pty (or a
 * plain tty) rather than a pty master. Used by uv_tty_init() to decide
 * whether the descriptor can safely be reopened by path.
 */
static int uv__tty_is_slave(const int fd) {
  int result;
#if defined(__linux__) || defined(__FreeBSD__)
  /* TIOCGPTN only succeeds on a pty master; failure implies slave/tty. */
  int dummy;

  result = ioctl(fd, TIOCGPTN, &dummy) != 0;
#elif defined(__APPLE__)
  /* TIOCPTYGNAME likewise only succeeds on the master side. */
  char dummy[256];

  result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0;
#elif defined(__NetBSD__)
  /*
   * NetBSD as an extension returns with ptsname(3) and ptsname_r(3) the slave
   * device name for both descriptors, the master one and slave one.
   *
   * Implement function to compare major device number with pts devices.
   *
   * The major numbers are machine-dependent, on NetBSD/amd64 they are
   * respectively:
   *  - master tty: ptc - major 6
   *  - slave tty: pts - major 5
   */
  struct stat sb;
  /* Lookup device's major for the pts driver and cache it. */
  static devmajor_t pts = NODEVMAJOR;

  if (pts == NODEVMAJOR) {
    pts = getdevmajor("pts", S_IFCHR);
    if (pts == NODEVMAJOR)
      abort();
  }

  /* Lookup stat structure behind the file descriptor. */
  if (uv__fstat(fd, &sb) != 0)
    abort();

  /* Assert character device. */
  if (!S_ISCHR(sb.st_mode))
    abort();

  /* Assert valid major. */
  if (major(sb.st_rdev) == NODEVMAJOR)
    abort();

  result = (pts == major(sb.st_rdev));
#else
  /* Fallback to ptsname: it returns NULL when fd is not a pty master. */
  result = ptsname(fd) == NULL;
#endif
  return result;
}
/* Initialize a TTY handle on file descriptor `fd`. The fd may be reopened
 * so it can be made non-blocking without affecting other processes that
 * share the underlying terminal. Returns 0 or a negative UV_* error code.
 * `unused` is the deprecated `readable` parameter and is ignored.
 */
int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int unused) {
  uv_handle_type type;
  int flags;
  int newfd;
  int r;
  int saved_flags;
  int mode;
  char path[256];
  (void)unused; /* deprecated parameter is no longer needed */

  /* File descriptors that refer to files cannot be monitored with epoll.
   * That restriction also applies to character devices like /dev/random
   * (but obviously not /dev/tty.)
   */
  type = uv_guess_handle(fd);
  if (type == UV_FILE || type == UV_UNKNOWN_HANDLE)
    return UV_EINVAL;

  flags = 0;
  newfd = -1;

  /* Save the fd flags in case we need to restore them due to an error. */
  do
    saved_flags = fcntl(fd, F_GETFL);
  while (saved_flags == -1 && errno == EINTR);

  if (saved_flags == -1)
    return UV__ERR(errno);
  /* Access mode (O_RDONLY/O_WRONLY/O_RDWR) decides readability/writability
   * flags below.
   */
  mode = saved_flags & O_ACCMODE;

  /* Reopen the file descriptor when it refers to a tty. This lets us put the
   * tty in non-blocking mode without affecting other processes that share it
   * with us.
   *
   * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also
   * affects fd 1 of `cat` because both file descriptors refer to the same
   * struct file in the kernel. When we reopen our fd 0, it points to a
   * different struct file, hence changing its properties doesn't affect
   * other processes.
   */
  if (type == UV_TTY) {
    /* Reopening a pty in master mode won't work either because the reopened
     * pty will be in slave mode (*BSD) or reopening will allocate a new
     * master/slave pair (Linux). Therefore check if the fd points to a
     * slave device.
     */
    if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0)
      r = uv__open_cloexec(path, mode | O_NOCTTY);
    else
      r = -1;

    if (r < 0) {
      /* fallback to using blocking writes */
      if (mode != O_RDONLY)
        flags |= UV_HANDLE_BLOCKING_WRITES;
      goto skip;
    }

    newfd = r;

    /* Replace the original fd with the freshly opened one. */
    r = uv__dup2_cloexec(newfd, fd);
    if (r < 0 && r != UV_EINVAL) {
      /* EINVAL means newfd == fd which could conceivably happen if another
       * thread called close(fd) between our calls to isatty() and open().
       * That's a rather unlikely event but let's handle it anyway.
       */
      uv__close(newfd);
      return r;
    }

    fd = newfd;
  }

skip:
  uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);

  /* If anything fails beyond this point we need to remove the handle from
   * the handle queue, since it was added by uv__handle_init in uv_stream_init.
   */

  if (!(flags & UV_HANDLE_BLOCKING_WRITES))
    uv__nonblock(fd, 1);

#if defined(__APPLE__)
  /* macOS needs the select() fallback for some descriptor kinds; undo the
   * flag changes and unregister the handle when it cannot be set up.
   */
  r = uv__stream_try_select((uv_stream_t*) tty, &fd);
  if (r) {
    int rc = r;
    if (newfd != -1)
      uv__close(newfd);
    uv__queue_remove(&tty->handle_queue);
    do
      r = fcntl(fd, F_SETFL, saved_flags);
    while (r == -1 && errno == EINTR);
    return rc;
  }
#endif

  if (mode != O_WRONLY)
    flags |= UV_HANDLE_READABLE;
  if (mode != O_RDONLY)
    flags |= UV_HANDLE_WRITABLE;

  uv__stream_open((uv_stream_t*) tty, fd, flags);
  tty->mode = UV_TTY_MODE_NORMAL;

  return 0;
}
/* Put `tio` into raw mode: no echo, no canonical processing, no signal
 * generation, 8-bit characters. Equivalent to cfmakeraw(3) on platforms
 * that provide it.
 */
static void uv__tty_make_raw(struct termios* tio) {
  assert(tio != NULL);

#if defined __sun || defined __MVS__
  /*
   * This implementation of cfmakeraw for Solaris and derivatives is taken from
   * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html.
   */
  tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR |
                    IGNCR | ICRNL | IXON);
  tio->c_oflag &= ~OPOST;
  tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
  tio->c_cflag &= ~(CSIZE | PARENB);
  tio->c_cflag |= CS8;

  /*
   * By default, most software expects a pending read to block until at
   * least one byte becomes available. As per termio(7I), this requires
   * setting the MIN and TIME parameters appropriately.
   *
   * As a somewhat unfortunate artifact of history, the MIN and TIME slots
   * in the control character array overlap with the EOF and EOL slots used
   * for canonical mode processing. Because the EOF character needs to be
   * the ASCII EOT value (aka Control-D), it has the byte value 4. When
   * switching to raw mode, this is interpreted as a MIN value of 4; i.e.,
   * reads will block until at least four bytes have been input.
   *
   * Other platforms with a distinct MIN slot like Linux and FreeBSD appear
   * to default to a MIN value of 1, so we'll force that value here:
   */
  tio->c_cc[VMIN] = 1;
  tio->c_cc[VTIME] = 0;
#else
  cfmakeraw(tio);
#endif /* #ifdef __sun */
}
/* Switch `tty` between NORMAL, RAW and IO modes. The termios settings in
 * effect at the first switch away from NORMAL are saved per handle, and
 * once globally for uv_tty_reset_mode(). Returns 0 or a negative UV_*
 * error code.
 */
int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
  struct termios tmp;
  int expected;
  int fd;
  int rc;

  if (uv__is_raw_tty_mode(mode)) {
    /* There is only a single raw TTY mode on UNIX. */
    mode = UV_TTY_MODE_RAW;
  }

  if (tty->mode == (int) mode)
    return 0; /* already in the requested mode */

  fd = uv__stream_fd(tty);
  if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) {
    /* First switch away from NORMAL: capture the original settings. */
    do
      rc = tcgetattr(fd, &tty->orig_termios);
    while (rc == -1 && errno == EINTR);

    if (rc == -1)
      return UV__ERR(errno);

    /* This is used for uv_tty_reset_mode() */
    do
      expected = 0;
    while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));

    if (orig_termios_fd == -1) {
      orig_termios = tty->orig_termios;
      orig_termios_fd = fd;
    }

    atomic_store(&termios_spinlock, 0);
  }

  /* Always derive the new settings from the saved originals so modes do
   * not accumulate on top of each other.
   */
  tmp = tty->orig_termios;
  switch (mode) {
    case UV_TTY_MODE_NORMAL:
      break;
    case UV_TTY_MODE_RAW:
      tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
      tmp.c_oflag |= (ONLCR);
      tmp.c_cflag |= (CS8);
      tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
      tmp.c_cc[VMIN] = 1;
      tmp.c_cc[VTIME] = 0;
      break;
    case UV_TTY_MODE_IO:
      uv__tty_make_raw(&tmp);
      break;
    default:
      UNREACHABLE();
  }

  /* Apply changes after draining */
  rc = uv__tcsetattr(fd, TCSADRAIN, &tmp);
  if (rc == 0)
    tty->mode = mode;

  return rc;
}
/* Close hook for TTY handles: restores the globally saved termios settings
 * when this handle's fd is the one they were captured from, then runs the
 * regular stream close path.
 */
void uv__tty_close(uv_tty_t* handle) {
  int expected;
  int fd;

  fd = handle->io_watcher.fd;
  if (fd == -1)
    goto done; /* fd already gone; nothing to restore */

  /* This is used for uv_tty_reset_mode() */
  do
    expected = 0;
  while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));

  if (fd == orig_termios_fd) {
    /* XXX(bnoordhuis) the tcsetattr is probably wrong when there are still
     * other uv_tty_t handles active that refer to the same tty/pty but it's
     * hard to recognize that particular situation without maintaining some
     * kind of process-global data structure, and that still won't work in a
     * multi-process setup.
     */
    uv__tcsetattr(fd, TCSANOW, &orig_termios);
    orig_termios_fd = -1;
  }

  atomic_store(&termios_spinlock, 0);

done:
  uv__stream_close((uv_stream_t*) handle);
}
/* Query the terminal's dimensions via TIOCGWINSZ.
 * On success stores columns in *width and rows in *height and returns 0;
 * otherwise returns a negative UV_* error code.
 */
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
  struct winsize ws;
  int rc;

  do
    rc = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws);
  while (rc == -1 && errno == EINTR);

  if (rc == -1)
    return UV__ERR(errno);

  *width = ws.ws_col;
  *height = ws.ws_row;
  return 0;
}
/* Classify file descriptor `file` as UV_TTY, UV_FILE, UV_NAMED_PIPE,
 * UV_TCP, UV_UDP or UV_UNKNOWN_HANDLE, using isatty(), fstat() and the
 * socket introspection calls.
 */
uv_handle_type uv_guess_handle(uv_file file) {
  struct sockaddr_storage ss;
  struct stat s;
  socklen_t len;
  int type;

  if (file < 0)
    return UV_UNKNOWN_HANDLE;

  if (isatty(file))
    return UV_TTY;

  if (uv__fstat(file, &s)) {
#if defined(__PASE__)
    /* On ibmi receiving RST from TCP instead of FIN immediately puts fd into
     * an error state. fstat will return EINVAL, getsockname will also return
     * EINVAL, even if sockaddr_storage is valid. (If file does not refer to a
     * socket, ENOTSOCK is returned instead.)
     * In such cases, we will permit the user to open the connection as uv_tcp
     * still, so that the user can get immediately notified of the error in
     * their read callback and close this fd.
     */
    len = sizeof(ss);
    if (getsockname(file, (struct sockaddr*) &ss, &len)) {
      if (errno == EINVAL)
        return UV_TCP;
    }
#endif
    return UV_UNKNOWN_HANDLE;
  }

  if (S_ISREG(s.st_mode))
    return UV_FILE;

  if (S_ISCHR(s.st_mode))
    return UV_FILE; /* XXX UV_NAMED_PIPE? */

  if (S_ISFIFO(s.st_mode))
    return UV_NAMED_PIPE;

  if (!S_ISSOCK(s.st_mode))
    return UV_UNKNOWN_HANDLE;

  /* It is a socket; distinguish TCP/UDP/unix-domain by family and type. */
  len = sizeof(ss);
  if (getsockname(file, (struct sockaddr*) &ss, &len)) {
#if defined(_AIX)
    /* On aix receiving RST from TCP instead of FIN immediately puts fd into
     * an error state. In such case getsockname will return EINVAL, even if
     * sockaddr_storage is valid.
     * In such cases, we will permit the user to open the connection as uv_tcp
     * still, so that the user can get immediately notified of the error in
     * their read callback and close this fd.
     */
    if (errno == EINVAL) {
      return UV_TCP;
    }
#endif
    return UV_UNKNOWN_HANDLE;
  }

  len = sizeof(type);
  if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
    return UV_UNKNOWN_HANDLE;

  if (type == SOCK_DGRAM)
    if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
      return UV_UDP;

  if (type == SOCK_STREAM) {
#if defined(_AIX) || defined(__DragonFly__)
    /* on AIX/DragonFly the getsockname call returns an empty sa structure
     * for sockets of type AF_UNIX. For all other types it will
     * return a properly filled in structure.
     */
    if (len == 0)
      return UV_NAMED_PIPE;
#endif /* defined(_AIX) || defined(__DragonFly__) */

    if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
      return UV_TCP;
    if (ss.ss_family == AF_UNIX)
      return UV_NAMED_PIPE;
  }

  return UV_UNKNOWN_HANDLE;
}
/* This function is async signal-safe, meaning that it's safe to call from
* inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s
* critical section when the signal was raised.
*/
/* Restore the globally saved termios settings (captured by the first
 * uv_tty_set_mode() switch away from NORMAL). Returns 0 on success,
 * UV_EBUSY when the spinlock is held, or a negative UV_* error code.
 * Preserves errno so it stays usable from signal handlers.
 */
int uv_tty_reset_mode(void) {
  int saved_errno;
  int err;

  saved_errno = errno;

  /* Try-lock only: blocking here could deadlock inside a signal handler. */
  if (atomic_exchange(&termios_spinlock, 1))
    return UV_EBUSY; /* In uv_tty_set_mode() or uv__tty_close(). */

  err = 0;
  if (orig_termios_fd != -1)
    err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);

  atomic_store(&termios_spinlock, 0);
  errno = saved_errno;

  return err;
}
/* No-op on Unix: virtual-terminal state only applies to the Windows console. */
void uv_tty_set_vterm_state(uv_tty_vtermstate_t state) {
}
/* Unsupported on Unix: virtual-terminal state only applies to the Windows
 * console.
 */
int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state) {
  return UV_ENOTSUP;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,474 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* This file is private to libuv. It provides common functionality to both
* Windows and Unix backends.
*/
#ifndef UV_COMMON_H_
#define UV_COMMON_H_
#include <assert.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "uv.h"
#include "uv/tree.h"
#include "queue.h"
#include "strscpy.h"
#ifndef _MSC_VER
# include <stdatomic.h>
#endif
#if EDOM > 0
# define UV__ERR(x) (-(x))
#else
# define UV__ERR(x) (x)
#endif
#if !defined(snprintf) && defined(_MSC_VER) && _MSC_VER < 1900
extern int snprintf(char*, size_t, const char*, ...);
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define ARRAY_END(a) ((a) + ARRAY_SIZE(a))
#define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member)))
/* C11 defines static_assert to be a macro which calls _Static_assert. */
#if defined(static_assert)
#define STATIC_ASSERT(expr) static_assert(expr, #expr)
#else
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
#endif
#ifdef _MSC_VER
#define uv__exchange_int_relaxed(p, v) \
InterlockedExchangeNoFence((LONG volatile*)(p), v)
#else
#define uv__exchange_int_relaxed(p, v) \
atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed)
#endif
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
/* Handle flags. Some flags are specific to Windows or UNIX. */
enum {
/* Used by all handles. */
UV_HANDLE_CLOSING = 0x00000001,
UV_HANDLE_CLOSED = 0x00000002,
UV_HANDLE_ACTIVE = 0x00000004,
UV_HANDLE_REF = 0x00000008,
UV_HANDLE_INTERNAL = 0x00000010,
UV_HANDLE_ENDGAME_QUEUED = 0x00000020,
/* Used by streams. */
UV_HANDLE_LISTENING = 0x00000040,
UV_HANDLE_CONNECTION = 0x00000080,
UV_HANDLE_SHUT = 0x00000200,
UV_HANDLE_READ_PARTIAL = 0x00000400,
UV_HANDLE_READ_EOF = 0x00000800,
/* Used by streams and UDP handles. */
UV_HANDLE_READING = 0x00001000,
UV_HANDLE_BOUND = 0x00002000,
UV_HANDLE_READABLE = 0x00004000,
UV_HANDLE_WRITABLE = 0x00008000,
UV_HANDLE_READ_PENDING = 0x00010000,
UV_HANDLE_SYNC_BYPASS_IOCP = 0x00020000,
UV_HANDLE_ZERO_READ = 0x00040000,
UV_HANDLE_EMULATE_IOCP = 0x00080000,
UV_HANDLE_BLOCKING_WRITES = 0x00100000,
UV_HANDLE_CANCELLATION_PENDING = 0x00200000,
/* Used by uv_tcp_t and uv_udp_t handles */
UV_HANDLE_IPV6 = 0x00400000,
/* Only used by uv_tcp_t handles. */
UV_HANDLE_TCP_NODELAY = 0x01000000,
UV_HANDLE_TCP_KEEPALIVE = 0x02000000,
UV_HANDLE_TCP_SINGLE_ACCEPT = 0x04000000,
UV_HANDLE_TCP_ACCEPT_STATE_CHANGING = 0x08000000,
UV_HANDLE_SHARED_TCP_SOCKET = 0x10000000,
/* Only used by uv_udp_t handles. */
UV_HANDLE_UDP_PROCESSING = 0x01000000,
UV_HANDLE_UDP_CONNECTED = 0x02000000,
UV_HANDLE_UDP_RECVMMSG = 0x04000000,
/* Only used by uv_pipe_t handles. */
UV_HANDLE_NON_OVERLAPPED_PIPE = 0x01000000,
UV_HANDLE_PIPESERVER = 0x02000000,
/* Only used by uv_tty_t handles. */
UV_HANDLE_TTY_READABLE = 0x01000000,
UV_HANDLE_UNUSED0 = 0x02000000,
UV_HANDLE_TTY_SAVED_POSITION = 0x04000000,
UV_HANDLE_TTY_SAVED_ATTRIBUTES = 0x08000000,
/* Only used by uv_signal_t handles. */
UV_SIGNAL_ONE_SHOT_DISPATCHED = 0x01000000,
UV_SIGNAL_ONE_SHOT = 0x02000000,
/* Only used by uv_poll_t handles. */
UV_HANDLE_POLL_SLOW = 0x01000000,
/* Only used by uv_process_t handles. */
UV_HANDLE_REAP = 0x10000000
};
/* True when `m` is one of the raw TTY modes (plain RAW or RAW_VT). */
static inline int uv__is_raw_tty_mode(uv_tty_mode_t m) {
  switch (m) {
  case UV_TTY_MODE_RAW:
  case UV_TTY_MODE_RAW_VT:
    return 1;
  default:
    return 0;
  }
}
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
void uv__loop_close(uv_loop_t* loop);
int uv__read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags);
int uv__tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
uv_connect_cb cb);
int uv__udp_init_ex(uv_loop_t* loop,
uv_udp_t* handle,
unsigned flags,
int domain);
int uv__udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags);
int uv__udp_connect(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen);
int uv__udp_disconnect(uv_udp_t* handle);
int uv__udp_is_connected(uv_udp_t* handle);
int uv__udp_send(uv_udp_send_t* req,
uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen,
uv_udp_send_cb send_cb);
int uv__udp_try_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr,
unsigned int addrlen);
int uv__udp_try_send2(uv_udp_t* handle,
unsigned int count,
uv_buf_t* bufs[/*count*/],
unsigned int nbufs[/*count*/],
struct sockaddr* addrs[/*count*/]);
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
uv_udp_recv_cb recv_cb);
int uv__udp_recv_stop(uv_udp_t* handle);
void uv__fs_poll_close(uv_fs_poll_t* handle);
int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. */
enum uv__work_kind {
UV__WORK_CPU,
UV__WORK_FAST_IO,
UV__WORK_SLOW_IO
};
void uv__work_submit(uv_loop_t* loop,
struct uv__work *w,
enum uv__work_kind kind,
void (*work)(struct uv__work *w),
void (*done)(struct uv__work *w, int status));
void uv__work_done(uv_async_t* handle);
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
void uv__fs_scandir_cleanup(uv_fs_t* req);
void uv__fs_readdir_cleanup(uv_fs_t* req);
uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent);
int uv__next_timeout(const uv_loop_t* loop);
void uv__run_timers(uv_loop_t* loop);
void uv__timer_close(uv_timer_t* handle);
void uv__process_title_cleanup(void);
void uv__signal_cleanup(void);
void uv__threadpool_cleanup(void);
#define uv__has_active_reqs(loop) \
((loop)->active_reqs.count > 0)
#define uv__req_register(loop) \
do { \
(loop)->active_reqs.count++; \
} \
while (0)
#define uv__req_unregister(loop) \
do { \
assert(uv__has_active_reqs(loop)); \
(loop)->active_reqs.count--; \
} \
while (0)
#define uv__has_active_handles(loop) \
((loop)->active_handles > 0)
#define uv__active_handle_add(h) \
do { \
(h)->loop->active_handles++; \
} \
while (0)
#define uv__active_handle_rm(h) \
do { \
(h)->loop->active_handles--; \
} \
while (0)
#define uv__is_active(h) \
(((h)->flags & UV_HANDLE_ACTIVE) != 0)
#define uv__is_closing(h) \
(((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0)
#if defined(_WIN32)
# define uv__is_stream_shutting(h) \
(h->stream.conn.shutdown_req != NULL)
#else
# define uv__is_stream_shutting(h) \
(h->shutdown_req != NULL)
#endif
#define uv__handle_start(h) \
do { \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \
(h)->flags |= UV_HANDLE_ACTIVE; \
if (((h)->flags & UV_HANDLE_REF) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_stop(h) \
do { \
if (((h)->flags & UV_HANDLE_ACTIVE) == 0) break; \
(h)->flags &= ~UV_HANDLE_ACTIVE; \
if (((h)->flags & UV_HANDLE_REF) != 0) uv__active_handle_rm(h); \
} \
while (0)
#define uv__handle_ref(h) \
do { \
if (((h)->flags & UV_HANDLE_REF) != 0) break; \
(h)->flags |= UV_HANDLE_REF; \
if (((h)->flags & UV_HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) uv__active_handle_add(h); \
} \
while (0)
#define uv__handle_unref(h) \
do { \
if (((h)->flags & UV_HANDLE_REF) == 0) break; \
(h)->flags &= ~UV_HANDLE_REF; \
if (((h)->flags & UV_HANDLE_CLOSING) != 0) break; \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) uv__active_handle_rm(h); \
} \
while (0)
#define uv__has_ref(h) \
(((h)->flags & UV_HANDLE_REF) != 0)
#if defined(_WIN32)
# define uv__handle_platform_init(h) ((h)->u.fd = -1)
#else
# define uv__handle_platform_init(h) ((h)->next_closing = NULL)
#endif
#define uv__handle_init(loop_, h, type_) \
do { \
(h)->loop = (loop_); \
(h)->type = (type_); \
(h)->flags = UV_HANDLE_REF; /* Ref the loop when active. */ \
uv__queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__handle_platform_init(h); \
} \
while (0)
/* Note: uses an open-coded version of SET_REQ_SUCCESS() because of
* a circular dependency between src/uv-common.h and src/win/internal.h.
*/
#if defined(_WIN32)
# define UV_REQ_INIT(req, typ) \
do { \
(req)->type = (typ); \
(req)->u.io.overlapped.Internal = 0; /* SET_REQ_SUCCESS() */ \
} \
while (0)
#else
# define UV_REQ_INIT(req, typ) \
do { \
(req)->type = (typ); \
} \
while (0)
#endif
#define uv__req_init(loop, req, typ) \
do { \
UV_REQ_INIT(req, typ); \
uv__req_register(loop); \
} \
while (0)
#define uv__get_internal_fields(loop) \
((uv__loop_internal_fields_t*) loop->internal_fields)
#define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics)
#define uv__metrics_inc_loop_count(loop) \
do { \
uv__get_loop_metrics(loop)->metrics.loop_count++; \
} while (0)
#define uv__metrics_inc_events(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events += (e); \
} while (0)
#define uv__metrics_inc_events_waiting(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events_waiting += (e); \
} while (0)
/* Allocator prototypes */
void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s);
char *uv__strndup(const char* s, size_t n);
void* uv__malloc(size_t size);
void uv__free(void* ptr);
void* uv__realloc(void* ptr, size_t size);
void* uv__reallocf(void* ptr, size_t size);
typedef struct uv__loop_metrics_s uv__loop_metrics_t;
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;
struct uv__loop_metrics_s {
uv_metrics_t metrics;
uint64_t provider_entry_time;
uint64_t provider_idle_time;
uv_mutex_t lock;
};
void uv__metrics_update_idle_time(uv_loop_t* loop);
void uv__metrics_set_provider_entry_time(uv_loop_t* loop);
#ifdef __linux__
struct uv__iou {
uint32_t* sqhead;
uint32_t* sqtail;
uint32_t sqmask;
uint32_t* sqflags;
uint32_t* cqhead;
uint32_t* cqtail;
uint32_t cqmask;
void* sq; /* pointer to munmap() on event loop teardown */
void* cqe; /* pointer to array of struct uv__io_uring_cqe */
void* sqe; /* pointer to array of struct uv__io_uring_sqe */
size_t sqlen;
size_t cqlen;
size_t maxlen;
size_t sqelen;
int ringfd;
uint32_t in_flight;
};
#endif /* __linux__ */
struct uv__loop_internal_fields_s {
unsigned int flags;
uv__loop_metrics_t loop_metrics;
int current_timeout;
#ifdef __linux__
struct uv__iou ctl;
struct uv__iou iou;
void* inv; /* used by uv__platform_invalidate_fd() */
#endif /* __linux__ */
};
#if defined(_WIN32)
# define UV_PTHREAD_MAX_NAMELEN_NP 32767
#elif defined(__APPLE__)
# define UV_PTHREAD_MAX_NAMELEN_NP 64
#elif defined(__NetBSD__) || defined(__illumos__)
# define UV_PTHREAD_MAX_NAMELEN_NP PTHREAD_MAX_NAMELEN_NP
#elif defined (__linux__)
# define UV_PTHREAD_MAX_NAMELEN_NP 16
#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
# define UV_PTHREAD_MAX_NAMELEN_NP (MAXCOMLEN + 1)
#else
# define UV_PTHREAD_MAX_NAMELEN_NP 16
#endif
/* Open-coded so downstream users don't have to link libm. */
static inline int uv__isinf(double d) {
  uint64_t bits;
  STATIC_ASSERT(sizeof(bits) == sizeof(d));
  memcpy(&bits, &d, sizeof(bits));
  /* IEEE-754 infinity: exponent field all ones, mantissa zero; the sign
   * bit is masked off so both +inf and -inf match.
   */
  return (bits & 0x7fffffffffffffffULL) == 0x7ff0000000000000ULL;
}
/* Open-coded so downstream users don't have to link libm. */
static inline int uv__isnan(double d) {
  uint64_t bits;
  STATIC_ASSERT(sizeof(bits) == sizeof(d));
  memcpy(&bits, &d, sizeof(bits));
  /* IEEE-754 NaN: exponent field all ones and a non-zero mantissa, i.e.
   * (with the sign bit masked off) strictly above the infinity pattern.
   */
  return (bits & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
}
#endif /* UV_COMMON_H_ */

View File

@@ -0,0 +1,119 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
/* Map a uv_handle_type enumerator to its lowercase name ("tcp", "timer",
 * ...). Returns NULL for UV_UNKNOWN_HANDLE and the MAX sentinel.
 */
const char* uv_handle_type_name(uv_handle_type type) {
  switch (type) {
  /* Expand one case per entry in the UV_HANDLE_TYPE_MAP X-macro. */
#define XX(uc,lc) case UV_##uc: return #lc;
  UV_HANDLE_TYPE_MAP(XX)
#undef XX
  case UV_FILE: return "file";
  case UV_HANDLE_TYPE_MAX:
  case UV_UNKNOWN_HANDLE: return NULL;
  }
  return NULL;
}
/* Trivial public accessors for uv_handle_t fields. */

/* Return the handle's type tag. */
uv_handle_type uv_handle_get_type(const uv_handle_t* handle) {
  return handle->type;
}

/* Return the user data pointer stored on the handle. */
void* uv_handle_get_data(const uv_handle_t* handle) {
  return handle->data;
}

/* Return the loop the handle is attached to. */
uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle) {
  return handle->loop;
}

/* Store a user data pointer on the handle. */
void uv_handle_set_data(uv_handle_t* handle, void* data) {
  handle->data = data;
}
/* Map a uv_req_type enumerator to its lowercase name ("write", "fs", ...).
 * Returns NULL for unknown, private and sentinel values.
 */
const char* uv_req_type_name(uv_req_type type) {
  switch (type) {
  /* Expand one case per entry in the UV_REQ_TYPE_MAP X-macro. */
#define XX(uc,lc) case UV_##uc: return #lc;
  UV_REQ_TYPE_MAP(XX)
#undef XX
  case UV_REQ_TYPE_MAX:
  case UV_UNKNOWN_REQ:
  default: /* UV_REQ_TYPE_PRIVATE */
    break;
  }
  return NULL;
}
/* Accessors that let embedders treat uv_req_t as an opaque type. */

/* Return the type tag stored on the request. */
uv_req_type uv_req_get_type(const uv_req_t* req) {
  return req->type;
}

/* Return the user-defined pointer stored with uv_req_set_data(). */
void* uv_req_get_data(const uv_req_t* req) {
  return req->data;
}

/* Attach an arbitrary user-defined pointer to the request. */
void uv_req_set_data(uv_req_t* req, void* data) {
  req->data = data;
}
/* Bytes queued for writing on the stream but not yet sent. */
size_t uv_stream_get_write_queue_size(const uv_stream_t* stream) {
  return stream->write_queue_size;
}

/* Bytes queued for sending on the UDP handle. */
size_t uv_udp_get_send_queue_size(const uv_udp_t* handle) {
  return handle->send_queue_size;
}

/* Number of send requests currently queued on the UDP handle. */
size_t uv_udp_get_send_queue_count(const uv_udp_t* handle) {
  return handle->send_queue_count;
}

/* Operating-system PID of the spawned child process. */
uv_pid_t uv_process_get_pid(const uv_process_t* proc) {
  return proc->pid;
}
/* Accessors for the result fields of a completed uv_fs_t request. */

/* Which filesystem operation this request performed. */
uv_fs_type uv_fs_get_type(const uv_fs_t* req) {
  return req->fs_type;
}

/* Operation result: a negative libuv error code on failure, otherwise
 * operation-specific (e.g. byte count, fd). */
ssize_t uv_fs_get_result(const uv_fs_t* req) {
  return req->result;
}

/* Operation-specific result pointer (e.g. readlink target). */
void* uv_fs_get_ptr(const uv_fs_t* req) {
  return req->ptr;
}

/* Path the operation was performed on. */
const char* uv_fs_get_path(const uv_fs_t* req) {
  return req->path;
}

/* Stat buffer filled in by stat-family operations. */
uv_stat_t* uv_fs_get_statbuf(uv_fs_t* req) {
  return &req->statbuf;
}
/* Return the user-defined pointer stored with uv_loop_set_data(). */
void* uv_loop_get_data(const uv_loop_t* loop) {
  return loop->data;
}

/* Attach an arbitrary user-defined pointer to the loop. */
void uv_loop_set_data(uv_loop_t* loop, void* data) {
  loop->data = data;
}

View File

@@ -0,0 +1,45 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v)
#define UV_STRINGIFY_HELPER(v) #v
#define UV_VERSION_STRING_BASE UV_STRINGIFY(UV_VERSION_MAJOR) "." \
UV_STRINGIFY(UV_VERSION_MINOR) "." \
UV_STRINGIFY(UV_VERSION_PATCH)
#if UV_VERSION_IS_RELEASE
# define UV_VERSION_STRING UV_VERSION_STRING_BASE
#else
# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-" UV_VERSION_SUFFIX
#endif
/* Return the libuv version as the numeric UV_VERSION_HEX constant. */
unsigned int uv_version(void) {
  return UV_VERSION_HEX;
}

/* Return the libuv version as a string, assembled above from
 * UV_VERSION_MAJOR/MINOR/PATCH plus the suffix for non-release builds. */
const char* uv_version_string(void) {
  return UV_VERSION_STRING;
}

View File

@@ -0,0 +1,98 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "atomicops-inl.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Final stage of closing an async handle. Only completes the close when
 * the handle is in the CLOSING state and no wakeup req is still pending
 * on the IOCP (async_sent == 0); otherwise the endgame is re-requested
 * later by uv__process_async_wakeup_req(). */
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle) {
  if (handle->flags & UV_HANDLE_CLOSING &&
      !handle->async_sent) {
    assert(!(handle->flags & UV_HANDLE_CLOSED));
    uv__handle_close(handle);
  }
}
/* Initialize an async handle so other threads can wake this loop with
 * uv_async_send(). The embedded UV_WAKEUP req is what gets posted to the
 * loop's completion port; its data pointer leads back to the handle.
 * Always succeeds (returns 0). */
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
  uv_req_t* wakeup_req = &handle->async_req;

  uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
  handle->async_sent = 0;
  handle->async_cb = async_cb;

  UV_REQ_INIT(wakeup_req, UV_WAKEUP);
  wakeup_req->data = handle;

  uv__handle_start(handle);
  return 0;
}
/* Begin closing an async handle. If no wakeup req is currently queued on
 * the IOCP we can request the endgame immediately; otherwise
 * uv__process_async_wakeup_req() will do so once the req is dequeued. */
void uv__async_close(uv_loop_t* loop, uv_async_t* handle) {
  if (handle->async_sent == 0)
    uv__want_endgame(loop, (uv_handle_t*) handle);

  uv__handle_closing(handle);
}
/* Wake the handle's loop from any thread. The atomic test-and-set on
 * async_sent coalesces concurrent calls so at most one wakeup packet is
 * queued on the IOCP at a time. Returns 0, or -1 if the handle is not an
 * async handle. */
int uv_async_send(uv_async_t* handle) {
  uv_loop_t* loop = handle->loop;
  if (handle->type != UV_ASYNC) {
    /* Can't set errno because that's not thread-safe. */
    return -1;
  }
  /* The user should make sure never to call uv_async_send to a closing or
   * closed handle. */
  assert(!(handle->flags & UV_HANDLE_CLOSING));
  /* Post a completion packet only if none is already pending. */
  if (!uv__atomic_exchange_set(&handle->async_sent)) {
    POST_COMPLETION_FOR_REQ(loop, &handle->async_req);
  }
  return 0;
}
/* Handle a UV_WAKEUP packet dequeued from the IOCP: clear the pending
 * flag so uv_async_send() can post again, then either continue a pending
 * close or invoke the user's callback. */
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
    uv_req_t* req) {
  assert(handle->type == UV_ASYNC);
  assert(req->type == UV_WAKEUP);
  handle->async_sent = 0;
  if (handle->flags & UV_HANDLE_CLOSING) {
    /* uv__async_close() deferred the endgame until this req drained. */
    uv__want_endgame(loop, (uv_handle_t*)handle);
  } else if (handle->async_cb != NULL) {
    handle->async_cb(handle);
  }
}

View File

@@ -0,0 +1,61 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_ATOMICOPS_INL_H_
#define UV_WIN_ATOMICOPS_INL_H_
#include "uv.h"
#include "internal.h"
/* Atomic set operation on char */
#ifdef _MSC_VER /* MSVC */

/* _InterlockedOr8 is supported by MSVC on x32 and x64. It is slightly less
 * efficient than InterlockedExchange, but InterlockedExchange8 does not exist,
 * and interlocked operations on larger targets might require the target to be
 * aligned. */
#pragma intrinsic(_InterlockedOr8)

/* Atomically set *target non-zero and return its previous value, so a
 * zero return means this caller performed the 0 -> 1 transition. */
static char INLINE uv__atomic_exchange_set(char volatile* target) {
  return _InterlockedOr8(target, 1);
}

#else /* GCC, Clang in mingw mode */

static inline char uv__atomic_exchange_set(char volatile* target) {
#if defined(__i386__) || defined(__x86_64__)
  /* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
  const char one = 1;
  char old_value;
  __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
                        : "=r"(old_value), "=m"(*target)
                        : "0"(one), "m"(*target)
                        : "memory");
  return old_value;
#else
  /* Portable fallback: same set-and-return-old semantics. */
  return __sync_fetch_and_or(target, 1);
#endif
}

#endif
#endif /* UV_WIN_ATOMICOPS_INL_H_ */

View File

@@ -0,0 +1,683 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
#include <crtdbg.h>
#endif
#include "uv.h"
#include "internal.h"
#include "queue.h"
#include "handle-inl.h"
#include "heap-inl.h"
#include "req-inl.h"
/* uv_once initialization guards */
static uv_once_t uv_init_guard_ = UV_ONCE_INIT;
#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
/* Our crt debug report handler allows us to temporarily disable asserts
* just for the current thread.
*/
UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE;
static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) {
if (uv__crt_assert_enabled || report_type != _CRT_ASSERT)
return FALSE;
if (ret_val) {
/* Set ret_val to 0 to continue with normal execution.
* Set ret_val to 1 to trigger a breakpoint.
*/
if(IsDebuggerPresent())
*ret_val = 1;
else
*ret_val = 0;
}
/* Don't call _CrtDbgReport. */
return TRUE;
}
#else
UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE;
#endif
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
const wchar_t* function, const wchar_t * file, unsigned int line,
uintptr_t reserved) {
/* No-op. */
}
#endif
static uv_loop_t** uv__loops;
static int uv__loops_size;
static int uv__loops_capacity;
#define UV__LOOPS_CHUNK_SIZE 8
static uv_mutex_t uv__loops_lock;
/* One-time setup of the lock guarding the global loop registry; called
 * from uv__init() via uv_once. */
static void uv__loops_init(void) {
  uv_mutex_init(&uv__loops_lock);
}
/* Register a loop in the global uv__loops registry so that e.g.
 * uv__wake_all_loops() can reach it. Grows the array in
 * UV__LOOPS_CHUNK_SIZE increments. Returns 0 or UV_ENOMEM. */
static int uv__loops_add(uv_loop_t* loop) {
  uv_loop_t** new_loops;
  int new_capacity, i;

  uv_mutex_lock(&uv__loops_lock);

  if (uv__loops_size == uv__loops_capacity) {
    new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE;
    new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity);
    if (!new_loops)
      goto failed_loops_realloc;
    uv__loops = new_loops;
    /* NULL out the freshly added slots. */
    for (i = uv__loops_capacity; i < new_capacity; ++i)
      uv__loops[i] = NULL;
    uv__loops_capacity = new_capacity;
  }
  uv__loops[uv__loops_size] = loop;
  ++uv__loops_size;

  uv_mutex_unlock(&uv__loops_lock);
  return 0;

failed_loops_realloc:
  /* On failure the old array is still valid and unchanged. */
  uv_mutex_unlock(&uv__loops_lock);
  return UV_ENOMEM;
}
/* Unregister a loop from the global registry. Removal is swap-with-last
 * (order is not preserved). Opportunistically shrinks the array when it
 * grew past 4 chunks and is now less than half full; a failed shrink
 * realloc is simply ignored. */
static void uv__loops_remove(uv_loop_t* loop) {
  int loop_index;
  int smaller_capacity;
  uv_loop_t** new_loops;

  uv_mutex_lock(&uv__loops_lock);

  for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) {
    if (uv__loops[loop_index] == loop)
      break;
  }
  /* If loop was not found, ignore */
  if (loop_index == uv__loops_size)
    goto loop_removed;

  /* Swap the last entry into the vacated slot. */
  uv__loops[loop_index] = uv__loops[uv__loops_size - 1];
  uv__loops[uv__loops_size - 1] = NULL;
  --uv__loops_size;

  if (uv__loops_size == 0) {
    uv__loops_capacity = 0;
    uv__free(uv__loops);
    uv__loops = NULL;
    goto loop_removed;
  }

  /* If we didn't grow to big skip downsizing */
  if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE)
    goto loop_removed;

  /* Downsize only if more than half of buffer is free */
  smaller_capacity = uv__loops_capacity / 2;
  if (uv__loops_size >= smaller_capacity)
    goto loop_removed;
  new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity);
  if (!new_loops)
    goto loop_removed;
  uv__loops = new_loops;
  uv__loops_capacity = smaller_capacity;

loop_removed:
  uv_mutex_unlock(&uv__loops_lock);
}
/* Post an empty completion packet to every registered loop's IOCP so each
 * blocked uv__poll() call returns. Used e.g. after system resume (see
 * detect-wakeup.c). */
void uv__wake_all_loops(void) {
  int idx;

  uv_mutex_lock(&uv__loops_lock);
  for (idx = 0; idx < uv__loops_size; ++idx) {
    uv_loop_t* registered = uv__loops[idx];
    assert(registered);
    if (registered->iocp != INVALID_HANDLE_VALUE)
      PostQueuedCompletionStatus(registered->iocp, 0, 0, NULL);
  }
  uv_mutex_unlock(&uv__loops_lock);
}
/* One-time process-wide initialization, run via uv_once from
 * uv__once_init(). Order matters: winapi function pointers are resolved
 * before any subsystem that may need them. */
static void uv__init(void) {
  /* Tell Windows that we will handle critical errors. */
  SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
               SEM_NOOPENFILEERRORBOX);

  /* Tell the CRT to not exit the application when an invalid parameter is
   * passed. The main issue is that invalid FDs will trigger this behavior.
   */
#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
  _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
#endif

  /* We also need to setup our debug report handler because some CRT
   * functions (eg _get_osfhandle) raise an assert when called with invalid
   * FDs even though they return the proper error code in the release build.
   */
#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
  _CrtSetReportHook(uv__crt_dbg_report_handler);
#endif

  /* Initialize tracking of all uv loops */
  uv__loops_init();

  /* Fetch winapi function pointers. This must be done first because other
   * initialization code might need these function pointers to be loaded.
   */
  uv__winapi_init();

  /* Initialize winsock */
  uv__winsock_init();

  /* Initialize FS */
  uv__fs_init();

  /* Initialize signal stuff */
  uv__signals_init();

  /* Initialize console */
  uv__console_init();

  /* Initialize utilities */
  uv__util_init();

  /* Initialize system wakeup detection */
  uv__init_detect_system_wakeup();
}
/* Initialize a loop: create its IOCP, internal fields, work queue, timer
 * heap and the internal wq_async handle used by the thread pool, then
 * register it in the global loop list. On failure, resources acquired so
 * far are unwound via the goto chain at the bottom (in reverse order of
 * acquisition). Returns 0 or a negative libuv error code. */
int uv_loop_init(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;
  struct heap* timer_heap;
  int err;

  /* Initialize libuv itself first */
  uv__once_init();

  /* Create an I/O completion port */
  loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (loop->iocp == NULL)
    return uv_translate_sys_error(GetLastError());

  lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
  if (lfields == NULL)
    return UV_ENOMEM;
  loop->internal_fields = lfields;

  err = uv_mutex_init(&lfields->loop_metrics.lock);
  if (err)
    goto fail_metrics_mutex_init;
  memset(&lfields->loop_metrics.metrics,
         0,
         sizeof(lfields->loop_metrics.metrics));

  /* To prevent uninitialized memory access, loop->time must be initialized
   * to zero before calling uv_update_time for the first time.
   */
  loop->time = 0;
  uv_update_time(loop);

  uv__queue_init(&loop->wq);
  uv__queue_init(&loop->handle_queue);
  loop->active_reqs.count = 0;
  loop->active_handles = 0;

  loop->pending_reqs_tail = NULL;

  loop->endgame_handles = NULL;

  loop->timer_heap = timer_heap = uv__malloc(sizeof(*timer_heap));
  if (timer_heap == NULL) {
    err = UV_ENOMEM;
    goto fail_timers_alloc;
  }

  heap_init(timer_heap);

  loop->check_handles = NULL;
  loop->prepare_handles = NULL;
  loop->idle_handles = NULL;

  loop->next_prepare_handle = NULL;
  loop->next_check_handle = NULL;
  loop->next_idle_handle = NULL;

  memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv_mutex_init(&loop->wq_mutex);
  if (err)
    goto fail_mutex_init;

  /* Internal async handle used by the thread pool to deliver completed
   * work items back to this loop. It must not keep the loop alive. */
  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
  if (err)
    goto fail_async_init;

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV_HANDLE_INTERNAL;

  err = uv__loops_add(loop);
  if (err)
    goto fail_async_init;

  return 0;

fail_async_init:
  uv_mutex_destroy(&loop->wq_mutex);

fail_mutex_init:
  uv__free(timer_heap);
  loop->timer_heap = NULL;

fail_timers_alloc:
  uv_mutex_destroy(&lfields->loop_metrics.lock);

fail_metrics_mutex_init:
  uv__free(lfields);
  loop->internal_fields = NULL;
  CloseHandle(loop->iocp);
  loop->iocp = INVALID_HANDLE_VALUE;

  return err;
}
/* Refresh the loop's cached clock from uv__hrtime at millisecond
 * resolution (1000 ticks per second). */
void uv_update_time(uv_loop_t* loop) {
  uint64_t new_time = uv__hrtime(1000);
  /* The clock is expected to be monotonic; time must never go backwards. */
  assert(new_time >= loop->time);
  loop->time = new_time;
}
/* Run process-wide initialization exactly once, no matter how many loops
 * or threads call in. */
void uv__once_init(void) {
  uv_once(&uv_init_guard_, uv__init);
}
/* Tear down a loop previously set up by uv_loop_init(). The caller is
 * expected to have closed all user handles already; the asserts below
 * verify the work queue drained and no requests remain active. */
void uv__loop_close(uv_loop_t* loop) {
  uv__loop_internal_fields_t* lfields;
  size_t i;

  uv__loops_remove(loop);

  /* Close the async handle without needing an extra loop iteration.
   * We might have a pending message, but we're just going to destroy the IOCP
   * soon, so we can just discard it now without the usual risk of a getting
   * another notification from GetQueuedCompletionStatusEx after calling the
   * close_cb (which we also skip defining). We'll assert later that queue was
   * actually empty and all reqs handled. */
  loop->wq_async.async_sent = 0;
  loop->wq_async.close_cb = NULL;
  uv__handle_closing(&loop->wq_async);
  uv__handle_close(&loop->wq_async);

  /* Release the cached peer sockets used by the poll implementation. */
  for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
    SOCKET sock = loop->poll_peer_sockets[i];
    if (sock != 0 && sock != INVALID_SOCKET)
      closesocket(sock);
  }

  uv_mutex_lock(&loop->wq_mutex);
  assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
  assert(!uv__has_active_reqs(loop));
  uv_mutex_unlock(&loop->wq_mutex);
  uv_mutex_destroy(&loop->wq_mutex);

  uv__free(loop->timer_heap);
  loop->timer_heap = NULL;

  lfields = uv__get_internal_fields(loop);
  uv_mutex_destroy(&lfields->loop_metrics.lock);
  uv__free(lfields);
  loop->internal_fields = NULL;

  CloseHandle(loop->iocp);
}
/* Apply a uv_loop_configure() option. The only option supported on this
 * platform is UV_METRICS_IDLE_TIME, which enables idle-time accounting in
 * uv__poll(); anything else yields UV_ENOSYS. The va_list is unused. */
int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(loop);

  if (option != UV_METRICS_IDLE_TIME)
    return UV_ENOSYS;

  lfields->flags |= UV_METRICS_IDLE_TIME;
  return 0;
}
/* There is no pollable backend file descriptor on Windows; the loop is
 * driven by an I/O completion port instead. */
int uv_backend_fd(const uv_loop_t* loop) {
  return -1;
}

/* Re-initializing a loop after fork() is not supported on Windows. */
int uv_loop_fork(uv_loop_t* loop) {
  return UV_ENOSYS;
}
/* A loop is alive while it has active handles, active requests, pending
 * completion reqs to process, or handles waiting for their endgame. */
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->pending_reqs_tail != NULL ||
         loop->endgame_handles != NULL;
}

/* Public wrapper around uv__loop_alive(). */
int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}
/* How long the backend may block waiting for I/O, in milliseconds.
 * Returns 0 (don't block) when the loop was stopped, nothing is active,
 * or there is immediate work queued (pending reqs, idle handles, endgame
 * handles); otherwise defers to uv__next_timeout() for the next timer's
 * due time. */
int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag == 0 &&
      /* uv__loop_alive(loop) && */
      (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
      loop->pending_reqs_tail == NULL &&
      loop->idle_handles == NULL &&
      loop->endgame_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
}
/* Block on the loop's IOCP for up to `timeout` ms, converting every
 * dequeued completion packet into a pending req, while maintaining the
 * UV_METRICS_IDLE_TIME accounting. Retries (with a growing timeout) when
 * GetQueuedCompletionStatusEx returns from a timed wait earlier than the
 * requested target time. */
static void uv__poll(uv_loop_t* loop, DWORD timeout) {
  uv__loop_internal_fields_t* lfields;
  BOOL success;
  uv_req_t* req;
  OVERLAPPED_ENTRY overlappeds[128];
  ULONG count;
  ULONG i;
  int repeat;
  uint64_t timeout_time;
  uint64_t user_timeout;
  uint64_t actual_timeout;
  int reset_timeout;

  lfields = uv__get_internal_fields(loop);
  timeout_time = loop->time + timeout;

  /* With idle-time metrics enabled, poll once with a zero timeout first so
   * already-queued events are counted as "waiting"; the real timeout is
   * restored for the next iteration. */
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (repeat = 0; ; repeat++) {
    actual_timeout = timeout;

    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    success = GetQueuedCompletionStatusEx(loop->iocp,
                                          overlappeds,
                                          ARRAY_SIZE(overlappeds),
                                          &count,
                                          timeout,
                                          FALSE);

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    /* Placed here because on success the loop will break whether there is an
     * empty package or not, or if GetQueuedCompletionStatusEx returned early
     * then the timeout will be updated and the loop will run again. In either
     * case the idle time will need to be updated.
     */
    uv__metrics_update_idle_time(loop);

    if (success) {
      for (i = 0; i < count; i++) {
        /* Package was dequeued, but see if it is not a empty package
         * meant only to wake us up.
         */
        if (overlappeds[i].lpOverlapped) {
          uv__metrics_inc_events(loop, 1);
          if (actual_timeout == 0)
            uv__metrics_inc_events_waiting(loop, 1);
          req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
          uv__insert_pending_req(loop, req);
        }
      }

      /* Some time might have passed waiting for I/O,
       * so update the loop time here.
       */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached.
       */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round.
         */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}
/* Run the event loop until it is no longer alive or stopped. Each
 * iteration: process pending reqs, run idle and prepare handles, poll the
 * IOCP (possibly blocking), run check handles, endgames and timers.
 * UV_RUN_ONCE and UV_RUN_NOWAIT perform a single iteration. Returns
 * non-zero if the loop is still alive on exit. */
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
  DWORD timeout;
  int r;
  int can_sleep;

  r = uv__loop_alive(loop);
  if (!r)
    uv_update_time(loop);

  /* Maintain backwards compatibility by processing timers before entering the
   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
   * once, which should be done after polling in order to maintain proper
   * execution order of the conceptual event loop. */
  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
    uv_update_time(loop);
    uv__run_timers(loop);
  }

  while (r != 0 && loop->stop_flag == 0) {
    /* Blocking in the poll is only allowed when no immediate work was
     * queued before this iteration started. */
    can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;

    uv__process_reqs(loop);
    uv__idle_invoke(loop);
    uv__prepare_invoke(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__metrics_inc_loop_count(loop);

    uv__poll(loop, timeout);

    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
     * times to avoid loop starvation.*/
    for (r = 0; r < 8 && loop->pending_reqs_tail != NULL; r++)
      uv__process_reqs(loop);

    /* Run one final update on the provider_idle_time in case uv__poll*
     * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
     * the timeout == 0) or was already updated b/c an event was received.
     */
    uv__metrics_update_idle_time(loop);

    uv__check_invoke(loop);
    uv__process_endgames(loop);
    uv_update_time(loop);
    uv__run_timers(loop);

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets the compiler compile it to a conditional store.
   * Avoids dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
/* Fetch the OS handle (socket or HANDLE) backing a libuv handle.
 * Returns UV_EINVAL for handle types that have no fd and UV_EBADF when
 * the handle is closing or the fd is invalid. */
int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  uv_os_fd_t fd_out;

  switch (handle->type) {
  case UV_TCP:
    fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket;
    break;

  case UV_NAMED_PIPE:
    fd_out = ((uv_pipe_t*) handle)->handle;
    break;

  case UV_TTY:
    fd_out = ((uv_tty_t*) handle)->handle;
    break;

  case UV_UDP:
    fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket;
    break;

  case UV_POLL:
    fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}
/* Get or set a SOL_SOCKET option on a TCP or UDP handle's socket.
 * Convention: when *value is 0 the option is read (getsockopt) and stored
 * into *value; otherwise *value is written (setsockopt). Returns 0 or a
 * translated Winsock error. */
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int len;
  SOCKET socket;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP)
    socket = ((uv_tcp_t*) handle)->socket;
  else if (handle->type == UV_UDP)
    socket = ((uv_udp_t*) handle)->socket;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len);
  else
    r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len);

  if (r == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

  return 0;
}
/* Size of the processor affinity mask in bits (a DWORD_PTR bitmask). */
int uv_cpumask_size(void) {
  return 8 * (int) sizeof(DWORD_PTR);
}
/* Shared implementation for uv_*_getsockname/getpeername: resolve the
 * handle's fd, surface any connect-time delayed error first, then invoke
 * `func` (getsockname or getpeername) on the socket. Returns 0 or a
 * negative libuv error code. */
int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen,
                        int delayed_error) {
  int result;
  uv_os_fd_t fd;

  result = uv_fileno(handle, &fd);
  if (result != 0)
    return result;

  if (delayed_error)
    return uv_translate_sys_error(delayed_error);

  result = func((SOCKET) fd, name, namelen);
  if (result != 0)
    return uv_translate_sys_error(WSAGetLastError());

  return 0;
}

View File

@@ -0,0 +1,56 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include "winapi.h"
static void uv__register_system_resume_callback(void);
/* Set up detection of system suspend/resume so sleeping loops can be
 * woken after a resume. */
void uv__init_detect_system_wakeup(void) {
  /* Try registering system power event callback. This is the cleanest
   * method, but it will only work on Win8 and above.
   */
  uv__register_system_resume_callback();
}
/* Power-notification callback: on resume-from-suspend, wake every
 * registered loop so blocked polls re-evaluate their timers. */
static ULONG CALLBACK uv__system_resume_callback(PVOID Context,
                                                 ULONG Type,
                                                 PVOID Setting) {
  if (Type == PBT_APMRESUMESUSPEND || Type == PBT_APMRESUMEAUTOMATIC)
    uv__wake_all_loops();

  return 0;
}
/* Register uv__system_resume_callback() for suspend/resume notifications.
 * PowerRegisterSuspendResumeNotification is resolved at runtime (NULL on
 * pre-Win8 systems), in which case this is a silent no-op. The
 * registration handle is intentionally not kept: the callback lives for
 * the rest of the process. */
static void uv__register_system_resume_callback(void) {
  _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS recipient;
  _HPOWERNOTIFY registration_handle;

  if (pPowerRegisterSuspendResumeNotification == NULL)
    return;

  recipient.Callback = uv__system_resume_callback;
  recipient.Context = NULL;
  (*pPowerRegisterSuspendResumeNotification)(DEVICE_NOTIFY_CALLBACK,
                                             &recipient,
                                             &registration_handle);
}

View File

@@ -0,0 +1,135 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno);
/* Load a shared library. The WTF-8 filename is converted to UTF-16 on a
 * 32768-WCHAR stack buffer (the maximum extended path length) before
 * calling LoadLibraryExW. On failure, stores a human-readable message in
 * lib->errmsg (see uv_dlerror) and returns -1 via uv__dlerror(). */
int uv_dlopen(const char* filename, uv_lib_t* lib) {
  WCHAR filename_w[32768];
  ssize_t r;

  lib->handle = NULL;
  lib->errmsg = NULL;

  r = uv_wtf8_length_as_utf16(filename);
  if (r < 0)
    return uv__dlerror(lib, filename, ERROR_NO_UNICODE_TRANSLATION);
  /* NOTE(review): the `>` comparison assumes r already counts the
   * terminating NUL; verify against uv_wtf8_length_as_utf16's contract. */
  if ((size_t) r > ARRAY_SIZE(filename_w))
    return uv__dlerror(lib, filename, ERROR_INSUFFICIENT_BUFFER);
  uv_wtf8_to_utf16(filename, filename_w, r);

  lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
  if (lib->handle == NULL) {
    return uv__dlerror(lib, filename, GetLastError());
  }

  return 0;
}
/* Release a library opened with uv_dlopen(): free any stored error
 * message, then unload the module. Safe to call on an already-closed or
 * never-opened uv_lib_t. */
void uv_dlclose(uv_lib_t* lib) {
  if (lib->errmsg != NULL) {
    LocalFree((void*) lib->errmsg);
    lib->errmsg = NULL;
  }

  if (lib->handle != NULL) {
    /* FreeLibrary failures are ignored; there is no good way to report
     * them without leaking memory. */
    FreeLibrary(lib->handle);
    lib->handle = NULL;
  }
}
/* Look up an exported symbol. Stores the address in *ptr (NULL on
 * failure) and returns 0 on success, -1 with lib->errmsg set otherwise. */
int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
  /* Cast though integer to suppress pedantic warning about forbidden cast. */
  *ptr = (void*)(uintptr_t) GetProcAddress(lib->handle, name);
  return uv__dlerror(lib, "", *ptr ? 0 : GetLastError());
}
const char* uv_dlerror(const uv_lib_t* lib) {
return lib->errmsg ? lib->errmsg : "no error";
}
/* Last-resort error formatting: when FormatMessage cannot produce a text
 * for the error code, store "error: <number>" in lib->errmsg instead
 * (allocated by FormatMessageA, freed later by uv__dlerror/uv_dlclose). */
static void uv__format_fallback_error(uv_lib_t* lib, int errorno){
  static const CHAR fallback_error[] = "error: %1!d!";
  DWORD_PTR args[1];
  args[0] = (DWORD_PTR) errorno;

  FormatMessageA(FORMAT_MESSAGE_FROM_STRING |
                 FORMAT_MESSAGE_ARGUMENT_ARRAY |
                 FORMAT_MESSAGE_ALLOCATE_BUFFER,
                 fallback_error, 0, 0,
                 (LPSTR) &lib->errmsg,
                 0, (va_list*) args);
}
/* Record the message for `errorno` in lib->errmsg and return -1 (or 0 if
 * errorno is 0, i.e. no error). Tries, in order: the US-English system
 * message, the default-language system message (for MUI setups missing
 * the English resources), substituting the filename into
 * ERROR_BAD_EXE_FORMAT's "%1" placeholder, and finally a numeric
 * fallback string. */
static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno) {
  DWORD_PTR arg;
  DWORD res;
  char* msg;

  /* Drop any message from a previous failure. */
  if (lib->errmsg) {
    LocalFree(lib->errmsg);
    lib->errmsg = NULL;
  }

  if (errorno == 0)
    return 0;

  res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_FROM_SYSTEM |
                       FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
                       MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                       (LPSTR) &lib->errmsg, 0, NULL);

  if (!res && (GetLastError() == ERROR_MUI_FILE_NOT_FOUND ||
               GetLastError() == ERROR_RESOURCE_TYPE_NOT_FOUND)) {
    /* English resources unavailable; retry with the default language. */
    res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                         FORMAT_MESSAGE_FROM_SYSTEM |
                         FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
                         0, (LPSTR) &lib->errmsg, 0, NULL);
  }

  if (res && errorno == ERROR_BAD_EXE_FORMAT && strstr(lib->errmsg, "%1")) {
    /* The message contains a "%1" insert; re-format it with the filename
     * substituted in, then free the intermediate message. */
    msg = lib->errmsg;
    lib->errmsg = NULL;
    arg = (DWORD_PTR) filename;

    res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                         FORMAT_MESSAGE_ARGUMENT_ARRAY |
                         FORMAT_MESSAGE_FROM_STRING,
                         msg,
                         0, 0, (LPSTR) &lib->errmsg, 0, (va_list*) &arg);
    LocalFree(msg);
  }

  if (!res)
    uv__format_fallback_error(lib, errorno);

  return -1;
}

View File

@@ -0,0 +1,183 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "uv.h"
#include "internal.h"
/*
* Display an error message and abort the event loop.
*/
/*
 * Display an error message and abort the event loop.
 */
/* Print the system message for `errorno` (and the failing syscall name,
 * if given) to stderr, then break into the debugger and abort. Never
 * returns. */
void uv_fatal_error(const int errorno, const char* syscall) {
  char* buf = NULL;
  const char* errmsg;

  FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
                 FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
                 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);

  if (buf) {
    errmsg = buf;
  } else {
    errmsg = "Unknown error";
  }

  /* FormatMessage messages include a newline character already, so don't add
   * another. */
  if (syscall) {
    fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg);
  } else {
    fprintf(stderr, "(%d) %s", errorno, errmsg);
  }

  if (buf) {
    LocalFree(buf);
  }

  /* DebugBreak first so an attached debugger stops with context intact. */
  DebugBreak();
  abort();
}
/*
 * Translate a Windows system or Winsock error code (as returned by
 * GetLastError() / WSAGetLastError()) into the corresponding libuv UV_E*
 * error code.  Values <= 0 are passed through unchanged, on the assumption
 * that they already are libuv error codes.  Codes with no dedicated mapping
 * come back as UV_UNKNOWN.
 */
int uv_translate_sys_error(int sys_errno) {
  if (sys_errno <= 0) {
    return sys_errno; /* If < 0 then it's already a libuv error. */
  }

  switch (sys_errno) {
    case WSAEACCES: return UV_EACCES;
    case ERROR_ELEVATION_REQUIRED: return UV_EACCES;
    case ERROR_CANT_ACCESS_FILE: return UV_EACCES;
    case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
    case WSAEADDRINUSE: return UV_EADDRINUSE;
    case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
    case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT;
    case WSAEWOULDBLOCK: return UV_EAGAIN;
    case ERROR_NO_DATA: return UV_EAGAIN;
    case WSAEALREADY: return UV_EALREADY;
    case ERROR_INVALID_FLAGS: return UV_EBADF;
    case ERROR_INVALID_HANDLE: return UV_EBADF;
    case ERROR_LOCK_VIOLATION: return UV_EBUSY;
    case ERROR_PIPE_BUSY: return UV_EBUSY;
    case ERROR_SHARING_VIOLATION: return UV_EBUSY;
    case ERROR_OPERATION_ABORTED: return UV_ECANCELED;
    case WSAEINTR: return UV_ECANCELED;
    case ERROR_NO_UNICODE_TRANSLATION: return UV_ECHARSET;
    case ERROR_CONNECTION_ABORTED: return UV_ECONNABORTED;
    case WSAECONNABORTED: return UV_ECONNABORTED;
    case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED;
    case WSAECONNREFUSED: return UV_ECONNREFUSED;
    case ERROR_NETNAME_DELETED: return UV_ECONNRESET;
    case WSAECONNRESET: return UV_ECONNRESET;
    case ERROR_ALREADY_EXISTS: return UV_EEXIST;
    case ERROR_FILE_EXISTS: return UV_EEXIST;
    case ERROR_NOACCESS: return UV_EFAULT;
    case WSAEFAULT: return UV_EFAULT;
    case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH;
    case WSAEHOSTUNREACH: return UV_EHOSTUNREACH;
    case ERROR_INSUFFICIENT_BUFFER: return UV_EINVAL;
    case ERROR_INVALID_DATA: return UV_EINVAL;
    case ERROR_INVALID_PARAMETER: return UV_EINVAL;
    case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
    case WSAEINVAL: return UV_EINVAL;
    case WSAEPFNOSUPPORT: return UV_EINVAL;
    /* Mostly tape/removable-media hardware conditions: generic I/O error. */
    case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
    case ERROR_BUS_RESET: return UV_EIO;
    case ERROR_CRC: return UV_EIO;
    case ERROR_DEVICE_DOOR_OPEN: return UV_EIO;
    case ERROR_DEVICE_REQUIRES_CLEANING: return UV_EIO;
    case ERROR_DISK_CORRUPT: return UV_EIO;
    case ERROR_EOM_OVERFLOW: return UV_EIO;
    case ERROR_FILEMARK_DETECTED: return UV_EIO;
    case ERROR_GEN_FAILURE: return UV_EIO;
    case ERROR_INVALID_BLOCK_LENGTH: return UV_EIO;
    case ERROR_IO_DEVICE: return UV_EIO;
    case ERROR_NO_DATA_DETECTED: return UV_EIO;
    case ERROR_NO_SIGNAL_SENT: return UV_EIO;
    case ERROR_OPEN_FAILED: return UV_EIO;
    case ERROR_SETMARK_DETECTED: return UV_EIO;
    case ERROR_SIGNAL_REFUSED: return UV_EIO;
    case WSAEISCONN: return UV_EISCONN;
    case ERROR_CANT_RESOLVE_FILENAME: return UV_ELOOP;
    case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE;
    case WSAEMFILE: return UV_EMFILE;
    case WSAEMSGSIZE: return UV_EMSGSIZE;
    case ERROR_BUFFER_OVERFLOW: return UV_ENAMETOOLONG;
    case ERROR_FILENAME_EXCED_RANGE: return UV_ENAMETOOLONG;
    case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH;
    case WSAENETUNREACH: return UV_ENETUNREACH;
    case WSAENOBUFS: return UV_ENOBUFS;
    case ERROR_BAD_PATHNAME: return UV_ENOENT;
    case ERROR_DIRECTORY: return UV_ENOENT;
    case ERROR_ENVVAR_NOT_FOUND: return UV_ENOENT;
    case ERROR_FILE_NOT_FOUND: return UV_ENOENT;
    case ERROR_INVALID_NAME: return UV_ENOENT;
    case ERROR_INVALID_DRIVE: return UV_ENOENT;
    case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT;
    case ERROR_MOD_NOT_FOUND: return UV_ENOENT;
    case ERROR_PATH_NOT_FOUND: return UV_ENOENT;
    case WSAHOST_NOT_FOUND: return UV_ENOENT;
    case WSANO_DATA: return UV_ENOENT;
    case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM;
    case ERROR_OUTOFMEMORY: return UV_ENOMEM;
    case ERROR_CANNOT_MAKE: return UV_ENOSPC;
    case ERROR_DISK_FULL: return UV_ENOSPC;
    case ERROR_EA_TABLE_FULL: return UV_ENOSPC;
    case ERROR_END_OF_MEDIA: return UV_ENOSPC;
    case ERROR_HANDLE_DISK_FULL: return UV_ENOSPC;
    case ERROR_NOT_CONNECTED: return UV_ENOTCONN;
    case WSAENOTCONN: return UV_ENOTCONN;
    case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY;
    case WSAENOTSOCK: return UV_ENOTSOCK;
    case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
    case ERROR_BROKEN_PIPE: return UV_EOF;
    case ERROR_ACCESS_DENIED: return UV_EPERM;
    case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
    case ERROR_BAD_PIPE: return UV_EPIPE;
    case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE;
    case WSAESHUTDOWN: return UV_EPIPE;
    case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT;
    case ERROR_WRITE_PROTECT: return UV_EROFS;
    case ERROR_SEM_TIMEOUT: return UV_ETIMEDOUT;
    case WSAETIMEDOUT: return UV_ETIMEDOUT;
    case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
    case ERROR_INVALID_FUNCTION: return UV_EISDIR;
    case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG;
    case WSAESOCKTNOSUPPORT: return UV_ESOCKTNOSUPPORT;
    case ERROR_BAD_EXE_FORMAT: return UV_EFTYPE;
    default: return UV_UNKNOWN;
  }
}
/*
 * Like uv_translate_sys_error(), but for errors observed while writing:
 * the codes that indicate the read end of a pipe has gone away are mapped
 * onto UV_EPIPE, which is what writers expect to see.
 */
int uv_translate_write_sys_error(int sys_errno) {
  if (sys_errno == ERROR_BROKEN_PIPE || sys_errno == ERROR_NO_DATA)
    return UV_EPIPE;

  return uv_translate_sys_error(sys_errno);
}

View File

@@ -0,0 +1,620 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Size in bytes of the buffer that receives FILE_NOTIFY_INFORMATION
 * records from ReadDirectoryChangesW. */
const unsigned int uv_directory_watcher_buffer_size = 4096;

/*
 * (Re)arm the asynchronous ReadDirectoryChangesW request on the watched
 * directory.  Completion is delivered through the loop's IOCP and handled
 * by uv__process_fs_event_req().  If the call fails immediately, the error
 * is recorded in the request and the request is queued as pending, so the
 * failure is reported from the event loop instead of here.
 */
static void uv__fs_event_queue_readdirchanges(uv_loop_t* loop,
                                              uv_fs_event_t* handle) {
  assert(handle->dir_handle != INVALID_HANDLE_VALUE);
  assert(!handle->req_pending);

  memset(&(handle->req.u.io.overlapped), 0,
         sizeof(handle->req.u.io.overlapped));
  if (!ReadDirectoryChangesW(handle->dir_handle,
                             handle->buffer,
                             uv_directory_watcher_buffer_size,
                             (handle->flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
                             FILE_NOTIFY_CHANGE_FILE_NAME |
                               FILE_NOTIFY_CHANGE_DIR_NAME |
                               FILE_NOTIFY_CHANGE_ATTRIBUTES |
                               FILE_NOTIFY_CHANGE_SIZE |
                               FILE_NOTIFY_CHANGE_LAST_WRITE |
                               FILE_NOTIFY_CHANGE_LAST_ACCESS |
                               FILE_NOTIFY_CHANGE_CREATION |
                               FILE_NOTIFY_CHANGE_SECURITY,
                             NULL,
                             &handle->req.u.io.overlapped,
                             NULL)) {
    /* Make this req pending reporting an error. */
    SET_REQ_ERROR(&handle->req, GetLastError());
    uv__insert_pending_req(loop, (uv_req_t*)&handle->req);
  }

  /* Either the kernel or the pending-error path above now owns the req. */
  handle->req_pending = 1;
}
/*
 * Compute the path of `filename` relative to directory `dir` into a freshly
 * allocated, NUL-terminated string stored in *relpath.  `filename` must
 * start with `dir` (case-insensitively); the separating backslash is
 * skipped.  Aborts the process on allocation failure.
 */
static void uv__relative_path(const WCHAR* filename,
                              const WCHAR* dir,
                              WCHAR** relpath) {
  size_t prefix_len;
  size_t tail_len;
  WCHAR* out;

  prefix_len = wcslen(dir);
  assert(!_wcsnicmp(filename, dir, prefix_len));

  /* Ignore a trailing backslash on the directory prefix. */
  if (prefix_len > 0 && dir[prefix_len - 1] == '\\')
    prefix_len--;

  /* Everything past the prefix and the separating backslash. */
  tail_len = wcslen(filename) - prefix_len - 1;
  out = uv__malloc((tail_len + 1) * sizeof(WCHAR));
  if (out == NULL)
    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");

  wcsncpy(out, filename + prefix_len + 1, tail_len);
  out[tail_len] = L'\0';
  *relpath = out;
}
/*
 * Split `filename` at its last path separator into a directory part (*dir,
 * skipped when `dir` is NULL) and a file part (*file), both freshly
 * allocated.  When `filename` contains no separator the current working
 * directory is returned as the directory part.  A NULL `filename` yields
 * NULL outputs.  Returns 0 on success, -1 on failure (GetLastError() holds
 * the cause); aborts the process on allocation failure.
 */
static int uv__split_path(const WCHAR* filename, WCHAR** dir,
                          WCHAR** file) {
  size_t len, i;
  DWORD dir_len;

  if (filename == NULL) {
    if (dir != NULL)
      *dir = NULL;
    *file = NULL;
    return 0;
  }

  len = wcslen(filename);
  i = len;
  /* Scan backwards for the last '\\' or '/' separator. */
  while (i > 0 && filename[--i] != '\\' && filename[i] != '/');

  if (i == 0) {
    /* No separator found: the file is relative to the current directory. */
    if (dir) {
      /* First call sizes the buffer (length includes the NUL). */
      dir_len = GetCurrentDirectoryW(0, NULL);
      if (dir_len == 0) {
        return -1;
      }
      *dir = (WCHAR*)uv__malloc(dir_len * sizeof(WCHAR));
      if (!*dir) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }

      if (!GetCurrentDirectoryW(dir_len, *dir)) {
        uv__free(*dir);
        *dir = NULL;
        return -1;
      }
    }

    /* NOTE(review): _wcsdup() can return NULL on OOM; result is unchecked
     * here, unlike the uv__malloc() calls below. */
    *file = _wcsdup(filename);
  } else {
    if (dir) {
      /* Directory part keeps the trailing separator: chars [0, i]. */
      *dir = (WCHAR*)uv__malloc((i + 2) * sizeof(WCHAR));
      if (!*dir) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }
      wcsncpy(*dir, filename, i + 1);
      (*dir)[i + 1] = L'\0';
    }

    /* File part: everything after the separator. */
    *file = (WCHAR*)uv__malloc((len - i) * sizeof(WCHAR));
    if (!*file) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
    }
    wcsncpy(*file, filename + i + 1, len - i - 1);
    (*file)[len - i - 1] = L'\0';
  }

  return 0;
}
/*
 * Initialize a fs event handle on the given loop.  The handle starts out
 * inactive; call uv_fs_event_start() to begin watching a path.
 * Always returns 0.
 */
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT);

  /* No watched path, name filters or notification buffer yet. */
  handle->filew = NULL;
  handle->short_filew = NULL;
  handle->dirw = NULL;
  handle->buffer = NULL;
  handle->dir_handle = INVALID_HANDLE_VALUE;

  /* Set up the embedded request used for ReadDirectoryChangesW
   * completions. */
  UV_REQ_INIT(&handle->req, UV_FS_EVENT_REQ);
  handle->req.data = handle;
  handle->req_pending = 0;

  return 0;
}
/*
 * Start watching `path` for changes, invoking `cb` for each event.
 *
 * If `path` is a directory it is watched directly.  If it is a file, the
 * containing directory is watched instead and events are filtered by file
 * name -- against both the long name and the 8.3 short name, because
 * ReadDirectoryChangesW may report either form.
 *
 * Returns 0 on success, UV_EINVAL if the handle is already active, or a
 * libuv error code on failure.
 */
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int is_path_dir;
  size_t size;
  DWORD attr, last_error;
  WCHAR* dir = NULL, *dir_to_watch, *pathw = NULL;
  DWORD short_path_buffer_len;
  WCHAR *short_path_buffer;
  WCHAR* short_path, *long_path;

  short_path = NULL;
  if (uv__is_active(handle))
    return UV_EINVAL;

  handle->cb = cb;
  handle->path = uv__strdup(path);
  if (!handle->path) {
    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
  }

  uv__handle_start(handle);

  /* Here last_error holds a libuv error code (error_uv path); after this
   * point it holds raw Windows error codes (error path). */
  last_error = uv__convert_utf8_to_utf16(path, &pathw);
  if (last_error)
    goto error_uv;

  /* Determine whether path is a file or a directory. */
  attr = GetFileAttributesW(pathw);
  if (attr == INVALID_FILE_ATTRIBUTES) {
    last_error = GetLastError();
    goto error;
  }

  is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;

  if (is_path_dir) {
    /* path is a directory, so that's the directory that we will watch. */

    /* Convert to long path; on failure keep using `pathw` unchanged. */
    size = GetLongPathNameW(pathw, NULL, 0);
    if (size) {
      long_path = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
      if (!long_path) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }

      size = GetLongPathNameW(pathw, long_path, size);
      if (size) {
        long_path[size] = '\0';
      } else {
        uv__free(long_path);
        long_path = NULL;
      }

      if (long_path) {
        uv__free(pathw);
        pathw = long_path;
      }
    }

    dir_to_watch = pathw;
  } else {
    /*
     * path is a file. So we split path into dir & file parts, and
     * watch the dir directory.
     */

    /* Convert to short path.  Best effort: if it fails, short_path stays
     * NULL and only the long file name will be matched. */
    short_path_buffer = NULL;
    short_path_buffer_len = GetShortPathNameW(pathw, NULL, 0);
    if (short_path_buffer_len == 0) {
      goto short_path_done;
    }
    short_path_buffer = uv__malloc(short_path_buffer_len * sizeof(WCHAR));
    if (short_path_buffer == NULL) {
      goto short_path_done;
    }
    if (GetShortPathNameW(pathw,
                          short_path_buffer,
                          short_path_buffer_len) == 0) {
      uv__free(short_path_buffer);
      short_path_buffer = NULL;
    }
short_path_done:
    short_path = short_path_buffer;

    if (uv__split_path(pathw, &dir, &handle->filew) != 0) {
      last_error = GetLastError();
      goto error;
    }

    if (uv__split_path(short_path, NULL, &handle->short_filew) != 0) {
      last_error = GetLastError();
      goto error;
    }

    dir_to_watch = dir;
    uv__free(short_path);
    short_path = NULL;
    uv__free(pathw);
    pathw = NULL;
  }

  /* FILE_FLAG_BACKUP_SEMANTICS is required to open a directory handle;
   * FILE_FLAG_OVERLAPPED enables IOCP-based completion. */
  handle->dir_handle = CreateFileW(dir_to_watch,
                                   FILE_LIST_DIRECTORY,
                                   FILE_SHARE_READ | FILE_SHARE_DELETE |
                                     FILE_SHARE_WRITE,
                                   NULL,
                                   OPEN_EXISTING,
                                   FILE_FLAG_BACKUP_SEMANTICS |
                                     FILE_FLAG_OVERLAPPED,
                                   NULL);

  if (dir) {
    uv__free(dir);
    dir = NULL;
  }

  if (handle->dir_handle == INVALID_HANDLE_VALUE) {
    last_error = GetLastError();
    goto error;
  }

  /* Route completion packets for this directory through the loop's IOCP. */
  if (CreateIoCompletionPort(handle->dir_handle,
                             handle->loop->iocp,
                             (ULONG_PTR)handle,
                             0) == NULL) {
    last_error = GetLastError();
    goto error;
  }

  /* Lazily allocate the notification buffer; it is only released in
   * uv__fs_event_endgame(), so it survives stop/start cycles. */
  if (!handle->buffer) {
    handle->buffer = (char*)uv__malloc(uv_directory_watcher_buffer_size);
  }
  if (!handle->buffer) {
    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
  }

  memset(&(handle->req.u.io.overlapped), 0,
         sizeof(handle->req.u.io.overlapped));

  if (!ReadDirectoryChangesW(handle->dir_handle,
                             handle->buffer,
                             uv_directory_watcher_buffer_size,
                             (flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
                             FILE_NOTIFY_CHANGE_FILE_NAME |
                               FILE_NOTIFY_CHANGE_DIR_NAME |
                               FILE_NOTIFY_CHANGE_ATTRIBUTES |
                               FILE_NOTIFY_CHANGE_SIZE |
                               FILE_NOTIFY_CHANGE_LAST_WRITE |
                               FILE_NOTIFY_CHANGE_LAST_ACCESS |
                               FILE_NOTIFY_CHANGE_CREATION |
                               FILE_NOTIFY_CHANGE_SECURITY,
                             NULL,
                             &handle->req.u.io.overlapped,
                             NULL)) {
    last_error = GetLastError();
    goto error;
  }

  /* `dirw` doubles as the "watching a whole directory" flag in
   * uv__process_fs_event_req(): non-NULL only in the directory case. */
  assert(is_path_dir ? pathw != NULL : pathw == NULL);
  handle->dirw = pathw;
  handle->req_pending = 1;
  return 0;

error:
  last_error = uv_translate_sys_error(last_error);

error_uv:
  /* Unified cleanup: release everything acquired above. */
  if (handle->path) {
    uv__free(handle->path);
    handle->path = NULL;
  }

  if (handle->filew) {
    uv__free(handle->filew);
    handle->filew = NULL;
  }

  if (handle->short_filew) {
    uv__free(handle->short_filew);
    handle->short_filew = NULL;
  }

  uv__free(pathw);

  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
    CloseHandle(handle->dir_handle);
    handle->dir_handle = INVALID_HANDLE_VALUE;
  }

  if (handle->buffer) {
    uv__free(handle->buffer);
    handle->buffer = NULL;
  }

  if (uv__is_active(handle))
    uv__handle_stop(handle);

  uv__free(short_path);

  return last_error;
}
/*
 * Stop watching: close the directory handle (which cancels any outstanding
 * ReadDirectoryChangesW request), deactivate the handle and release the
 * path strings captured by uv_fs_event_start().  The notification buffer is
 * deliberately kept; it is freed in uv__fs_event_endgame().
 * Returns 0; stopping an inactive handle is a no-op.
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
    CloseHandle(handle->dir_handle);
    handle->dir_handle = INVALID_HANDLE_VALUE;
  }

  uv__handle_stop(handle);

  if (handle->filew != NULL) {
    uv__free(handle->filew);
    handle->filew = NULL;
  }

  if (handle->short_filew != NULL) {
    uv__free(handle->short_filew);
    handle->short_filew = NULL;
  }

  if (handle->path != NULL) {
    uv__free(handle->path);
    handle->path = NULL;
  }

  if (handle->dirw != NULL) {
    uv__free(handle->dirw);
    handle->dirw = NULL;
  }

  return 0;
}
/*
 * Case-insensitively compare NUL-terminated `str` against a file name that
 * is `file_name_len` BYTES long and not NUL-terminated (as reported in
 * FILE_NOTIFY_INFORMATION).  Returns 0 on a match, nonzero otherwise;
 * a NULL `str` never matches.
 */
static int file_info_cmp(WCHAR* str, WCHAR* file_name, size_t file_name_len) {
  size_t n;

  if (str == NULL)
    return -1;

  /* FileNameLength is in bytes; convert to a character count.  Strings of
   * different lengths can never be equal, so bail out early. */
  n = wcslen(str);
  if (n != file_name_len / sizeof(WCHAR))
    return -1;

  return _wcsnicmp(str, file_name, n);
}
/*
 * IOCP completion handler for a uv_fs_event_t's ReadDirectoryChangesW
 * request.  Walks the FILE_NOTIFY_INFORMATION records in handle->buffer,
 * converts each matching file name to UTF-8 and invokes the user callback,
 * then either queues the endgame (when closing) or re-arms the watcher.
 */
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
    uv_fs_event_t* handle) {
  FILE_NOTIFY_INFORMATION* file_info;
  int err, sizew, size;
  char* filename = NULL;
  WCHAR* filenamew = NULL;
  WCHAR* long_filenamew = NULL;
  DWORD offset = 0;

  assert(req->type == UV_FS_EVENT_REQ);
  assert(handle->req_pending);
  handle->req_pending = 0;

  /* Don't report any callbacks if:
   * - We're closing, just push the handle onto the endgame queue
   * - We are not active, just ignore the callback
   */
  if (!uv__is_active(handle)) {
    if (handle->flags & UV_HANDLE_CLOSING) {
      uv__want_endgame(loop, (uv_handle_t*) handle);
    }
    return;
  }

  file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);

  if (REQ_SUCCESS(req)) {
    /* InternalHigh is the number of bytes transferred; 0 means the buffer
     * overflowed and the specific changes were lost. */
    if (req->u.io.overlapped.InternalHigh > 0) {
      do {
        /* Advance to the next record (offset is 0 on the first pass). */
        file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset);
        assert(!filename);
        assert(!filenamew);
        assert(!long_filenamew);

        /*
         * Fire the event only if we were asked to watch a directory,
         * or if the filename filter matches.
         */
        if (handle->dirw ||
            file_info_cmp(handle->filew,
                          file_info->FileName,
                          file_info->FileNameLength) == 0 ||
            file_info_cmp(handle->short_filew,
                          file_info->FileName,
                          file_info->FileNameLength) == 0) {

          if (handle->dirw) {
            /*
             * We attempt to resolve the long form of the file name explicitly.
             * We only do this for file names that might still exist on disk.
             * If this fails, we use the name given by ReadDirectoryChangesW.
             * This may be the long form or the 8.3 short name in some cases.
             */
            if (file_info->Action != FILE_ACTION_REMOVED &&
                file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) {
              /* Construct a full path to the file. */
              size = wcslen(handle->dirw) +
                file_info->FileNameLength / sizeof(WCHAR) + 2;

              filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
              if (!filenamew) {
                uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
              }

              /* FileName is not NUL-terminated; bound with %.*s. */
              _snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw,
                file_info->FileNameLength / (DWORD)sizeof(WCHAR),
                file_info->FileName);

              filenamew[size - 1] = L'\0';

              /* Convert to long name. */
              size = GetLongPathNameW(filenamew, NULL, 0);

              if (size) {
                long_filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
                if (!long_filenamew) {
                  uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
                }

                size = GetLongPathNameW(filenamew, long_filenamew, size);
                if (size) {
                  long_filenamew[size] = '\0';
                } else {
                  uv__free(long_filenamew);
                  long_filenamew = NULL;
                }
              }

              uv__free(filenamew);

              if (long_filenamew) {
                /* Get the file name out of the long path. */
                uv__relative_path(long_filenamew,
                                  handle->dirw,
                                  &filenamew);
                uv__free(long_filenamew);
                long_filenamew = filenamew;
                sizew = -1; /* NUL-terminated */
              } else {
                /* We couldn't get the long filename, use the one reported. */
                filenamew = file_info->FileName;
                sizew = file_info->FileNameLength / sizeof(WCHAR);
              }
            } else {
              /*
               * Removed or renamed events cannot be resolved to the long form.
               * We therefore use the name given by ReadDirectoryChangesW.
               * This may be the long form or the 8.3 short name in some cases.
               */
              filenamew = file_info->FileName;
              sizew = file_info->FileNameLength / sizeof(WCHAR);
            }
          } else {
            /* We already have the long name of the file, so just use it. */
            filenamew = handle->filew;
            sizew = -1;
          }

          /* Convert the filename to utf8. */
          uv__convert_utf16_to_utf8(filenamew, sizew, &filename);

          switch (file_info->Action) {
            case FILE_ACTION_ADDED:
            case FILE_ACTION_REMOVED:
            case FILE_ACTION_RENAMED_OLD_NAME:
            case FILE_ACTION_RENAMED_NEW_NAME:
              handle->cb(handle, filename, UV_RENAME, 0);
              break;

            case FILE_ACTION_MODIFIED:
              handle->cb(handle, filename, UV_CHANGE, 0);
              break;
          }

          uv__free(filename);
          filename = NULL;
          uv__free(long_filenamew);
          long_filenamew = NULL;
          filenamew = NULL;
        }

        offset = file_info->NextEntryOffset;
      } while (offset && !(handle->flags & UV_HANDLE_CLOSING));
    } else {
      /* Buffer overflow: something changed but we don't know what. */
      handle->cb(handle, NULL, UV_CHANGE, 0);
    }
  } else {
    err = GET_REQ_ERROR(req);

    /*
     * Check whether the ERROR_ACCESS_DENIED is caused by the watched directory
     * being actually deleted (not an actual error) or a legit error. Retrieve
     * FileStandardInfo to check whether the directory is pending deletion.
     */
    FILE_STANDARD_INFO info;
    if (err == ERROR_ACCESS_DENIED &&
        handle->dirw != NULL &&
        GetFileInformationByHandleEx(handle->dir_handle,
                                     FileStandardInfo,
                                     &info,
                                     sizeof(info)) &&
        info.Directory &&
        info.DeletePending) {
      /* Report the directory's own removal as a rename event. */
      uv__convert_utf16_to_utf8(handle->dirw, -1, &filename);
      handle->cb(handle, filename, UV_RENAME, 0);
      uv__free(filename);
      filename = NULL;
    } else {
      handle->cb(handle, NULL, 0, uv_translate_sys_error(err));
    }
  }

  if (handle->flags & UV_HANDLE_CLOSING) {
    uv__want_endgame(loop, (uv_handle_t*)handle);
  } else if (uv__is_active(handle)) {
    uv__fs_event_queue_readdirchanges(loop, handle);
  }
}
/*
 * Begin closing a fs event handle: stop watching and mark it as closing.
 * If a ReadDirectoryChangesW request is still in flight, the endgame is
 * deferred until uv__process_fs_event_req() sees the request complete.
 */
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
  uv__handle_closing(handle);

  if (!handle->req_pending)
    uv__want_endgame(loop, (uv_handle_t*) handle);
}
/*
 * Finish closing a fs event handle once no request is outstanding:
 * free the notification buffer and complete the handle close.
 */
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
  /* Wait until both the close was requested and the in-flight
   * ReadDirectoryChangesW request (if any) has been reaped. */
  if (!(handle->flags & UV_HANDLE_CLOSING) || handle->req_pending)
    return;

  assert(!(handle->flags & UV_HANDLE_CLOSED));

  if (handle->buffer != NULL) {
    uv__free(handle->buffer);
    handle->buffer = NULL;
  }

  uv__handle_close(handle);
}

View File

@@ -0,0 +1,200 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_FS_FD_HASH_INL_H_
#define UV_WIN_FS_FD_HASH_INL_H_
#include "uv.h"
#include "internal.h"
/* Files are only inserted in uv__fd_hash when the UV_FS_O_FILEMAP flag is
* specified. Thus, when uv__fd_hash_get returns true, the file mapping in the
* info structure should be used for read/write operations.
*
* If the file is empty, the mapping field will be set to
* INVALID_HANDLE_VALUE. This is not an issue since the file mapping needs to
* be created anyway when the file size changes.
*
* Since file descriptors are sequential integers, the modulo operator is used
* as hashing function. For each bucket, a single linked list of arrays is
* kept to minimize allocations. A statically allocated memory buffer is kept
* for the first array in each bucket. */
/* Number of buckets; an fd selects its bucket via fd % UV__FD_HASH_SIZE. */
#define UV__FD_HASH_SIZE 256
/* Number of entries per group in a bucket's linked list of groups. */
#define UV__FD_HASH_GROUP_SIZE 16

/* Per-fd bookkeeping for UV_FS_O_FILEMAP files (see comment above). */
struct uv__fd_info_s {
  int flags;                  /* open flags recorded for the fd */
  BOOLEAN is_directory;
  HANDLE mapping;             /* file mapping; INVALID_HANDLE_VALUE if empty */
  LARGE_INTEGER size;
  LARGE_INTEGER current_pos;
};

struct uv__fd_hash_entry_s {
  uv_file fd;
  struct uv__fd_info_s info;
};

/* Groups form a singly linked list per bucket, newest group first (see
 * uv__fd_hash_add). */
struct uv__fd_hash_entry_group_s {
  struct uv__fd_hash_entry_s entries[UV__FD_HASH_GROUP_SIZE];
  struct uv__fd_hash_entry_group_s* next;
};

struct uv__fd_hash_bucket_s {
  size_t size;    /* total number of entries stored in this bucket */
  struct uv__fd_hash_entry_group_s* data;
};

/* Guards all access to uv__fd_hash below. */
static uv_mutex_t uv__fd_hash_mutex;

/* Statically allocated backing storage for each bucket's first group,
 * assigned in uv__fd_hash_init(); further groups are heap-allocated. */
static struct uv__fd_hash_entry_group_s
  uv__fd_hash_entry_initial[UV__FD_HASH_SIZE * UV__FD_HASH_GROUP_SIZE];
static struct uv__fd_hash_bucket_s uv__fd_hash[UV__FD_HASH_SIZE];
INLINE static void uv__fd_hash_init(void) {
size_t i;
int err;
err = uv_mutex_init(&uv__fd_hash_mutex);
if (err) {
uv_fatal_error(err, "uv_mutex_init");
}
for (i = 0; i < ARRAY_SIZE(uv__fd_hash); ++i) {
uv__fd_hash[i].size = 0;
uv__fd_hash[i].data =
uv__fd_hash_entry_initial + i * UV__FD_HASH_GROUP_SIZE;
}
}
/* Declares the locals shared by the get/add/remove helpers below: the
 * bucket selected by `fd` (modulo hashing), a cursor over entry groups, and
 * `entry_ptr`, which FIND_IN_BUCKET_PTR() sets to the matching entry or
 * leaves NULL when `fd` is absent. */
#define FIND_COMMON_VARIABLES \
  unsigned i; \
  unsigned bucket = fd % ARRAY_SIZE(uv__fd_hash); \
  struct uv__fd_hash_entry_s* entry_ptr = NULL; \
  struct uv__fd_hash_entry_group_s* group_ptr; \
  struct uv__fd_hash_bucket_s* bucket_ptr = &uv__fd_hash[bucket];

/* Scan one entry group for `fd`, setting `entry_ptr` on a hit. */
#define FIND_IN_GROUP_PTR(group_size) \
  do { \
    for (i = 0; i < group_size; ++i) { \
      if (group_ptr->entries[i].fd == fd) { \
        entry_ptr = &group_ptr->entries[i]; \
        break; \
      } \
    } \
  } while (0)

/* Scan the whole bucket for `fd`.  Only the first (newest) group may be
 * partially filled; all subsequent groups are full. */
#define FIND_IN_BUCKET_PTR() \
  do { \
    size_t first_group_size = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE; \
    if (bucket_ptr->size != 0 && first_group_size == 0) \
      first_group_size = UV__FD_HASH_GROUP_SIZE; \
    group_ptr = bucket_ptr->data; \
    FIND_IN_GROUP_PTR(first_group_size); \
    for (group_ptr = group_ptr->next; \
         group_ptr != NULL && entry_ptr == NULL; \
         group_ptr = group_ptr->next) \
      FIND_IN_GROUP_PTR(UV__FD_HASH_GROUP_SIZE); \
  } while (0)
/* Look up `fd`; on a hit copy its bookkeeping into *info and return
 * nonzero.  Returns 0 when `fd` has no entry (i.e. it was not opened with
 * UV_FS_O_FILEMAP; see the comment at the top of this file). */
INLINE static int uv__fd_hash_get(int fd, struct uv__fd_info_s* info) {
  FIND_COMMON_VARIABLES

  uv_mutex_lock(&uv__fd_hash_mutex);

  FIND_IN_BUCKET_PTR();

  if (entry_ptr != NULL) {
    *info = entry_ptr->info;
  }

  uv_mutex_unlock(&uv__fd_hash_mutex);
  return entry_ptr != NULL;
}
/* Insert `fd` with the given info, or overwrite the info of an existing
 * entry.  New entries fill the bucket's first group; when it is full, a
 * fresh group is heap-allocated and prepended.  Aborts the process on
 * allocation failure. */
INLINE static void uv__fd_hash_add(int fd, struct uv__fd_info_s* info) {
  FIND_COMMON_VARIABLES

  uv_mutex_lock(&uv__fd_hash_mutex);

  FIND_IN_BUCKET_PTR();

  if (entry_ptr == NULL) {
    /* Index of the next free slot in the bucket's first group. */
    i = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE;

    if (bucket_ptr->size != 0 && i == 0) {
      /* First group is full: prepend a new, empty group. */
      struct uv__fd_hash_entry_group_s* new_group_ptr =
        uv__malloc(sizeof(*new_group_ptr));
      if (new_group_ptr == NULL) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }
      new_group_ptr->next = bucket_ptr->data;
      bucket_ptr->data = new_group_ptr;
    }

    bucket_ptr->size += 1;
    entry_ptr = &bucket_ptr->data->entries[i];
    entry_ptr->fd = fd;
  }

  entry_ptr->info = *info;

  uv_mutex_unlock(&uv__fd_hash_mutex);
}
/* Remove `fd` from the hash.  On success, copy its last recorded info into
 * *info and return nonzero; return 0 when `fd` was not present.  The
 * vacated slot is back-filled with the last occupied entry of the first
 * group, and a heap-allocated group that becomes empty is freed (the
 * statically allocated initial group is never freed, since it is the last
 * group in the chain). */
INLINE static int uv__fd_hash_remove(int fd, struct uv__fd_info_s* info) {
  FIND_COMMON_VARIABLES

  uv_mutex_lock(&uv__fd_hash_mutex);

  FIND_IN_BUCKET_PTR();

  if (entry_ptr != NULL) {
    *info = entry_ptr->info;

    bucket_ptr->size -= 1;

    /* Index of the last occupied slot in the bucket's first group. */
    i = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE;
    if (entry_ptr != &bucket_ptr->data->entries[i]) {
      /* Move the last entry into the vacated slot to keep groups dense. */
      *entry_ptr = bucket_ptr->data->entries[i];
    }

    if (bucket_ptr->size != 0 &&
        bucket_ptr->size % UV__FD_HASH_GROUP_SIZE == 0) {
      /* The first group is now empty: unlink and free it. */
      struct uv__fd_hash_entry_group_s* old_group_ptr = bucket_ptr->data;
      bucket_ptr->data = old_group_ptr->next;
      uv__free(old_group_ptr);
    }
  }

  uv_mutex_unlock(&uv__fd_hash_mutex);
  return entry_ptr != NULL;
}
#undef FIND_COMMON_VARIABLES
#undef FIND_IN_GROUP_PTR
#undef FIND_IN_BUCKET_PTR
#endif /* UV_WIN_FS_FD_HASH_INL_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,388 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "req-inl.h"
#include "idna.h"
/* EAI_* constants. */
#include <winsock2.h>
/* Needed for ConvertInterfaceIndexToLuid and ConvertInterfaceLuidToNameA */
#include <iphlpapi.h>
/*
 * Translate a getaddrinfo()-family Winsock error code into the
 * corresponding libuv UV_EAI_* code.  Codes without a dedicated UV_EAI_*
 * mapping fall back to the generic uv_translate_sys_error().
 */
int uv__getaddrinfo_translate_error(int sys_err) {
  switch (sys_err) {
    case 0: return 0;
    case WSATRY_AGAIN: return UV_EAI_AGAIN;
    case WSAEINVAL: return UV_EAI_BADFLAGS;
    case WSANO_RECOVERY: return UV_EAI_FAIL;
    case WSAEAFNOSUPPORT: return UV_EAI_FAMILY;
    case WSA_NOT_ENOUGH_MEMORY: return UV_EAI_MEMORY;
    case WSAHOST_NOT_FOUND: return UV_EAI_NONAME;
    case WSATYPE_NOT_FOUND: return UV_EAI_SERVICE;
    case WSAESOCKTNOSUPPORT: return UV_EAI_SOCKTYPE;
    default: return uv_translate_sys_error(sys_err);
  }
}
/*
 * Fallback declarations for toolchains whose headers lack GetAddrInfoW and
 * friends (old MinGW).  These presumably mirror the <ws2tcpip.h>
 * declarations -- keep them in sync with the Windows SDK.
 */
#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR)
typedef struct addrinfoW {
  int ai_flags;
  int ai_family;
  int ai_socktype;
  int ai_protocol;
  size_t ai_addrlen;
  WCHAR* ai_canonname;
  struct sockaddr* ai_addr;
  struct addrinfoW* ai_next;
} ADDRINFOW, *PADDRINFOW;

DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node,
                                        const WCHAR* service,
                                        const ADDRINFOW* hints,
                                        PADDRINFOW* result);

DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo);
#endif
/* Round `off` up to the next multiple of `alignment` (must be > 0).
 * Used to keep structs and pointers carved out of one flat allocation
 * suitably aligned. */
static size_t align_offset(size_t off, size_t alignment) {
  size_t remainder;

  remainder = off % alignment;
  if (remainder == 0)
    return off;

  return off + alignment - remainder;
}
#ifndef NDIS_IF_MAX_STRING_SIZE
#define NDIS_IF_MAX_STRING_SIZE IF_MAX_STRING_SIZE
#endif
/*
 * Threadpool worker: perform the blocking GetAddrInfoW() call.
 * req->addrinfow doubles as input and output: it carries the hints prepared
 * by uv_getaddrinfo() in, and the OS-allocated UTF-16 result list out.
 * The translated status is stored in req->retcode.
 */
static void uv__getaddrinfo_work(struct uv__work* w) {
  uv_getaddrinfo_t* req;
  struct addrinfoW* hints;
  int err;

  req = container_of(w, uv_getaddrinfo_t, work_req);
  hints = req->addrinfow;
  req->addrinfow = NULL;
  err = GetAddrInfoW(req->node, req->service, hints, &req->addrinfow);
  req->retcode = uv__getaddrinfo_translate_error(err);
}
/*
 * Called from uv_run when complete. Call user specified callback
 * then free returned addrinfo
 * Returned addrinfo strings are converted from UTF-16 to UTF-8.
 *
 * To minimize allocation we calculate total size required,
 * and copy all structs and referenced strings into the one block.
 * Each size calculation is adjusted to avoid unaligned pointers.
 * The resulting single block is released by uv_freeaddrinfo().
 *
 * FIX: on a canonical-name conversion failure the code used to jump
 * straight to `complete`, skipping the FreeAddrInfoW() call and leaking the
 * OS-allocated result list.  It now jumps to `cleanup` so the list is
 * always released.  (The UV_ECANCELED path must still skip FreeAddrInfoW():
 * there the work callback never ran, so req->addrinfow still points at the
 * hints inside the already-freed req->alloc block.)
 */
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);

  /* release input parameter memory (node/service/hints copies) */
  uv__free(req->alloc);
  req->alloc = NULL;

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
    goto complete;
  }

  if (req->retcode == 0) {
    char* alloc_ptr = NULL;
    size_t cur_off = 0;
    size_t addrinfo_len;

    /* Convert addrinfoW to addrinfo. First calculate required length. */
    struct addrinfoW* addrinfow_ptr = req->addrinfow;
    while (addrinfow_ptr != NULL) {
      cur_off = align_offset(cur_off, sizeof(void*));
      cur_off += sizeof(struct addrinfo);
      /* TODO: This alignment could be smaller, if we could
         portably get the alignment for sockaddr. */
      cur_off = align_offset(cur_off, sizeof(void*));
      cur_off += addrinfow_ptr->ai_addrlen;
      if (addrinfow_ptr->ai_canonname != NULL) {
        ssize_t name_len =
            uv_utf16_length_as_wtf8(addrinfow_ptr->ai_canonname, -1);
        if (name_len < 0) {
          /* Invalid UTF-16 in the canonical name: report the error, but
           * go through `cleanup` so the OS-allocated result list is freed
           * (jumping to `complete` here would leak it). */
          req->retcode = name_len;
          goto cleanup;
        }
        cur_off += name_len + 1;
      }
      addrinfow_ptr = addrinfow_ptr->ai_next;
    }

    /* allocate memory for addrinfo results */
    addrinfo_len = cur_off;
    alloc_ptr = uv__malloc(addrinfo_len);

    /* do conversions */
    if (alloc_ptr != NULL) {
      struct addrinfo *addrinfo_ptr = (struct addrinfo *)alloc_ptr;
      cur_off = 0;
      addrinfow_ptr = req->addrinfow;

      for (;;) {
        cur_off += sizeof(struct addrinfo);
        assert(cur_off <= addrinfo_len);

        /* copy addrinfo struct data */
        addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
        addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
        addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
        addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
        addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
        addrinfo_ptr->ai_canonname = NULL;
        addrinfo_ptr->ai_addr = NULL;
        addrinfo_ptr->ai_next = NULL;

        /* copy sockaddr into the same block, after the struct */
        if (addrinfo_ptr->ai_addrlen > 0) {
          cur_off = align_offset(cur_off, sizeof(void *));
          addrinfo_ptr->ai_addr = (struct sockaddr *)(alloc_ptr + cur_off);
          cur_off += addrinfo_ptr->ai_addrlen;
          assert(cur_off <= addrinfo_len);
          memcpy(addrinfo_ptr->ai_addr,
                 addrinfow_ptr->ai_addr,
                 addrinfo_ptr->ai_addrlen);
        }

        /* convert canonical name to UTF-8, also into the same block */
        if (addrinfow_ptr->ai_canonname != NULL) {
          ssize_t name_len = addrinfo_len - cur_off;
          addrinfo_ptr->ai_canonname = alloc_ptr + cur_off;
          /* Cannot fail: the length was validated in the sizing pass. */
          int r = uv__copy_utf16_to_utf8(addrinfow_ptr->ai_canonname,
                                         -1,
                                         addrinfo_ptr->ai_canonname,
                                         (size_t*)&name_len);
          assert(r == 0);
          cur_off += name_len + 1;
          assert(cur_off <= addrinfo_len);
        }

        /* set next ptr */
        addrinfow_ptr = addrinfow_ptr->ai_next;
        if (addrinfow_ptr == NULL)
          break;
        cur_off = align_offset(cur_off, sizeof(void *));
        struct addrinfo *next_addrinfo_ptr = (struct addrinfo *)(alloc_ptr + cur_off);
        addrinfo_ptr->ai_next = next_addrinfo_ptr;
        addrinfo_ptr = next_addrinfo_ptr;
      }
      req->addrinfo = (struct addrinfo*)alloc_ptr;
    } else {
      req->retcode = UV_EAI_MEMORY;
    }
  }

cleanup:
  /* return the OS-allocated UTF-16 result list to the system */
  if (req->addrinfow != NULL) {
    FreeAddrInfoW(req->addrinfow);
    req->addrinfow = NULL;
  }

complete:
  uv__req_unregister(req->loop);

  /* finally do callback with converted result */
  if (req->getaddrinfo_cb)
    req->getaddrinfo_cb(req, req->retcode, req->addrinfo);
}
/*
 * Free an addrinfo list previously returned through uv_getaddrinfo().
 * The whole list -- structs, sockaddrs and canonical names -- lives in one
 * contiguous allocation (see uv__getaddrinfo_done), so a single free
 * releases everything.  A NULL argument is a no-op.
 */
void uv_freeaddrinfo(struct addrinfo* ai) {
  uv__free(ai);
}
/*
* Entry point for getaddrinfo
* we convert the UTF-8 strings to UNICODE
* and save the UNICODE string pointers in the req
* We also copy hints so that caller does not need to keep memory until the
* callback.
* return 0 if a callback will be made
* return error code if validation fails
*
* To minimize allocation we calculate total size required,
* and copy all structs and referenced strings into the one block.
* Each size calculation is adjusted to avoid unaligned pointers.
*/
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* req,
                   uv_getaddrinfo_cb getaddrinfo_cb,
                   const char* node,
                   const char* service,
                   const struct addrinfo* hints) {
  char hostname_ascii[256];  /* IDNA/ASCII form of `node` */
  size_t off = 0;            /* running total size of the single allocation */
  size_t nodesize = 0;
  size_t servicesize = 0;
  size_t serviceoff = 0;     /* byte offset of the service copy in the block */
  size_t hintssize = 0;
  size_t hintoff = 0;        /* byte offset of the hints copy in the block */
  ssize_t rc;

  /* At least one of node/service must be given. */
  if (req == NULL || (node == NULL && service == NULL)) {
    return UV_EINVAL;
  }

  UV_REQ_INIT(req, UV_GETADDRINFO);
  req->getaddrinfo_cb = getaddrinfo_cb;
  req->addrinfo = NULL;
  req->loop = loop;
  req->retcode = 0;

  /* calculate required memory size for all input values */
  if (node != NULL) {
    /* Convert the node name to its IDNA ASCII ("punycode") form first; the
     * result replaces `node` for the rest of this function.  Its UTF-16
     * copy is placed at offset 0 of the allocation. */
    rc = uv__idna_toascii(node,
                          node + strlen(node),
                          hostname_ascii,
                          hostname_ascii + sizeof(hostname_ascii));
    if (rc < 0)
      return rc;
    nodesize = strlen(hostname_ascii) + 1;  /* +1: include the NUL */
    node = hostname_ascii;
    off += nodesize * sizeof(WCHAR);
  }

  if (service != NULL) {
    rc = uv_wtf8_length_as_utf16(service);
    if (rc < 0)
      return rc;
    servicesize = rc;
    off = align_offset(off, sizeof(WCHAR));  /* keep the WCHAR copy aligned */
    serviceoff = off;
    off += servicesize * sizeof(WCHAR);
  }

  if (hints != NULL) {
    off = align_offset(off, sizeof(void *)); /* struct needs ptr alignment */
    hintoff = off;
    hintssize = sizeof(struct addrinfoW);
    off += hintssize;
  }

  /* allocate memory for inputs, and partition it as needed */
  req->alloc = uv__malloc(off);
  if (!req->alloc)
    return UV_ENOMEM;

  /* Convert node string to UTF16 into allocated memory and save pointer in the
   * request. The node here has been converted to ascii. */
  if (node != NULL) {
    req->node = (WCHAR*) req->alloc;
    uv_wtf8_to_utf16(node, req->node, nodesize);
  } else {
    req->node = NULL;
  }

  /* Convert service string to UTF16 into allocated memory and save pointer in
   * the req. */
  if (service != NULL) {
    req->service = (WCHAR*) ((char*) req->alloc + serviceoff);
    uv_wtf8_to_utf16(service, req->service, servicesize);
  } else {
    req->service = NULL;
  }

  /* Copy hints to allocated memory and save pointer in req.  Copied
   * field-by-field: only the input fields of struct addrinfoW are taken
   * from the caller; the output fields are cleared. */
  if (hints != NULL) {
    req->addrinfow = (struct addrinfoW*) ((char*) req->alloc + hintoff);
    req->addrinfow->ai_family = hints->ai_family;
    req->addrinfow->ai_socktype = hints->ai_socktype;
    req->addrinfow->ai_protocol = hints->ai_protocol;
    req->addrinfow->ai_flags = hints->ai_flags;
    req->addrinfow->ai_addrlen = 0;
    req->addrinfow->ai_canonname = NULL;
    req->addrinfow->ai_addr = NULL;
    req->addrinfow->ai_next = NULL;
  } else {
    req->addrinfow = NULL;
  }

  uv__req_register(loop);

  if (getaddrinfo_cb) {
    /* Asynchronous: resolve on the threadpool; callback runs from the
     * loop thread. */
    uv__work_submit(loop,
                    &req->work_req,
                    UV__WORK_SLOW_IO,
                    uv__getaddrinfo_work,
                    uv__getaddrinfo_done);
    return 0;
  } else {
    /* Synchronous: run work and completion inline; req->retcode carries
     * the result. */
    uv__getaddrinfo_work(&req->work_req);
    uv__getaddrinfo_done(&req->work_req, 0);
    return req->retcode;
  }
}
/*
 * Map an interface index to its OS interface name, UTF-8 encoded.
 * `*size` is the capacity of `buffer` on input; the converted result is
 * stored by uv__copy_utf16_to_utf8() (see internal.h).
 * Returns 0 on success, UV_EINVAL on bad arguments, or a translated
 * system error.
 */
int uv_if_indextoname(unsigned int ifindex, char* buffer, size_t* size) {
  NET_LUID luid;
  wchar_t wname[NDIS_IF_MAX_STRING_SIZE + 1]; /* Add one for the NUL. */
  int r;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* index -> LUID -> wide-char name, then convert the name to UTF-8. */
  r = ConvertInterfaceIndexToLuid(ifindex, &luid);

  if (r != 0)
    return uv_translate_sys_error(r);

  r = ConvertInterfaceLuidToNameW(&luid, wname, ARRAY_SIZE(wname));

  if (r != 0)
    return uv_translate_sys_error(r);

  return uv__copy_utf16_to_utf8(wname, -1, buffer, size);
}
/*
 * Map an interface index to its interface-identifier string; on Windows the
 * IID is simply the decimal interface index.
 *
 * `*size` is the capacity of `buffer` on input.  On success `*size` is set
 * to the string length (excluding the NUL) and 0 is returned.  When the
 * buffer is too small, UV_ENOBUFS is returned and `*size` is set to the
 * required capacity (including the NUL).
 */
int uv_if_indextoiid(unsigned int ifindex, char* buffer, size_t* size) {
  int r;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  /* Use %u: ifindex is unsigned int, so %d would be undefined behavior for
   * values above INT_MAX (printf conversion must match the argument type). */
  r = snprintf(buffer, *size, "%u", ifindex);

  if (r < 0)
    return uv_translate_sys_error(r);

  if (r >= (int) *size) {
    /* Truncated: report the capacity needed for the full string + NUL. */
    *size = r + 1;
    return UV_ENOBUFS;
  }

  *size = r;
  return 0;
}

View File

@@ -0,0 +1,146 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdio.h>
#include "uv.h"
#include "internal.h"
#include "req-inl.h"
#ifndef GetNameInfo
int WSAAPI GetNameInfoW(
const SOCKADDR *pSockaddr,
socklen_t SockaddrLength,
PWCHAR pNodeBuffer,
DWORD NodeBufferSize,
PWCHAR pServiceBuffer,
DWORD ServiceBufferSize,
INT Flags
);
#endif
/*
 * Threadpool worker: performs the reverse lookup with GetNameInfoW and
 * converts the wide-character results to UTF-8 into req->host and
 * req->service.  On failure only req->retcode is set; the result is
 * delivered later by uv__getnameinfo_done().
 */
static void uv__getnameinfo_work(struct uv__work* w) {
  uv_getnameinfo_t* req;
  WCHAR host[NI_MAXHOST];
  WCHAR service[NI_MAXSERV];
  size_t size;
  int ret;

  req = container_of(w, uv_getnameinfo_t, work_req);
  if (GetNameInfoW((struct sockaddr*)&req->storage,
                   sizeof(req->storage),
                   host,
                   ARRAY_SIZE(host),
                   service,
                   ARRAY_SIZE(service),
                   req->flags)) {
    /* Non-zero return means failure; fetch the winsock error immediately
     * and translate it into a libuv UV_EAI_* code. */
    ret = WSAGetLastError();
    req->retcode = uv__getaddrinfo_translate_error(ret);
    return;
  }

  /* Convert the UTF-16 host name to UTF-8.  `size` passes the capacity of
   * the destination buffer (see uv__copy_utf16_to_utf8 in internal.h). */
  size = sizeof(req->host);
  ret = uv__copy_utf16_to_utf8(host, -1, req->host, &size);
  if (ret < 0) {
    req->retcode = ret;
    return;
  }

  /* Convert the UTF-16 service name to UTF-8. */
  size = sizeof(req->service);
  ret = uv__copy_utf16_to_utf8(service, -1, req->service, &size);
  if (ret < 0) {
    req->retcode = ret;
  }
}
/*
 * Completion callback, invoked on the loop thread by uv_run once the
 * threadpool work has finished (or was cancelled).  Passes NULL strings to
 * the user callback unless the lookup succeeded.
 */
static void uv__getnameinfo_done(struct uv__work* w, int status) {
  uv_getnameinfo_t* req;
  char* host_result;
  char* service_result;

  req = container_of(w, uv_getnameinfo_t, work_req);
  uv__req_unregister(req->loop);

  host_result = NULL;
  service_result = NULL;

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
  } else if (req->retcode == 0) {
    /* Success: hand the converted UTF-8 strings to the callback. */
    host_result = req->host;
    service_result = req->service;
  }

  if (req->getnameinfo_cb)
    req->getnameinfo_cb(req, req->retcode, host_result, service_result);
}
/*
 * Entry point for getnameinfo.
 * Returns 0 if a callback will be made, or an error code when validation
 * fails.  Only AF_INET and AF_INET6 addresses are accepted; the sockaddr
 * is copied into the request so the caller's buffer may go away
 * immediately.
 */
int uv_getnameinfo(uv_loop_t* loop,
                   uv_getnameinfo_t* req,
                   uv_getnameinfo_cb getnameinfo_cb,
                   const struct sockaddr* addr,
                   int flags) {
  size_t addr_len;

  if (req == NULL || addr == NULL)
    return UV_EINVAL;

  switch (addr->sa_family) {
    case AF_INET:
      addr_len = sizeof(struct sockaddr_in);
      break;
    case AF_INET6:
      addr_len = sizeof(struct sockaddr_in6);
      break;
    default:
      return UV_EINVAL;
  }

  /* Snapshot the address into the request. */
  memcpy(&req->storage, addr, addr_len);

  UV_REQ_INIT(req, UV_GETNAMEINFO);
  uv__req_register(loop);

  req->getnameinfo_cb = getnameinfo_cb;
  req->flags = flags;
  req->loop = loop;
  req->retcode = 0;

  if (getnameinfo_cb == NULL) {
    /* Synchronous mode: run the lookup inline and report its result. */
    uv__getnameinfo_work(&req->work_req);
    uv__getnameinfo_done(&req->work_req, 0);
    return req->retcode;
  }

  /* Asynchronous mode: resolve on the threadpool. */
  uv__work_submit(loop,
                  &req->work_req,
                  UV__WORK_SLOW_IO,
                  uv__getnameinfo_work,
                  uv__getnameinfo_done);
  return 0;
}

View File

@@ -0,0 +1,180 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_HANDLE_INL_H_
#define UV_WIN_HANDLE_INL_H_
#include <assert.h>
#include <io.h>
#include "uv.h"
#include "internal.h"
/* Drop one active-operation reference from `handle`; when the count reaches
 * zero on a handle that is not closing, uv__handle_stop() it. */
#define DECREASE_ACTIVE_COUNT(loop, handle)                               \
  do {                                                                    \
    if (--(handle)->activecnt == 0 &&                                     \
        !((handle)->flags & UV_HANDLE_CLOSING)) {                         \
      uv__handle_stop((handle));                                          \
    }                                                                     \
    assert((handle)->activecnt >= 0);                                     \
  } while (0)

/* Add one active-operation reference; the 0 -> 1 transition
 * uv__handle_start()s the handle. */
#define INCREASE_ACTIVE_COUNT(loop, handle)                               \
  do {                                                                    \
    if ((handle)->activecnt++ == 0) {                                     \
      uv__handle_start((handle));                                         \
    }                                                                     \
    assert((handle)->activecnt > 0);                                      \
  } while (0)

/* Retire one pending request; when a closing handle has no requests left,
 * queue it for endgame processing.  Expects a `loop` variable in scope. */
#define DECREASE_PENDING_REQ_COUNT(handle)                                \
  do {                                                                    \
    assert(handle->reqs_pending > 0);                                     \
    handle->reqs_pending--;                                               \
                                                                          \
    if (handle->flags & UV_HANDLE_CLOSING &&                              \
        handle->reqs_pending == 0) {                                      \
      uv__want_endgame(loop, (uv_handle_t*)handle);                       \
    }                                                                     \
  } while (0)

/* Move `handle` into the CLOSING state (asserts it is not already closing)
 * and clear its ACTIVE flag.  If the handle was not both active and ref'd
 * it is added to the loop's active-handle set here -- NOTE(review):
 * presumably so the loop stays alive until the close completes; confirm
 * against uv__active_handle_add in uv-common.h. */
#define uv__handle_closing(handle)                                        \
  do {                                                                    \
    assert(!((handle)->flags & UV_HANDLE_CLOSING));                       \
                                                                          \
    if (!(((handle)->flags & UV_HANDLE_ACTIVE) &&                         \
          ((handle)->flags & UV_HANDLE_REF)))                             \
      uv__active_handle_add((uv_handle_t*) (handle));                     \
                                                                          \
    (handle)->flags |= UV_HANDLE_CLOSING;                                 \
    (handle)->flags &= ~UV_HANDLE_ACTIVE;                                 \
  } while (0)

/* Finish closing: unlink from the loop's handle queue, drop the
 * active-handle entry, mark CLOSED, and run the user's close callback
 * (if one was supplied to uv_close). */
#define uv__handle_close(handle)                                          \
  do {                                                                    \
    uv__queue_remove(&(handle)->handle_queue);                            \
    uv__active_handle_rm((uv_handle_t*) (handle));                        \
                                                                          \
    (handle)->flags |= UV_HANDLE_CLOSED;                                  \
                                                                          \
    if ((handle)->close_cb)                                               \
      (handle)->close_cb((uv_handle_t*) (handle));                        \
  } while (0)
/* Queue `handle` on the loop's endgame list exactly once; the list is
 * drained by uv__process_endgames(). */
INLINE static void uv__want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
  if (handle->flags & UV_HANDLE_ENDGAME_QUEUED)
    return;  /* already queued */

  handle->flags |= UV_HANDLE_ENDGAME_QUEUED;

  /* Push onto the singly-linked LIFO list. */
  handle->endgame_next = loop->endgame_handles;
  loop->endgame_handles = handle;
}
/*
 * Drain the loop's endgame list: finish closing every handle that was
 * queued via uv__want_endgame().  Handles are popped LIFO; each one is
 * dispatched to its type-specific endgame routine.
 */
INLINE static void uv__process_endgames(uv_loop_t* loop) {
  uv_handle_t* handle;

  while (loop->endgame_handles) {
    /* Pop the head and clear its queued flag before dispatching. */
    handle = loop->endgame_handles;
    loop->endgame_handles = handle->endgame_next;

    handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED;

    switch (handle->type) {
      case UV_TCP:
        uv__tcp_endgame(loop, (uv_tcp_t*) handle);
        break;

      case UV_NAMED_PIPE:
        uv__pipe_endgame(loop, (uv_pipe_t*) handle);
        break;

      case UV_TTY:
        uv__tty_endgame(loop, (uv_tty_t*) handle);
        break;

      case UV_UDP:
        uv__udp_endgame(loop, (uv_udp_t*) handle);
        break;

      case UV_POLL:
        uv__poll_endgame(loop, (uv_poll_t*) handle);
        break;

      case UV_TIMER:
        /* Timers have no dedicated endgame helper; close inline. */
        uv__timer_close((uv_timer_t*) handle);
        uv__handle_close(handle);
        break;

      case UV_PREPARE:
      case UV_CHECK:
      case UV_IDLE:
        uv__loop_watcher_endgame(loop, handle);
        break;

      case UV_ASYNC:
        uv__async_endgame(loop, (uv_async_t*) handle);
        break;

      case UV_SIGNAL:
        uv__signal_endgame(loop, (uv_signal_t*) handle);
        break;

      case UV_PROCESS:
        uv__process_endgame(loop, (uv_process_t*) handle);
        break;

      case UV_FS_EVENT:
        uv__fs_event_endgame(loop, (uv_fs_event_t*) handle);
        break;

      case UV_FS_POLL:
        uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle);
        break;

      default:
        assert(0);  /* unknown handle type: programming error */
        break;
    }
  }
}
/* Translate a CRT file descriptor to its underlying Windows HANDLE without
 * tripping the CRT's debug-build assertion on invalid fds. */
INLINE static HANDLE uv__get_osfhandle(int fd)
{
  /* _get_osfhandle() raises an assert in debug builds if the FD is invalid.
   * But it also correctly checks the FD and returns INVALID_HANDLE_VALUE for
   * invalid FDs in release builds (or if you let the assert continue). So this
   * wrapper function disables asserts when calling _get_osfhandle. */
  HANDLE handle;
  UV_BEGIN_DISABLE_CRT_ASSERT();
  handle = (HANDLE) _get_osfhandle(fd);
  UV_END_DISABLE_CRT_ASSERT();
  return handle;
}
#endif /* UV_WIN_HANDLE_INL_H_ */

View File

@@ -0,0 +1,162 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include <stdlib.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
/*
 * Guess the libuv handle type that corresponds to an open file descriptor
 * by inspecting the underlying OS handle.
 */
uv_handle_type uv_guess_handle(uv_file file) {
  HANDLE os_handle;
  DWORD file_type;
  DWORD console_mode;

  if (file < 0)
    return UV_UNKNOWN_HANDLE;

  os_handle = uv__get_osfhandle(file);
  file_type = GetFileType(os_handle);

  if (file_type == FILE_TYPE_CHAR) {
    /* A character device that answers GetConsoleMode() is a console (TTY);
     * any other character device is treated as a plain file. */
    if (GetConsoleMode(os_handle, &console_mode))
      return UV_TTY;
    return UV_FILE;
  }

  if (file_type == FILE_TYPE_PIPE)
    return UV_NAMED_PIPE;

  if (file_type == FILE_TYPE_DISK)
    return UV_FILE;

  return UV_UNKNOWN_HANDLE;
}
/* A handle counts as active only while its ACTIVE flag is set and no close
 * has been requested. */
int uv_is_active(const uv_handle_t* handle) {
  return (handle->flags & (UV_HANDLE_ACTIVE | UV_HANDLE_CLOSING)) ==
         UV_HANDLE_ACTIVE;
}
/*
 * Request an asynchronous close of `handle`; `cb` is remembered and runs
 * later, during endgame processing, once the handle's pending operations
 * have been dealt with.  Calling uv_close() on an already-closing handle is
 * a caller bug: it asserts in debug builds and is ignored in release
 * builds.
 */
void uv_close(uv_handle_t* handle, uv_close_cb cb) {
  uv_loop_t* loop = handle->loop;

  if (handle->flags & UV_HANDLE_CLOSING) {
    assert(0);
    return;
  }

  handle->close_cb = cb;

  /* Handle-specific close actions */
  switch (handle->type) {
    case UV_TCP:
      uv__tcp_close(loop, (uv_tcp_t*)handle);
      return;

    case UV_NAMED_PIPE:
      uv__pipe_close(loop, (uv_pipe_t*) handle);
      return;

    case UV_TTY:
      uv__tty_close((uv_tty_t*) handle);
      return;

    case UV_UDP:
      uv__udp_close(loop, (uv_udp_t*) handle);
      return;

    case UV_POLL:
      uv__poll_close(loop, (uv_poll_t*) handle);
      return;

    /* The simple watcher types all follow one pattern: stop the watcher,
     * flag the handle as closing, and queue it for endgame processing. */
    case UV_TIMER:
      uv_timer_stop((uv_timer_t*)handle);
      uv__handle_closing(handle);
      uv__want_endgame(loop, handle);
      return;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*)handle);
      uv__handle_closing(handle);
      uv__want_endgame(loop, handle);
      return;

    case UV_CHECK:
      uv_check_stop((uv_check_t*)handle);
      uv__handle_closing(handle);
      uv__want_endgame(loop, handle);
      return;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*)handle);
      uv__handle_closing(handle);
      uv__want_endgame(loop, handle);
      return;

    case UV_ASYNC:
      uv__async_close(loop, (uv_async_t*) handle);
      return;

    case UV_SIGNAL:
      uv__signal_close(loop, (uv_signal_t*) handle);
      return;

    case UV_PROCESS:
      uv__process_close(loop, (uv_process_t*) handle);
      return;

    case UV_FS_EVENT:
      uv__fs_event_close(loop, (uv_fs_event_t*) handle);
      return;

    case UV_FS_POLL:
      /* Not endgame-queued here -- presumably the poller queues the
       * endgame itself once its internals have shut down; see
       * uv__fs_poll_close.  TODO(review): confirm. */
      uv__fs_poll_close((uv_fs_poll_t*) handle);
      uv__handle_closing(handle);
      return;

    default:
      /* Not supported */
      abort();
  }
}
/* True from the moment uv_close() is called until the handle is fully
 * closed (either the CLOSING or the CLOSED flag is set). */
int uv_is_closing(const uv_handle_t* handle) {
  return (handle->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0;
}
/* Public wrapper: map a CRT file descriptor to its OS handle (see
 * uv__get_osfhandle in handle-inl.h for the CRT-assert handling). */
uv_os_fd_t uv_get_osfhandle(int fd) {
  return uv__get_osfhandle(fd);
}
/* Wrap an OS handle in a new CRT file descriptor (flags 0 = default mode).
 * Returns the fd, or -1 on failure per the _open_osfhandle convention. */
int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return _open_osfhandle((intptr_t) os_fd, 0);
}

View File

@@ -0,0 +1,335 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_INTERNAL_H_
#define UV_WIN_INTERNAL_H_
#include "uv.h"
#include "../uv-common.h"
#include "uv/tree.h"
#include "winapi.h"
#include "winsock.h"
#ifdef _MSC_VER
# define INLINE __inline
# define UV_THREAD_LOCAL __declspec( thread )
#else
# define INLINE inline
# define UV_THREAD_LOCAL __thread
#endif
#ifdef _DEBUG
/* Per-thread switch -- presumably consulted by a custom CRT report hook
 * defined elsewhere -- used to suppress CRT assertion dialogs around calls
 * that are expected to trip them (e.g. _get_osfhandle on a bad fd).
 * TODO(review): confirm where uv__crt_assert_enabled is read. */
extern UV_THREAD_LOCAL int uv__crt_assert_enabled;

/* Bracket a region in which CRT asserts are disabled.  The pair expands to
 * an open/close block so the saved state lives in a local variable; the
 * BEGIN and END macros must therefore be used in matched pairs within one
 * scope. */
#define UV_BEGIN_DISABLE_CRT_ASSERT()                           \
  {                                                             \
    int uv__saved_crt_assert_enabled = uv__crt_assert_enabled;  \
    uv__crt_assert_enabled = FALSE;

#define UV_END_DISABLE_CRT_ASSERT()                             \
    uv__crt_assert_enabled = uv__saved_crt_assert_enabled;      \
  }
#else
/* Release builds: the CRT does not assert, so these are no-ops. */
#define UV_BEGIN_DISABLE_CRT_ASSERT()
#define UV_END_DISABLE_CRT_ASSERT()
#endif
/*
* TCP
*/
typedef enum {
UV__IPC_SOCKET_XFER_NONE = 0,
UV__IPC_SOCKET_XFER_TCP_CONNECTION,
UV__IPC_SOCKET_XFER_TCP_SERVER
} uv__ipc_socket_xfer_type_t;
typedef struct {
WSAPROTOCOL_INFOW socket_info;
uint32_t delayed_error;
} uv__ipc_socket_xfer_info_t;
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv__tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req);
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req);
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req);
void uv__process_tcp_shutdown_req(uv_loop_t* loop,
uv_tcp_t* stream,
uv_shutdown_t* req);
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv__tcp_xfer_export(uv_tcp_t* handle,
int pid,
uv__ipc_socket_xfer_type_t* xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info);
int uv__tcp_xfer_import(uv_tcp_t* tcp,
uv__ipc_socket_xfer_type_t xfer_type,
uv__ipc_socket_xfer_info_t* xfer_info);
/*
* UDP
*/
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req);
void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
* Pipes
*/
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv__pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
void uv__pipe_read_stop(uv_pipe_t* handle);
int uv__pipe_write(uv_loop_t* loop,
uv_write_t* req,
uv_pipe_t* handle,
const uv_buf_t bufs[],
size_t nbufs,
uv_stream_t* send_handle,
uv_write_cb cb);
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t* req);
void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* req);
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req);
void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req);
void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req);
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req);
void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
/*
* TTY
*/
void uv__console_init(void);
int uv__tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv__tty_read_stop(uv_tty_t* handle);
int uv__tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv__tty_close(uv_tty_t* handle);
void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req);
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
#define uv__process_tty_accept_req(loop, handle, req) abort()
#define uv__process_tty_connect_req(loop, handle, req) abort()
void uv__process_tty_shutdown_req(uv_loop_t* loop,
uv_tty_t* stream,
uv_shutdown_t* req);
void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
/*
* Poll watchers
*/
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req);
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
/*
* Loop watchers
*/
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv__prepare_invoke(uv_loop_t* loop);
void uv__check_invoke(uv_loop_t* loop);
void uv__idle_invoke(uv_loop_t* loop);
void uv__once_init(void);
/*
* Async watcher
*/
void uv__async_close(uv_loop_t* loop, uv_async_t* handle);
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req);
/*
* Signal watcher
*/
void uv__signals_init(void);
int uv__signal_dispatch(int signum);
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req);
/*
* Spawn
*/
void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv__process_close(uv_loop_t* loop, uv_process_t* handle);
void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* FS
*/
void uv__fs_init(void);
/*
* FS Event
*/
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle);
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
/*
* Stat poller.
*/
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
/*
* Utilities.
*/
void uv__util_init(void);
uint64_t uv__hrtime(unsigned int scale);
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
int uv__convert_utf16_to_utf8(const WCHAR* utf16, size_t utf16len, char** utf8);
int uv__copy_utf16_to_utf8(const WCHAR* utf16, size_t utf16len, char* utf8, size_t *size);
int uv__convert_utf8_to_utf16(const char* utf8, WCHAR** utf16);
typedef int (WINAPI *uv__peersockfunc)(SOCKET, struct sockaddr*, int*);
int uv__getsockpeername(const uv_handle_t* handle,
uv__peersockfunc func,
struct sockaddr* name,
int* namelen,
int delayed_error);
int uv__random_rtlgenrandom(void* buf, size_t buflen);
/*
* Process stdio handles.
*/
int uv__stdio_create(uv_loop_t* loop,
const uv_process_options_t* options,
BYTE** buffer_ptr);
void uv__stdio_destroy(BYTE* buffer);
void uv__stdio_noinherit(BYTE* buffer);
int uv__stdio_verify(BYTE* buffer, WORD size);
WORD uv__stdio_size(BYTE* buffer);
HANDLE uv__stdio_handle(BYTE* buffer, int fd);
/*
* Winapi and ntapi utility functions
*/
void uv__winapi_init(void);
/*
* Winsock utility functions
*/
void uv__winsock_init(void);
int uv__ntstatus_to_winsock_error(NTSTATUS status);
BOOL uv__get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv__get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
int WSAAPI uv__wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv__wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv__msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
AFD_POLL_INFO* info_out, OVERLAPPED* overlapped);
/* Whether there are any non-IFS LSPs stacked on TCP */
extern int uv_tcp_non_ifs_lsp_ipv4;
extern int uv_tcp_non_ifs_lsp_ipv6;
/* Ip address used to bind to any port at any interface */
extern struct sockaddr_in uv_addr_ip4_any_;
extern struct sockaddr_in6 uv_addr_ip6_any_;
/*
* Wake all loops with fake message
*/
void uv__wake_all_loops(void);
/*
* Init system wake-up detection
*/
void uv__init_detect_system_wakeup(void);
int uv_translate_write_sys_error(int sys_errno);
#endif /* UV_WIN_INTERNAL_H_ */

View File

@@ -0,0 +1,122 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
/* Finish closing a prepare/check/idle handle once it has been queued for
 * endgame processing; handles that are not closing are left untouched. */
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
  if (!(handle->flags & UV_HANDLE_CLOSING))
    return;

  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;
  uv__handle_close(handle);
}
/*
 * Template for the three loop-watcher handle types (prepare, check, idle).
 * For each type it generates:
 *   uv_<name>_init()    - initialize the handle for `loop`
 *   uv_<name>_start()   - register `cb` and push the handle onto the
 *                         loop's doubly-linked <name> list (no-op when
 *                         already active; UV_EINVAL when cb is NULL)
 *   uv_<name>_stop()    - unlink the handle and deactivate it (no-op when
 *                         already stopped)
 *   uv__<name>_invoke() - run the callback of every registered handle; the
 *                         loop's next_<name>_handle cursor lets a callback
 *                         stop any watcher (including itself) without
 *                         corrupting the iteration.
 */
#define UV_LOOP_WATCHER_DEFINE(name, NAME)                                \
  int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) {          \
    uv__handle_init(loop, (uv_handle_t*) handle, UV_##NAME);              \
                                                                          \
    return 0;                                                             \
  }                                                                       \
                                                                          \
                                                                          \
  int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) {       \
    uv_loop_t* loop = handle->loop;                                       \
    uv_##name##_t* old_head;                                              \
                                                                          \
    assert(handle->type == UV_##NAME);                                    \
                                                                          \
    if (uv__is_active(handle))                                            \
      return 0;                                                           \
                                                                          \
    if (cb == NULL)                                                       \
      return UV_EINVAL;                                                   \
                                                                          \
    old_head = loop->name##_handles;                                      \
                                                                          \
    handle->name##_next = old_head;                                       \
    handle->name##_prev = NULL;                                           \
                                                                          \
    if (old_head) {                                                       \
      old_head->name##_prev = handle;                                     \
    }                                                                     \
                                                                          \
    loop->name##_handles = handle;                                        \
                                                                          \
    handle->name##_cb = cb;                                               \
    uv__handle_start(handle);                                             \
                                                                          \
    return 0;                                                             \
  }                                                                       \
                                                                          \
                                                                          \
  int uv_##name##_stop(uv_##name##_t* handle) {                           \
    uv_loop_t* loop = handle->loop;                                       \
                                                                          \
    assert(handle->type == UV_##NAME);                                    \
                                                                          \
    if (!uv__is_active(handle))                                           \
      return 0;                                                           \
                                                                          \
    /* Update loop head if needed */                                      \
    if (loop->name##_handles == handle) {                                 \
      loop->name##_handles = handle->name##_next;                         \
    }                                                                     \
                                                                          \
    /* Update the iterator-next pointer if needed */                      \
    if (loop->next_##name##_handle == handle) {                           \
      loop->next_##name##_handle = handle->name##_next;                   \
    }                                                                     \
                                                                          \
    if (handle->name##_prev) {                                            \
      handle->name##_prev->name##_next = handle->name##_next;             \
    }                                                                     \
    if (handle->name##_next) {                                            \
      handle->name##_next->name##_prev = handle->name##_prev;             \
    }                                                                     \
                                                                          \
    uv__handle_stop(handle);                                              \
                                                                          \
    return 0;                                                             \
  }                                                                       \
                                                                          \
                                                                          \
  void uv__##name##_invoke(uv_loop_t* loop) {                             \
    uv_##name##_t* handle;                                                \
                                                                          \
    (loop)->next_##name##_handle = (loop)->name##_handles;                \
                                                                          \
    while ((loop)->next_##name##_handle != NULL) {                        \
      handle = (loop)->next_##name##_handle;                              \
      (loop)->next_##name##_handle = handle->name##_next;                 \
                                                                          \
      handle->name##_cb(handle);                                          \
    }                                                                     \
  }

/* Instantiate the three watcher types. */
UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,586 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Provider GUIDs of the MSAFD winsock service providers recognized by the
 * poll implementation.  NOTE(review): the individual GUIDs are not
 * labelled in this file -- presumably the MSAFD TCP/UDP catalog entries;
 * confirm against the installed winsock provider catalog. */
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}},
  {0xa00943d9, 0x9c2e, 0x4633,
      {0x9b, 0x59, 0x00, 0x57, 0xa3, 0x16, 0x09, 0x94}}
};

/* An fd_set sized for exactly one socket. */
typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;

/* Shared dummy OVERLAPPED (lazily initialized via uv_once) and a scratch
 * AFD_POLL_INFO -- handed out by uv__get_overlapped_dummy() and
 * uv__get_afd_poll_info_dummy() below. */
static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
static AFD_POLL_INFO afd_poll_info_dummy_;
/* One-time initializer for overlapped_dummy_ (run via uv_once).  Setting
 * the low-order bit of hEvent tells the kernel not to queue a completion
 * packet to the IOCP for operations that use this OVERLAPPED -- documented
 * behavior of GetQueuedCompletionStatus. */
static void uv__init_overlapped_dummy(void) {
  HANDLE event;

  event = CreateEvent(NULL, TRUE, TRUE, NULL);
  if (event == NULL)
    uv_fatal_error(GetLastError(), "CreateEvent");

  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}
/* Lazily initialize and return the shared dummy OVERLAPPED; thread-safe
 * via uv_once. */
static OVERLAPPED* uv__get_overlapped_dummy(void) {
  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  return &overlapped_dummy_;
}
/* Return the shared scratch AFD_POLL_INFO -- presumably for requests whose
 * poll result is ignored; confirm at the call sites. */
static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
  return &afd_poll_info_dummy_;
}
/*
 * Submit one AFD poll request for `handle` using whichever of the two
 * per-handle requests is currently idle.  Keeping two requests lets a new
 * event mask be submitted while the previous poll is still outstanding;
 * mask_events_1/2 record the events the *other* in-flight request was
 * submitted for, so duplicate notifications can be filtered out later
 * (see uv__fast_poll_process_poll_req).
 */
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  AFD_POLL_INFO* afd_poll_info;
  int result;

  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Just wait until there's an unsubmitted req. This will happen almost
     * immediately as one of the 2 outstanding requests is about to return.
     * When this happens, uv__fast_poll_process_poll_req will be called, and
     * the pending events, if needed, will be processed in a subsequent
     * request. */
    return;
  }

  /* Setting Exclusive to TRUE makes the other poll request return if there is
   * any. */
  afd_poll_info->Exclusive = TRUE;
  afd_poll_info->NumberOfHandles = 1;
  afd_poll_info->Timeout.QuadPart = INT64_MAX;  /* wait indefinitely */
  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info->Handles[0].Status = 0;
  afd_poll_info->Handles[0].Events = 0;

  /* Translate the requested UV_* event mask into AFD_POLL_* bits. */
  if (handle->events & UV_READABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
  } else {
    if (handle->events & UV_DISCONNECT) {
      afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
    }
  }
  if (handle->events & UV_WRITABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  /* Issue the ioctl on the peer socket; the poll info struct is used for
   * both input and output. */
  result = uv__msafd_poll((SOCKET) handle->peer_socket,
                          afd_poll_info,
                          afd_poll_info,
                          &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, req);
  }
}
/* Handle completion of an AFD poll request: translate the reported AFD events
 * back to libuv events, invoke the user callback, and resubmit or finish
 * closing as appropriate. */
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;
  /* Figure out which of the two request slots completed and mark it free. */
  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }
  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }
  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;
    /* Map AFD event bits back onto the libuv event mask. */
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
        events |= UV_DISCONNECT;
      }
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }
    /* Drop events the user no longer watches, and events that this slot was
     * masked for (they are reported by the other outstanding request). */
    events &= handle->events & ~mask_events;
    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }
  /* Resubmit if some watched events are not covered by an outstanding
   * request; otherwise, if closing and fully drained, enter endgame. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}
/* Create a peer socket for the same Winsock provider as `protocol_info` and
 * associate it with the loop's completion port. AFD poll requests are issued
 * against this peer socket. Returns the socket, or INVALID_SOCKET on
 * failure. */
static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
    WSAPROTOCOL_INFOW* protocol_info) {
  SOCKET sock = 0;
  sock = WSASocketW(protocol_info->iAddressFamily,
                    protocol_info->iSocketType,
                    protocol_info->iProtocol,
                    protocol_info,
                    0,
                    WSA_FLAG_OVERLAPPED);
  if (sock == INVALID_SOCKET) {
    return INVALID_SOCKET;
  }
  /* The peer socket is internal to libuv; don't leak it to child processes.
   * (Stray empty statement after this block removed.) */
  if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
    goto error;
  }
  if (CreateIoCompletionPort((HANDLE) sock,
                             iocp,
                             (ULONG_PTR) sock,
                             0) == NULL) {
    goto error;
  }
  return sock;
 error:
  closesocket(sock);
  return INVALID_SOCKET;
}
/* Return (creating lazily on first use) the loop's cached MSAFD peer socket
 * for the provider of `protocol_info`. Returns INVALID_SOCKET when the
 * protocol is not implemented by MSAFD, in which case the caller must fall
 * back to the slow poll path. */
static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
    WSAPROTOCOL_INFOW* protocol_info) {
  SOCKET peer_socket;
  int index;
  size_t i;
  /* Look the provider GUID up in the table of known msafd providers. */
  index = -1;
  for (i = 0; i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
    if (memcmp((void*) &protocol_info->ProviderId,
               (void*) &uv_msafd_provider_ids[i],
               sizeof protocol_info->ProviderId) == 0)
      index = (int) i;
  }
  /* Not an msafd socket. */
  if (index < 0)
    return INVALID_SOCKET;
  /* Create the peer socket only once per provider. A failed attempt leaves
   * INVALID_SOCKET cached so the same protocol is not retried. */
  peer_socket = loop->poll_peer_sockets[index];
  if (peer_socket == 0) {
    peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
    loop->poll_peer_sockets[index] = peer_socket;
  }
  return peer_socket;
}
/* Thread-pool worker for the slow poll path: emulates polling of a
 * non-MSAFD socket with a single-socket select() call, then posts the
 * outcome to the loop's completion port.
 * `arg` is the uv_req_t handed to QueueUserWorkItem; its `data` field points
 * back at the owning uv_poll_t. */
static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
  uv_req_t* req = (uv_req_t*) arg;
  uv_poll_t* handle = (uv_poll_t*) req->data;
  unsigned char reported_events;
  int r;
  uv_single_fd_set_t rfds, wfds, efds;
  struct timeval timeout;
  assert(handle->type == UV_POLL);
  assert(req->type == UV_POLL_REQ);
  if (handle->events & UV_READABLE) {
    rfds.fd_count = 1;
    rfds.fd_array[0] = handle->socket;
  } else {
    rfds.fd_count = 0;
  }
  if (handle->events & UV_WRITABLE) {
    wfds.fd_count = 1;
    wfds.fd_array[0] = handle->socket;
    /* Connect failures are reported through the exception set. */
    efds.fd_count = 1;
    efds.fd_array[0] = handle->socket;
  } else {
    wfds.fd_count = 0;
    efds.fd_count = 0;
  }
  /* Make the select() time out after 3 minutes. If select() hangs because the
   * user closed the socket, we will at least not hang indefinitely. */
  timeout.tv_sec = 3 * 60;
  timeout.tv_usec = 0;
  /* The first argument to select() is ignored on Windows. */
  r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
  if (r == SOCKET_ERROR) {
    /* Queue this req, reporting an error. Bug fix: record the error on the
     * request that was actually submitted (`req`, which may be poll_req_2),
     * not unconditionally on poll_req_1. */
    SET_REQ_ERROR(req, WSAGetLastError());
    POST_COMPLETION_FOR_REQ(handle->loop, req);
    return 0;
  }
  reported_events = 0;
  if (r > 0) {
    if (rfds.fd_count > 0) {
      assert(rfds.fd_count == 1);
      assert(rfds.fd_array[0] == handle->socket);
      reported_events |= UV_READABLE;
    }
    if (wfds.fd_count > 0) {
      assert(wfds.fd_count == 1);
      assert(wfds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    } else if (efds.fd_count > 0) {
      assert(efds.fd_count == 1);
      assert(efds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    }
  }
  SET_REQ_SUCCESS(req);
  /* Smuggle the event mask to the loop thread in the overlapped struct. */
  req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
  POST_COMPLETION_FOR_REQ(handle->loop, req);
  return 0;
}
/* Submit a slow-path poll request by queuing uv__slow_poll_thread_proc on the
 * Windows thread pool. Slot/mask bookkeeping mirrors the fast poll path. */
static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  /* Find a yet unsubmitted req to submit. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Unlike the fast path, callers guarantee a free slot here. */
    assert(0);
    return;
  }
  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Make this req pending, reporting an error. */
    SET_REQ_ERROR(req, GetLastError());
    uv__insert_pending_req(loop, req);
  }
}
/* Handle completion of a slow-path poll request: deliver the events computed
 * by the worker thread, then resubmit or finish closing. */
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;
  /* Determine which slot completed and mark it free. */
  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }
  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. The worker stored the event mask in InternalHigh;
     * drop unwatched events and events masked for this slot. */
    int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }
  /* Resubmit if watched events are uncovered; otherwise finish a pending
   * close once both slots are drained. */
  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}
/* Initialize a poll handle from a CRT file descriptor by converting it to
 * the underlying OS socket handle and delegating. */
int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  SOCKET sock;
  sock = (SOCKET) uv__get_osfhandle(fd);
  return uv_poll_init_socket(loop, handle, sock);
}
/* Initialize a poll handle for `socket`: switch the socket to nonblocking
 * mode, resolve its base handle, and select the fast (MSAFD) or slow
 * (select-on-threadpool) polling strategy. Returns 0 or a libuv error. */
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;
  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());
  /* Try to obtain a base handle for the socket. This increases the chances
   * that we find an AFD handle and are able to use the fast poll mechanism.
   */
#ifndef NDEBUG
  /* Poison the value so the assert below catches a non-writing WSAIoctl. */
  base_socket = INVALID_SOCKET;
#endif
  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    socket = base_socket;
  }
  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;
  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }
  /* Get the peer socket that is needed to enable fast poll. If the returned
   * value is NULL, the protocol is not implemented by MSAFD and we'll have to
   * use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }
  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
  handle->poll_req_1.data = handle;
  handle->submitted_events_2 = 0;
  UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
  handle->poll_req_2.data = handle;
  return 0;
}
static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
int submitted_events;
assert(handle->type == UV_POLL);
assert(!(handle->flags & UV_HANDLE_CLOSING));
assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
handle->events = events;
handle->poll_cb = cb;
if (handle->events == 0) {
uv__handle_stop(handle);
return 0;
}
uv__handle_start(handle);
submitted_events = handle->submitted_events_1 | handle->submitted_events_2;
if (handle->events & ~submitted_events) {
if (handle->flags & UV_HANDLE_POLL_SLOW) {
uv__slow_poll_submit_poll_req(handle->loop, handle);
} else {
uv__fast_poll_submit_poll_req(handle->loop, handle);
}
}
return 0;
}
/* Start (or reconfigure) polling for `events`, invoking `cb` on activity. */
int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  int err;
  err = uv__poll_set(handle, events, cb);
  return err;
}
/* Stop polling. Implemented as setting an empty event mask while keeping the
 * previously registered callback. */
int uv_poll_stop(uv_poll_t* handle) {
  uv_poll_cb cb = handle->poll_cb;
  return uv__poll_set(handle, 0, cb);
}
/* Dispatch a completed poll request to the implementation (fast or slow)
 * selected when the handle was initialized. */
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  if (handle->flags & UV_HANDLE_POLL_SLOW)
    uv__slow_poll_process_poll_req(loop, handle, req);
  else
    uv__fast_poll_process_poll_req(loop, handle, req);
}
/* Begin closing a poll handle. If no requests are outstanding the handle can
 * enter endgame immediately; otherwise (fast path only) a cancelling AFD
 * request is issued to force the outstanding ones to return. */
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD error;
  int result;
  handle->events = 0;
  uv__handle_closing(handle);
  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
    return 0;
  }
  /* Slow-path requests can't be cancelled; the select() timeout or socket
   * activity will end them, after which endgame is entered. */
  if (handle->flags & UV_HANDLE_POLL_SLOW)
    return 0;
  /* Cancel outstanding poll requests by executing another, unique poll
   * request that forces the outstanding ones to return. */
  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
  result = uv__msafd_poll(handle->socket,
                          &afd_poll_info,
                          uv__get_afd_poll_info_dummy(),
                          uv__get_overlapped_dummy());
  if (result == SOCKET_ERROR) {
    error = WSAGetLastError();
    if (error != WSA_IO_PENDING)
      return uv_translate_sys_error(error);
  }
  return 0;
}
/* Final stage of closing a poll handle; only reached once both request slots
 * have drained. */
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);
  uv__handle_close(handle);
}

View File

@@ -0,0 +1,419 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <io.h>
#include <stdio.h>
#include <stdlib.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
/*
* The `child_stdio_buffer` buffer has the following layout:
* int number_of_fds
* unsigned char crt_flags[number_of_fds]
* HANDLE os_handle[number_of_fds]
*/
/* Total byte size of a child stdio buffer holding `count` fds
 * (count field + one flags byte per fd + one handle per fd). */
#define CHILD_STDIO_SIZE(count) \
(sizeof(int) + \
sizeof(unsigned char) * (count) + \
sizeof(uintptr_t) * (count))
/* Number of fds stored in the buffer (first field). */
#define CHILD_STDIO_COUNT(buffer) \
*((unsigned int*) (buffer))
/* CRT flags byte for fd `fd`. */
#define CHILD_STDIO_CRT_FLAGS(buffer, fd) \
*((unsigned char*) (buffer) + sizeof(int) + fd)
/* Address of the (possibly unaligned) HANDLE slot for fd `fd`;
 * read/write it with memcpy, not by dereferencing. */
#define CHILD_STDIO_HANDLE(buffer, fd) \
((void*) ((unsigned char*) (buffer) + \
sizeof(int) + \
sizeof(unsigned char) * \
CHILD_STDIO_COUNT((buffer)) + \
sizeof(HANDLE) * (fd)))
/* CRT file descriptor mode flags */
#define FOPEN 0x01
#define FEOFLAG 0x02
#define FCRLF 0x04
#define FPIPE 0x08
#define FNOINHERIT 0x10
#define FAPPEND 0x20
#define FDEV 0x40
#define FTEXT 0x80
/*
* Clear the HANDLE_FLAG_INHERIT flag from all HANDLEs that were inherited
* the parent process. Don't check for errors - the stdio handles may not be
* valid, or may be closed already. There is no guarantee that this function
* does a perfect job.
*/
void uv_disable_stdio_inheritance(void) {
HANDLE handle;
STARTUPINFOW si;
/* Make the windows stdio handles non-inheritable. */
handle = GetStdHandle(STD_INPUT_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
handle = GetStdHandle(STD_OUTPUT_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
handle = GetStdHandle(STD_ERROR_HANDLE);
if (handle != NULL && handle != INVALID_HANDLE_VALUE)
SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
/* Make inherited CRT FDs non-inheritable. */
GetStartupInfoW(&si);
if (uv__stdio_verify(si.lpReserved2, si.cbReserved2))
uv__stdio_noinherit(si.lpReserved2);
}
/* Make an inheritable duplicate of `handle` in *dup for passing to a child
 * process. Returns 0 on success or a Windows error code; on failure *dup is
 * set to INVALID_HANDLE_VALUE. */
static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
  HANDLE current_process;
  /* _get_osfhandle will sometimes return -2 in case of an error. This seems to
   * happen when fd <= 2 and the process' corresponding stdio handle is set to
   * NULL. Unfortunately DuplicateHandle will happily duplicate (HANDLE) -2, so
   * this situation goes unnoticed until someone tries to use the duplicate.
   * Therefore we filter out known-invalid handles here. */
  if (handle == INVALID_HANDLE_VALUE ||
      handle == NULL ||
      handle == (HANDLE) -2) {
    *dup = INVALID_HANDLE_VALUE;
    return ERROR_INVALID_HANDLE;
  }
  current_process = GetCurrentProcess();
  /* bInheritHandle = TRUE: the whole point is that the child receives it. */
  if (!DuplicateHandle(current_process,
                       handle,
                       current_process,
                       dup,
                       0,
                       TRUE,
                       DUPLICATE_SAME_ACCESS)) {
    *dup = INVALID_HANDLE_VALUE;
    return GetLastError();
  }
  return 0;
}
/* Map a CRT fd to its OS handle and make an inheritable duplicate of it.
 * Returns 0 or a Windows error code; *dup is INVALID_HANDLE_VALUE on error. */
static int uv__duplicate_fd(uv_loop_t* loop, int fd, HANDLE* dup) {
  if (fd != -1)
    return uv__duplicate_handle(loop, uv__get_osfhandle(fd), dup);
  *dup = INVALID_HANDLE_VALUE;
  return ERROR_INVALID_HANDLE;
}
/* Open an inheritable handle to the NUL device with the given access rights,
 * for use as a child's stdio handle. Returns 0 or a Windows error code. */
int uv__create_nul_handle(HANDLE* handle_ptr,
    DWORD access) {
  SECURITY_ATTRIBUTES sa;
  HANDLE nul;
  /* The handle must be inheritable, otherwise the child can't receive it. */
  sa.nLength = sizeof sa;
  sa.lpSecurityDescriptor = NULL;
  sa.bInheritHandle = TRUE;
  nul = CreateFileW(L"NUL",
                    access,
                    FILE_SHARE_READ | FILE_SHARE_WRITE,
                    &sa,
                    OPEN_EXISTING,
                    0,
                    NULL);
  if (nul == INVALID_HANDLE_VALUE)
    return GetLastError();
  *handle_ptr = nul;
  return 0;
}
/* Build the serialized child stdio buffer (layout documented at the top of
 * this file): an fd count, a CRT flags byte per fd and an inheritable HANDLE
 * per fd, one entry per options->stdio slot. On success *buffer_ptr owns the
 * buffer (release with uv__stdio_destroy); returns 0 or a Windows error. */
int uv__stdio_create(uv_loop_t* loop,
                     const uv_process_options_t* options,
                     BYTE** buffer_ptr) {
  BYTE* buffer;
  int count, i;
  int err;
  count = options->stdio_count;
  if (count < 0 || count > 255) {
    /* Only support FDs 0-255 */
    return ERROR_NOT_SUPPORTED;
  } else if (count < 3) {
    /* There should always be at least 3 stdio handles. */
    count = 3;
  }
  /* Allocate the child stdio buffer */
  buffer = (BYTE*) uv__malloc(CHILD_STDIO_SIZE(count));
  if (buffer == NULL) {
    return ERROR_OUTOFMEMORY;
  }
  /* Prepopulate the buffer with INVALID_HANDLE_VALUE handles so we can clean
   * up on failure. */
  CHILD_STDIO_COUNT(buffer) = count;
  for (i = 0; i < count; i++) {
    CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
    /* 0xFF bytes == INVALID_HANDLE_VALUE; written via memset because the
     * handle slots may be unaligned. */
    memset(CHILD_STDIO_HANDLE(buffer, i), 0xFF, sizeof(HANDLE));
  }
  for (i = 0; i < count; i++) {
    uv_stdio_container_t fdopt;
    if (i < options->stdio_count) {
      fdopt = options->stdio[i];
    } else {
      fdopt.flags = UV_IGNORE;
    }
    switch (fdopt.flags & (UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD |
            UV_INHERIT_STREAM)) {
      case UV_IGNORE:
        /* Starting a process with no stdin/stout/stderr can confuse it. So no
         * matter what the user specified, we make sure the first three FDs are
         * always open in their typical modes, e. g. stdin be readable and
         * stdout/err should be writable. For FDs > 2, don't do anything - all
         * handles in the stdio buffer are initialized with.
         * INVALID_HANDLE_VALUE, which should be okay. */
        if (i <= 2) {
          HANDLE nul;
          DWORD access = (i == 0) ? FILE_GENERIC_READ :
              FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES;
          err = uv__create_nul_handle(&nul, access);
          if (err)
            goto error;
          memcpy(CHILD_STDIO_HANDLE(buffer, i), &nul, sizeof(HANDLE));
          CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
        }
        break;
      case UV_CREATE_PIPE: {
        /* Create a pair of two connected pipe ends; one end is turned into an
         * uv_pipe_t for use by the parent. The other one is given to the
         * child. */
        uv_pipe_t* parent_pipe = (uv_pipe_t*) fdopt.data.stream;
        HANDLE child_pipe = INVALID_HANDLE_VALUE;
        /* Create a new, connected pipe pair. stdio[i].stream should point to
         * an uninitialized, but not connected pipe handle. */
        assert(fdopt.data.stream->type == UV_NAMED_PIPE);
        assert(!(fdopt.data.stream->flags & UV_HANDLE_CONNECTION));
        assert(!(fdopt.data.stream->flags & UV_HANDLE_PIPESERVER));
        err = uv__create_stdio_pipe_pair(loop,
                                         parent_pipe,
                                         &child_pipe,
                                         fdopt.flags);
        if (err)
          goto error;
        memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_pipe, sizeof(HANDLE));
        CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
        break;
      }
      case UV_INHERIT_FD: {
        /* Inherit a raw FD. */
        HANDLE child_handle;
        /* Make an inheritable duplicate of the handle. */
        err = uv__duplicate_fd(loop, fdopt.data.fd, &child_handle);
        if (err) {
          /* If fdopt.data.fd is not valid and fd <= 2, then ignore the
           * error. */
          if (fdopt.data.fd <= 2 && err == ERROR_INVALID_HANDLE) {
            CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
            memset(CHILD_STDIO_HANDLE(buffer, i), 0xFF, sizeof(HANDLE));
            break;
          }
          goto error;
        }
        /* Figure out what the type is. */
        switch (GetFileType(child_handle)) {
          case FILE_TYPE_DISK:
            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN;
            break;
          case FILE_TYPE_PIPE:
            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
            break;
          case FILE_TYPE_CHAR:
          case FILE_TYPE_REMOTE:
            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
            break;
          case FILE_TYPE_UNKNOWN:
            /* FILE_TYPE_UNKNOWN with a nonzero last error means the call
             * failed, not that the type is genuinely unknown. */
            if (GetLastError() != 0) {
              err = GetLastError();
              CloseHandle(child_handle);
              goto error;
            }
            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
            break;
          default:
            assert(0);
            return -1;
        }
        memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_handle, sizeof(HANDLE));
        break;
      }
      case UV_INHERIT_STREAM: {
        /* Use an existing stream as the stdio handle for the child. */
        HANDLE stream_handle, child_handle;
        unsigned char crt_flags;
        uv_stream_t* stream = fdopt.data.stream;
        /* Leech the handle out of the stream. */
        if (stream->type == UV_TTY) {
          stream_handle = ((uv_tty_t*) stream)->handle;
          crt_flags = FOPEN | FDEV;
        } else if (stream->type == UV_NAMED_PIPE &&
                   stream->flags & UV_HANDLE_CONNECTION) {
          stream_handle = ((uv_pipe_t*) stream)->handle;
          crt_flags = FOPEN | FPIPE;
        } else {
          stream_handle = INVALID_HANDLE_VALUE;
          crt_flags = 0;
        }
        if (stream_handle == NULL ||
            stream_handle == INVALID_HANDLE_VALUE) {
          /* The handle is already closed, or not yet created, or the stream
           * type is not supported. */
          err = ERROR_NOT_SUPPORTED;
          goto error;
        }
        /* Make an inheritable copy of the handle. */
        err = uv__duplicate_handle(loop, stream_handle, &child_handle);
        if (err)
          goto error;
        memcpy(CHILD_STDIO_HANDLE(buffer, i), &child_handle, sizeof(HANDLE));
        CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags;
        break;
      }
      default:
        assert(0);
        return -1;
    }
  }
  *buffer_ptr = buffer;
  return 0;
 error:
  /* Closes every handle stored so far and frees the buffer. */
  uv__stdio_destroy(buffer);
  return err;
}
/* Close every valid handle stored in the child stdio buffer, then release
 * the buffer itself. */
void uv__stdio_destroy(BYTE* buffer) {
  int fd;
  int nfds = CHILD_STDIO_COUNT(buffer);
  for (fd = 0; fd < nfds; fd++) {
    HANDLE h = uv__stdio_handle(buffer, fd);
    if (h != INVALID_HANDLE_VALUE)
      CloseHandle(h);
  }
  uv__free(buffer);
}
/* Strip the inherit flag from every valid handle in the child stdio buffer,
 * so these handles don't leak into grandchild processes. */
void uv__stdio_noinherit(BYTE* buffer) {
  int fd;
  int nfds = CHILD_STDIO_COUNT(buffer);
  for (fd = 0; fd < nfds; fd++) {
    HANDLE h = uv__stdio_handle(buffer, fd);
    if (h != INVALID_HANDLE_VALUE)
      SetHandleInformation(h, HANDLE_FLAG_INHERIT, 0);
  }
}
/* Validate a (possibly attacker- or parent-supplied) serialized stdio
 * buffer: non-NULL, big enough to hold its own fd count, count in range,
 * and big enough to hold `count` entries. Returns 1 if valid, 0 otherwise. */
int uv__stdio_verify(BYTE* buffer, WORD size) {
  unsigned int count;
  /* Reject a NULL buffer or one too small to even contain the count. */
  if (buffer == NULL || size < CHILD_STDIO_SIZE(0))
    return 0;
  count = CHILD_STDIO_COUNT(buffer);
  /* Reject out-of-range counts and buffers too small for `count` fds. */
  return count <= 256 && size >= CHILD_STDIO_SIZE(count);
}
/* Total size in bytes of the serialized stdio buffer, derived from the fd
 * count stored in its first field. */
WORD uv__stdio_size(BYTE* buffer) {
  unsigned int nfds = CHILD_STDIO_COUNT(buffer);
  return (WORD) CHILD_STDIO_SIZE(nfds);
}
/* Read the HANDLE stored for `fd` out of the stdio buffer. The handle slots
 * may be unaligned, so copy with memcpy instead of dereferencing. */
HANDLE uv__stdio_handle(BYTE* buffer, int fd) {
  HANDLE h;
  memcpy(&h, CHILD_STDIO_HANDLE(buffer, fd), sizeof h);
  return h;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,214 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_REQ_INL_H_
#define UV_WIN_REQ_INL_H_
#include <assert.h>
#include "uv.h"
#include "internal.h"
/* Store an NTSTATUS result on a request via its OVERLAPPED struct. */
#define SET_REQ_STATUS(req, status) \
   (req)->u.io.overlapped.Internal = (ULONG_PTR) (status)
/* Store a Win32 error code, converted to NTSTATUS. */
#define SET_REQ_ERROR(req, error) \
  SET_REQ_STATUS((req), NTSTATUS_FROM_WIN32((error)))
/* Note: used open-coded in UV_REQ_INIT() because of a circular dependency
 * between src/uv-common.h and src/win/internal.h.
 */
#define SET_REQ_SUCCESS(req) \
  SET_REQ_STATUS((req), STATUS_SUCCESS)
#define GET_REQ_STATUS(req) \
  ((NTSTATUS) (req)->u.io.overlapped.Internal)
#define REQ_SUCCESS(req) \
  (NT_SUCCESS(GET_REQ_STATUS((req))))
/* Fetch the stored status as a Win32 or Winsock error code. */
#define GET_REQ_ERROR(req) \
  (pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
#define GET_REQ_SOCK_ERROR(req) \
  (uv__ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
/* Bump/drop the handle's active count together with the loop's pending
 * request count for requests that keep a handle alive. */
#define REGISTER_HANDLE_REQ(loop, handle) \
  do { \
    INCREASE_ACTIVE_COUNT((loop), (handle)); \
    uv__req_register((loop)); \
  } while (0)
#define UNREGISTER_HANDLE_REQ(loop, handle) \
  do { \
    DECREASE_ACTIVE_COUNT((loop), (handle)); \
    uv__req_unregister((loop)); \
  } while (0)
#define UV_SUCCEEDED_WITHOUT_IOCP(result) \
  ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))
#define UV_SUCCEEDED_WITH_IOCP(result) \
  ((result) || (GetLastError() == ERROR_IO_PENDING))
/* Post a request to the loop's completion port; failure here means the loop
 * can no longer make progress, so abort. Wrapped in do/while(0) so the macro
 * behaves as a single statement (the bare `if` form is a dangling-else
 * hazard at call sites). */
#define POST_COMPLETION_FOR_REQ(loop, req) \
  do { \
    if (!PostQueuedCompletionStatus((loop)->iocp, \
                                    0, \
                                    0, \
                                    &((req)->u.io.overlapped))) { \
      uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); \
    } \
  } while (0)
/* Recover the owning uv_req_t from its embedded OVERLAPPED struct. */
INLINE static uv_req_t* uv__overlapped_to_req(OVERLAPPED* overlapped) {
  uv_req_t* req = container_of(overlapped, uv_req_t, u.io.overlapped);
  return req;
}
/* Append `req` to the loop's pending-request queue. The queue is a circular
 * singly-linked list; loop->pending_reqs_tail points at the last element and
 * tail->next_req at the first. */
INLINE static void uv__insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
  req->next_req = NULL;
  if (loop->pending_reqs_tail) {
#ifdef _DEBUG
    /* Ensure the request is not already in the queue, or the queue
     * will get corrupted.
     */
    uv_req_t* current = loop->pending_reqs_tail;
    do {
      assert(req != current);
      current = current->next_req;
    } while(current != loop->pending_reqs_tail);
#endif
    /* Splice after the current tail and make `req` the new tail. */
    req->next_req = loop->pending_reqs_tail->next_req;
    loop->pending_reqs_tail->next_req = req;
    loop->pending_reqs_tail = req;
  } else {
    /* Empty queue: `req` is both head and tail, pointing at itself. */
    req->next_req = req;
    loop->pending_reqs_tail = req;
  }
}
/* Dispatch a stream request to the handler for the concrete stream type
 * (TCP, named pipe, or TTY). `handle_at` names the req field that points
 * back at the owning handle (`data` or `handle`, depending on req type). */
#define DELEGATE_STREAM_REQ(loop, req, method, handle_at) \
  do { \
    switch (((uv_handle_t*) (req)->handle_at)->type) { \
      case UV_TCP: \
        uv__process_tcp_##method##_req(loop, \
                                       (uv_tcp_t*) ((req)->handle_at), \
                                       req); \
        break; \
 \
      case UV_NAMED_PIPE: \
        uv__process_pipe_##method##_req(loop, \
                                        (uv_pipe_t*) ((req)->handle_at), \
                                        req); \
        break; \
 \
      case UV_TTY: \
        uv__process_tty_##method##_req(loop, \
                                       (uv_tty_t*) ((req)->handle_at), \
                                       req); \
        break; \
 \
      default: \
        assert(0); \
    } \
  } while (0)
/* Drain the loop's pending-request queue, dispatching each request to its
 * type-specific handler. The queue is detached first so handlers that queue
 * new requests don't make this loop run forever. */
INLINE static void uv__process_reqs(uv_loop_t* loop) {
  uv_req_t* req;
  uv_req_t* first;
  uv_req_t* next;
  if (loop->pending_reqs_tail == NULL)
    return;
  /* The list is circular: tail->next_req is the head. Detach the whole
   * queue, then walk it until we come back around to `first`. */
  first = loop->pending_reqs_tail->next_req;
  next = first;
  loop->pending_reqs_tail = NULL;
  while (next != NULL) {
    req = next;
    next = req->next_req != first ? req->next_req : NULL;
    switch (req->type) {
      case UV_READ:
        DELEGATE_STREAM_REQ(loop, req, read, data);
        break;
      case UV_WRITE:
        DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle);
        break;
      case UV_ACCEPT:
        DELEGATE_STREAM_REQ(loop, req, accept, data);
        break;
      case UV_CONNECT:
        DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle);
        break;
      case UV_SHUTDOWN:
        DELEGATE_STREAM_REQ(loop, (uv_shutdown_t*) req, shutdown, handle);
        break;
      case UV_UDP_RECV:
        uv__process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
        break;
      case UV_UDP_SEND:
        uv__process_udp_send_req(loop,
                                 ((uv_udp_send_t*) req)->handle,
                                 (uv_udp_send_t*) req);
        break;
      case UV_WAKEUP:
        uv__process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
        break;
      case UV_SIGNAL_REQ:
        uv__process_signal_req(loop, (uv_signal_t*) req->data, req);
        break;
      case UV_POLL_REQ:
        uv__process_poll_req(loop, (uv_poll_t*) req->data, req);
        break;
      case UV_PROCESS_EXIT:
        uv__process_proc_exit(loop, (uv_process_t*) req->data);
        break;
      case UV_FS_EVENT_REQ:
        uv__process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
        break;
      default:
        assert(0);
    }
  }
}
#endif /* UV_WIN_REQ_INL_H_ */

View File

@@ -0,0 +1,282 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <signal.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Red-black tree of active signal watchers across all loops, ordered by
 * uv__signal_compare. */
RB_HEAD(uv_signal_tree_s, uv_signal_s);
static struct uv_signal_tree_s uv__signal_tree = RB_INITIALIZER(uv__signal_tree);
/* Guards uv__signal_tree; taken by loop threads and the console
 * control-handler thread. */
static CRITICAL_SECTION uv__signal_lock;
static BOOL WINAPI uv__signal_control_handler(DWORD type);
int uv__signal_start(uv_signal_t* handle,
    uv_signal_cb signal_cb,
    int signum,
    int oneshot);
/* One-time process-wide signal setup: initialize the tree lock and register
 * the console control handler. Failure to register is unrecoverable. */
void uv__signals_init(void) {
  InitializeCriticalSection(&uv__signal_lock);
  if (SetConsoleCtrlHandler(uv__signal_control_handler, TRUE) == 0)
    abort();
}
/* Process-wide signal teardown. Intentionally a no-op for now. */
void uv__signal_cleanup(void) {
  /* TODO(bnoordhuis) Undo effects of uv_signal_init()? */
}
/* Ordering relation for the signal-watcher tree. Watchers are ordered by
 * signum first so that all watchers for the same signal are adjacent, then
 * by loop pointer (so a lookup key of { .signum = x, .loop = NULL } lands
 * just before the first real entry for x), then by handle address to break
 * ties between watchers in the same loop. */
static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
  if (w1->signum != w2->signum)
    return w1->signum < w2->signum ? -1 : 1;
  if (w1->loop != w2->loop)
    return (uintptr_t) w1->loop < (uintptr_t) w2->loop ? -1 : 1;
  if (w1 != w2)
    return (uintptr_t) w1 < (uintptr_t) w2 ? -1 : 1;
  return 0;
}
/* Generate the static red-black tree implementation for the watcher tree. */
RB_GENERATE_STATIC(uv_signal_tree_s, uv_signal_s, tree_entry, uv__signal_compare)
/*
 * Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
 * Returns 1 if the signal was dispatched to any watcher, or 0 if there were
 * no active signal watchers observing this signal.
 * Called from the console control-handler thread, hence the lock and the
 * interlocked exchange on pending_signum.
 */
int uv__signal_dispatch(int signum) {
  uv_signal_t lookup;
  uv_signal_t* handle;
  int dispatched;
  dispatched = 0;
  EnterCriticalSection(&uv__signal_lock);
  /* { signum, NULL } sorts just before the first watcher for signum. */
  lookup.signum = signum;
  lookup.loop = NULL;
  for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
       handle != NULL && handle->signum == signum;
       handle = RB_NEXT(uv_signal_tree_s, handle)) {
    unsigned long previous = InterlockedExchange(
        (volatile LONG*) &handle->pending_signum, signum);
    /* A one-shot watcher that already fired must not be woken again. */
    if (handle->flags & UV_SIGNAL_ONE_SHOT_DISPATCHED)
      continue;
    /* Only post a wakeup if no signal was already pending; otherwise the
     * already-queued signal_req will pick this one up. */
    if (!previous) {
      POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
    }
    dispatched = 1;
    if (handle->flags & UV_SIGNAL_ONE_SHOT)
      handle->flags |= UV_SIGNAL_ONE_SHOT_DISPATCHED;
  }
  LeaveCriticalSection(&uv__signal_lock);
  return dispatched;
}
/* Console control handler (runs on a system-created thread): translate
 * console events into the POSIX-style signal numbers libuv watchers use. */
static BOOL WINAPI uv__signal_control_handler(DWORD type) {
  switch (type) {
    case CTRL_C_EVENT:
      return uv__signal_dispatch(SIGINT);
    case CTRL_BREAK_EVENT:
      return uv__signal_dispatch(SIGBREAK);
    case CTRL_CLOSE_EVENT:
      if (uv__signal_dispatch(SIGHUP)) {
        /* Windows will terminate the process after the control handler
         * returns. After that it will just terminate our process. Therefore
         * block the signal handler so the main loop has some time to pick up
         * the signal and do something for a few seconds. */
        Sleep(INFINITE);
        return TRUE;
      }
      return FALSE;
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      /* These signals are only sent to services. Services have their own
       * notification mechanism, so there's no point in handling these. */
    default:
      /* We don't handle these. */
      return FALSE;
  }
}
/* Initialize a signal handle in the stopped state; watching begins with
 * uv_signal_start(). Always returns 0. */
int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
  handle->signum = 0;
  handle->pending_signum = 0;
  handle->signal_cb = NULL;
  /* The embedded request is posted to the IOCP when a signal arrives. */
  UV_REQ_INIT(&handle->signal_req, UV_SIGNAL_REQ);
  handle->signal_req.data = handle;
  return 0;
}
/* Stop watching for the handle's signal: remove the watcher from the global
 * tree (under the lock, so the dispatcher can't see it mid-removal) and
 * deactivate the handle. Infallible; always returns 0. */
int uv_signal_stop(uv_signal_t* handle) {
  uv_signal_t* removed_handle;
  /* If the watcher wasn't started, this is a no-op. */
  if (handle->signum == 0)
    return 0;
  EnterCriticalSection(&uv__signal_lock);
  removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle);
  assert(removed_handle == handle);
  LeaveCriticalSection(&uv__signal_lock);
  handle->signum = 0;
  uv__handle_stop(handle);
  return 0;
}
/* Start a repeating signal watcher (oneshot = 0). */
int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
  return uv__signal_start(handle, signal_cb, signum, /* oneshot */ 0);
}
/* Start a one-shot signal watcher: it stops itself after firing once. */
int uv_signal_start_oneshot(uv_signal_t* handle,
    uv_signal_cb signal_cb,
    int signum) {
  return uv__signal_start(handle, signal_cb, signum, /* oneshot */ 1);
}
/* Common implementation of uv_signal_start/_oneshot: validate signum,
 * (re)register the watcher in the global tree, and activate the handle.
 * Returns 0 or UV_EINVAL. */
int uv__signal_start(uv_signal_t* handle,
    uv_signal_cb signal_cb,
    int signum,
    int oneshot) {
  /* Test for invalid signal values. */
  if (signum <= 0 || signum >= NSIG)
    return UV_EINVAL;
  /* Short circuit: if the signal watcher is already watching {signum} don't go
   * through the process of deregistering and registering the handler.
   * Additionally, this avoids pending signals getting lost in the (small) time
   * frame that handle->signum == 0. */
  if (signum == handle->signum) {
    handle->signal_cb = signal_cb;
    return 0;
  }
  /* If the signal handler was already active, stop it first. */
  if (handle->signum != 0) {
    int r = uv_signal_stop(handle);
    /* uv_signal_stop is infallible. */
    assert(r == 0);
  }
  /* Insert under the lock so the dispatcher never sees a half-set-up
   * watcher. */
  EnterCriticalSection(&uv__signal_lock);
  handle->signum = signum;
  if (oneshot)
    handle->flags |= UV_SIGNAL_ONE_SHOT;
  RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle);
  LeaveCriticalSection(&uv__signal_lock);
  handle->signal_cb = signal_cb;
  uv__handle_start(handle);
  return 0;
}
/* Loop-thread completion for a posted signal request: consume the pending
 * signal number, invoke the user callback if it still matches, honor
 * one-shot semantics, and finish closing if a close was requested while the
 * request was in flight. */
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req) {
  long dispatched_signum;
  assert(handle->type == UV_SIGNAL);
  assert(req->type == UV_SIGNAL_REQ);
  /* Atomically take and clear the pending signal; the dispatcher thread may
   * write this field concurrently. */
  dispatched_signum = InterlockedExchange(
  (volatile LONG*) &handle->pending_signum, 0);
  assert(dispatched_signum != 0);
  /* Check if the pending signal equals the signum that we are watching for.
   * These can get out of sync when the handler is stopped and restarted while
   * the signal_req is pending. */
  if (dispatched_signum == handle->signum)
    handle->signal_cb(handle, dispatched_signum);
  /* One-shot watchers stop themselves after the first delivery. */
  if (handle->flags & UV_SIGNAL_ONE_SHOT)
    uv_signal_stop(handle);
  if (handle->flags & UV_HANDLE_CLOSING) {
    /* When it is closing, it must be stopped at this point. */
    assert(handle->signum == 0);
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}
/* Begin closing a signal handle: stop the watcher, mark it closing, and
 * schedule the endgame unless a dispatched signal request is still pending
 * (in that case uv__process_signal_req schedules the endgame later). */
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle) {
  uv_signal_stop(handle);
  uv__handle_closing(handle);
  if (handle->pending_signum == 0) {
    uv__want_endgame(loop, (uv_handle_t*) handle);
  }
}
/* Final teardown step for a closing signal handle. Preconditions: the handle
 * is closing, not yet closed, stopped, and has no pending dispatched signal. */
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  assert(handle->signum == 0);
  assert(handle->pending_signum == 0);
  handle->flags |= UV_HANDLE_CLOSED;
  uv__handle_close(handle);
}

View File

@@ -0,0 +1,42 @@
/* Copyright the libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#if defined(_MSC_VER) && _MSC_VER < 1900
#include <stdio.h>
#include <stdarg.h>
/* Emulate snprintf() on MSVC<2015; _snprintf() doesn't zero-terminate the
 * buffer on overflow. _vscprintf() supplies the would-be length (matching
 * C99 snprintf's return value) and vsnprintf_s() with _TRUNCATE performs a
 * bounded, always-NUL-terminated write. */
int snprintf(char* buf, size_t len, const char* fmt, ...) {
  int n;
  va_list ap;

  /* A va_list has an indeterminate value after being consumed by a
   * v*printf-family function (C11 7.16.1), so it must be re-initialized
   * between the two calls rather than passed to both in a row. */
  va_start(ap, fmt);
  n = _vscprintf(fmt, ap);
  va_end(ap);

  va_start(ap, fmt);
  vsnprintf_s(buf, len, _TRUNCATE, fmt, ap);
  va_end(ap);

  return n;
}
#endif

View File

@@ -0,0 +1,54 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_WIN_STREAM_INL_H_
#define UV_WIN_STREAM_INL_H_
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Initialize the stream-specific fields of a TCP/pipe/TTY handle: zero the
 * write bookkeeping and set up the embedded read request. Called from the
 * per-type init functions. */
INLINE static void uv__stream_init(uv_loop_t* loop,
uv_stream_t* handle,
uv_handle_type type) {
  uv__handle_init(loop, (uv_handle_t*) handle, type);
  handle->write_queue_size = 0;
  handle->activecnt = 0;
  handle->stream.conn.shutdown_req = NULL;
  handle->stream.conn.write_reqs_pending = 0;
  /* The read request is reused for every read on this stream. */
  UV_REQ_INIT(&handle->read_req, UV_READ);
  handle->read_req.event_handle = NULL;
  handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
  handle->read_req.data = handle;
}
/* Mark a stream handle as a connected endpoint (as opposed to a listener). */
INLINE static void uv__connection_init(uv_stream_t* handle) {
  handle->flags |= UV_HANDLE_CONNECTION;
}
#endif /* UV_WIN_STREAM_INL_H_ */

View File

@@ -0,0 +1,252 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"
/* Start listening for incoming connections on a TCP or named-pipe stream.
 * Returns 0 on success or a translated libuv error code; closing handles
 * are rejected with UV_EINVAL. */
int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
  int sys_err;

  if (uv__is_closing(stream))
    return UV_EINVAL;

  if (stream->type == UV_TCP) {
    sys_err = uv__tcp_listen((uv_tcp_t*) stream, backlog, cb);
  } else if (stream->type == UV_NAMED_PIPE) {
    sys_err = uv__pipe_listen((uv_pipe_t*) stream, backlog, cb);
  } else {
    /* Only TCP and pipe streams can listen. */
    assert(0);
    sys_err = ERROR_INVALID_PARAMETER;
  }

  return uv_translate_sys_error(sys_err);
}
/* Accept a pending connection from a listening server stream into {client}.
 * Returns 0 on success or a translated libuv error code. */
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
  int sys_err;

  if (server->type == UV_TCP) {
    sys_err = uv__tcp_accept((uv_tcp_t*) server, (uv_tcp_t*) client);
  } else if (server->type == UV_NAMED_PIPE) {
    sys_err = uv__pipe_accept((uv_pipe_t*) server, client);
  } else {
    /* Only TCP and pipe streams can accept. */
    assert(0);
    sys_err = ERROR_INVALID_PARAMETER;
  }

  return uv_translate_sys_error(sys_err);
}
/* Dispatch a read-start request to the type-specific implementation (TCP,
 * pipe or TTY). Returns 0 on success or a translated libuv error code. */
int uv__read_start(uv_stream_t* handle,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
  int sys_err;

  if (handle->type == UV_TCP) {
    sys_err = uv__tcp_read_start((uv_tcp_t*) handle, alloc_cb, read_cb);
  } else if (handle->type == UV_NAMED_PIPE) {
    sys_err = uv__pipe_read_start((uv_pipe_t*) handle, alloc_cb, read_cb);
  } else if (handle->type == UV_TTY) {
    sys_err = uv__tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
  } else {
    /* No other handle type supports reading. */
    assert(0);
    sys_err = ERROR_INVALID_PARAMETER;
  }

  return uv_translate_sys_error(sys_err);
}
/* Stop reading from a stream. No-op when the stream is not reading. TTY and
 * pipe streams have type-specific stop logic; all other stream types just
 * clear the reading flag and drop the active-handle reference. */
int uv_read_stop(uv_stream_t* handle) {
  int err;
  if (!(handle->flags & UV_HANDLE_READING))
    return 0;
  err = 0;
  if (handle->type == UV_TTY) {
    err = uv__tty_read_stop((uv_tty_t*) handle);
  } else if (handle->type == UV_NAMED_PIPE) {
    /* Pipe read-stop cannot fail; it does its own flag/refcount handling. */
    uv__pipe_read_stop((uv_pipe_t*) handle);
  } else {
    handle->flags &= ~UV_HANDLE_READING;
    DECREASE_ACTIVE_COUNT(handle->loop, handle);
  }
  return uv_translate_sys_error(err);
}
/* Queue a write on a TCP, pipe or TTY stream. Returns 0 on success,
 * UV_EPIPE when the stream is no longer writable, or a translated error.
 * Completion (or failure) of the queued write is reported through {cb}. */
int uv_write(uv_write_t* req,
uv_stream_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_write_cb cb) {
  uv_loop_t* loop = handle->loop;
  int err;
  if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    return UV_EPIPE;
  }
  err = ERROR_INVALID_PARAMETER;
  switch (handle->type) {
    case UV_TCP:
      err = uv__tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
      break;
    case UV_NAMED_PIPE:
      err = uv__pipe_write(
      loop, req, (uv_pipe_t*) handle, bufs, nbufs, NULL, cb);
      /* Note: pipe writes deliberately return here through the write-specific
       * error translator instead of falling through to the common return. */
      return uv_translate_write_sys_error(err);
    case UV_TTY:
      err = uv__tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
      break;
    default:
      /* Unreachable: no other stream type supports writing. */
      assert(0);
  }
  return uv_translate_sys_error(err);
}
/* Like uv_write(), but additionally sends {send_handle} over the stream.
 * Handle passing is only supported on IPC-enabled named pipes; with a NULL
 * send_handle this degenerates to a plain uv_write(). */
int uv_write2(uv_write_t* req,
uv_stream_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle,
uv_write_cb cb) {
  uv_loop_t* loop = handle->loop;
  int err;
  if (send_handle == NULL) {
    return uv_write(req, handle, bufs, nbufs, cb);
  }
  if (handle->type != UV_NAMED_PIPE || !((uv_pipe_t*) handle)->ipc) {
    return UV_EINVAL;
  } else if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    return UV_EPIPE;
  }
  err = uv__pipe_write(
  loop, req, (uv_pipe_t*) handle, bufs, nbufs, send_handle, cb);
  return uv_translate_write_sys_error(err);
}
/* Attempt a synchronous, non-queued write. Returns the number of bytes
 * written or an error: UV_EBADF for closing streams, UV_EPIPE for
 * non-writable ones, UV_EAGAIN for named pipes (no try-write support). */
int uv_try_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs) {
  if (stream->flags & UV_HANDLE_CLOSING)
    return UV_EBADF;

  if (!(stream->flags & UV_HANDLE_WRITABLE))
    return UV_EPIPE;

  if (stream->type == UV_TCP)
    return uv__tcp_try_write((uv_tcp_t*) stream, bufs, nbufs);

  if (stream->type == UV_TTY)
    return uv__tty_try_write((uv_tty_t*) stream, bufs, nbufs);

  if (stream->type == UV_NAMED_PIPE)
    return UV_EAGAIN;

  assert(0);
  return UV_ENOSYS;
}
/* Try-write variant that would also pass {send_handle}; handle passing is
 * never possible synchronously here, so it yields UV_EAGAIN. Without a
 * handle this is exactly uv_try_write(). */
int uv_try_write2(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle) {
  if (send_handle == NULL)
    return uv_try_write(stream, bufs, nbufs);
  return UV_EAGAIN;
}
/* Shut down the write side of a stream once all queued writes complete.
 * Returns UV_ENOTCONN when the stream is not writable, already shutting
 * down, or closing. The shutdown request is registered with the loop and
 * completed (via {cb}) asynchronously. */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  uv_loop_t* loop = handle->loop;
  if (!(handle->flags & UV_HANDLE_WRITABLE) ||
  uv__is_stream_shutting(handle) ||
  uv__is_closing(handle)) {
    return UV_ENOTCONN;
  }
  UV_REQ_INIT(req, UV_SHUTDOWN);
  req->handle = handle;
  req->cb = cb;
  /* Refuse further writes immediately; pending ones still drain first. */
  handle->flags &= ~UV_HANDLE_WRITABLE;
  handle->stream.conn.shutdown_req = req;
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle);
  /* If nothing is in flight, the shutdown can proceed right away; otherwise
   * it is triggered when the last pending write completes. */
  if (handle->stream.conn.write_reqs_pending == 0) {
    if (handle->type == UV_NAMED_PIPE)
      uv__pipe_shutdown(loop, (uv_pipe_t*) handle, req);
    else
      uv__insert_pending_req(loop, (uv_req_t*) req);
  }
  return 0;
}
/* Report whether the stream currently has the readable flag set. */
int uv_is_readable(const uv_stream_t* handle) {
  return (handle->flags & UV_HANDLE_READABLE) != 0;
}
/* Report whether the stream currently has the writable flag set. */
int uv_is_writable(const uv_stream_t* handle) {
  return (handle->flags & UV_HANDLE_WRITABLE) != 0;
}
/* Toggle blocking-write mode. Only supported for named pipes; any other
 * stream type yields UV_EINVAL. */
int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
  if (handle->type != UV_NAMED_PIPE)
    return UV_EINVAL;

  if (blocking)
    handle->flags |= UV_HANDLE_BLOCKING_WRITES;
  else
    handle->flags &= ~UV_HANDLE_BLOCKING_WRITES;

  return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,568 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#if defined(__MINGW64_VERSION_MAJOR)
/* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
* require this header in some versions of mingw64. */
#include <intrin.h>
#endif
#include "uv.h"
#include "internal.h"
/* Parameterless init callback type used by uv_once(). */
typedef void (*uv__once_cb)(void);
/* Trampoline payload: carries the user callback through the void* parameter
 * of InitOnceExecuteOnce. */
typedef struct {
uv__once_cb callback;
} uv__once_data_t;
/* InitOnceExecuteOnce trampoline: unwrap the payload and run the callback.
 * Returning TRUE marks the INIT_ONCE as completed. */
static BOOL WINAPI uv__once_inner(INIT_ONCE *once, void* param, void** context) {
  uv__once_data_t* data = param;
  data->callback();
  return TRUE;
}
/* Run {callback} exactly once per {guard}, using the Win32 one-time
 * initialization API; concurrent callers block until it finishes. */
void uv_once(uv_once_t* guard, uv__once_cb callback) {
  uv__once_data_t data = { .callback = callback };
  InitOnceExecuteOnce(&guard->init_once, uv__once_inner, (void*) &data, NULL);
}
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));
/* TLS slot holding the current thread's uv_thread_t (see uv_thread_self). */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
static uv_once_t uv__thread_name_once = UV_ONCE_INIT;
/* Get/SetThreadDescription are resolved at runtime (uv__thread_name_init_once)
 * because they are not available on older Windows versions. */
HRESULT (WINAPI *pGetThreadDescription)(HANDLE, PWSTR*);
HRESULT (WINAPI *pSetThreadDescription)(HANDLE, PCWSTR);
/* One-time initializer for uv__current_thread_key; aborts on failure since
 * there is no way to report an error from here. */
static void uv__init_current_thread_key(void) {
  if (uv_key_create(&uv__current_thread_key))
    abort();
}
/* Heap-allocated bundle handed to a new thread: its entry point, argument,
 * and its own handle (so it can register itself in TLS). */
struct thread_ctx {
void (*entry)(void* arg);
void* arg;
uv_thread_t self;
};
/* _beginthreadex entry trampoline: copy the context to the stack, free the
 * heap copy, record this thread's handle in TLS, then run the user entry. */
static UINT __stdcall uv__thread_start(void* arg) {
  struct thread_ctx *ctx_p;
  struct thread_ctx ctx;
  ctx_p = arg;
  /* Copy-then-free so the context cannot leak if the entry never returns. */
  ctx = *ctx_p;
  uv__free(ctx_p);
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  uv_key_set(&uv__current_thread_key, ctx.self);
  ctx.entry(ctx.arg);
  return 0;
}
/* Create a thread with default options (no custom stack size); thin wrapper
 * around uv_thread_create_ex(). */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_NO_FLAGS;
  return uv_thread_create_ex(tid, &options, entry, arg);
}
/* Detach a thread by closing its handle; the thread keeps running but can
 * no longer be joined. */
int uv_thread_detach(uv_thread_t *tid) {
  if (!CloseHandle(*tid))
    return uv_translate_sys_error(GetLastError());

  return 0;
}
/* Create a thread, optionally with a caller-specified stack size (rounded up
 * to a page boundary). The thread is created suspended so its own handle can
 * be stored in the context before it runs (uv__thread_start reads ctx->self).
 * Returns 0 on success or a UV_* error code. */
int uv_thread_create_ex(uv_thread_t* tid,
const uv_thread_options_t* params,
void (*entry)(void *arg),
void *arg) {
  struct thread_ctx* ctx;
  int err;
  HANDLE thread;
  SYSTEM_INFO sysinfo;
  size_t stack_size;
  size_t pagesize;
  stack_size =
  params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
  if (stack_size != 0) {
    GetNativeSystemInfo(&sysinfo);
    pagesize = (size_t)sysinfo.dwPageSize;
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
    /* _beginthreadex takes an unsigned; reject sizes that don't fit. */
    if ((unsigned)stack_size != stack_size)
      return UV_EINVAL;
  }
  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;
  ctx->entry = entry;
  ctx->arg = arg;
  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  thread = (HANDLE) _beginthreadex(NULL,
  (unsigned)stack_size,
  uv__thread_start,
  ctx,
  CREATE_SUSPENDED,
  NULL);
  if (thread == NULL) {
    /* _beginthreadex reports failure via errno, not GetLastError(). */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = thread;
    ctx->self = thread;
    ResumeThread(thread);
  }
  /* Map the CRT errno to the corresponding libuv error code. */
  switch (err) {
    case 0:
      return 0;
    case EACCES:
      return UV_EACCES;
    case EAGAIN:
      return UV_EAGAIN;
    case EINVAL:
      return UV_EINVAL;
  }
  return UV_EIO;
}
/* Set the CPU affinity of {tid} from the byte-per-CPU array {cpumask};
 * optionally return the previous affinity in {oldmask}. Requested CPUs must
 * be a subset of the process affinity mask, otherwise UV_EINVAL. */
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  DWORD_PTR oldthreadmask;
  int cpumasksize;
  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;
  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());
  threadmask = 0;
  for (i = 0; i < cpumasksize; i++) {
    if (cpumask[i]) {
      /* Shift in DWORD_PTR width: on 64-bit Windows the mask covers up to
       * 64 CPUs and a plain `1 << i` (int shift) is undefined behavior for
       * i >= 31, silently dropping the upper CPUs. */
      if (procmask & ((DWORD_PTR)1 << i))
        threadmask |= (DWORD_PTR)1 << i;
      else
        return UV_EINVAL;
    }
  }
  oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
  if (oldthreadmask == 0)
    return uv_translate_sys_error(GetLastError());
  /* SetThreadAffinityMask returns the previous mask; decode it on request. */
  if (oldmask != NULL) {
    for (i = 0; i < cpumasksize; i++)
      oldmask[i] = (oldthreadmask >> i) & 1;
  }
  return 0;
}
/* Read the CPU affinity of {tid} into the byte-per-CPU array {cpumask}.
 * Windows has no GetThreadAffinityMask, so the current mask is obtained by
 * setting the thread's affinity to the full process mask (which returns the
 * previous value) and then immediately restoring it. */
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  int cpumasksize;
  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;
  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());
  /* First call fetches the old mask (as the return value); second restores
   * it. NOTE(review): not atomic — another thread observing affinity in
   * between sees the temporary procmask value. */
  threadmask = SetThreadAffinityMask(*tid, procmask);
  if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
    return uv_translate_sys_error(GetLastError());
  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = (threadmask >> i) & 1;
  return 0;
}
/* Return the processor number the calling thread is currently running on. */
int uv_thread_getcpu(void) {
  return GetCurrentProcessorNumber();
}
/* Return the uv_thread_t of the calling thread. Threads created via
 * uv_thread_create have their handle pre-registered in TLS; any other
 * thread gets a real (duplicated) handle assigned on first call. */
uv_thread_t uv_thread_self(void) {
  uv_thread_t key;
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  key = uv_key_get(&uv__current_thread_key);
  if (key == NULL) {
    /* If the thread wasn't started by uv_thread_create (such as the main
     * thread), we assign an id to it now. */
    /* GetCurrentThread() returns a pseudo-handle, so duplicate it into a
     * real handle that stays valid outside this thread. */
    if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
    GetCurrentProcess(), &key, 0,
    FALSE, DUPLICATE_SAME_ACCESS)) {
      uv_fatal_error(GetLastError(), "DuplicateHandle");
    }
    uv_key_set(&uv__current_thread_key, key);
  }
  return key;
}
/* Wait for {tid} to finish, then close its handle and clear the id.
 * Returns 0 on success or a translated system error. */
int uv_thread_join(uv_thread_t *tid) {
  if (WaitForSingleObject(*tid, INFINITE) != WAIT_OBJECT_0)
    return uv_translate_sys_error(GetLastError());

  CloseHandle(*tid);
  *tid = 0;
  MemoryBarrier(); /* For feature parity with pthread_join(). */
  return 0;
}
/* Compare two thread ids for equality (raw handle comparison). */
int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return *t1 == *t2;
}
/* One-time loader for Get/SetThreadDescription, which only exist on newer
 * Windows; the function pointers stay NULL when unavailable and callers
 * report UV_ENOSYS. */
static void uv__thread_name_init_once(void) {
  HMODULE m;
  m = GetModuleHandleA("api-ms-win-core-processthreads-l1-1-3.dll");
  if (m != NULL) {
    pGetThreadDescription = (void*) GetProcAddress(m, "GetThreadDescription");
    pSetThreadDescription = (void*) GetProcAddress(m, "SetThreadDescription");
  }
}
/* Set the calling thread's name (truncated to UV_PTHREAD_MAX_NAMELEN_NP-1
 * bytes, converted to UTF-16). Returns UV_ENOSYS when SetThreadDescription
 * is unavailable, UV_EINVAL for a NULL name, or a translated error. */
int uv_thread_setname(const char* name) {
  HRESULT hr;
  WCHAR* namew;
  int err;
  char namebuf[UV_PTHREAD_MAX_NAMELEN_NP];
  uv_once(&uv__thread_name_once, uv__thread_name_init_once);
  if (pSetThreadDescription == NULL)
    return UV_ENOSYS;
  if (name == NULL)
    return UV_EINVAL;
  /* Bounded copy; explicit terminator since strncpy may not add one. */
  strncpy(namebuf, name, sizeof(namebuf) - 1);
  namebuf[sizeof(namebuf) - 1] = '\0';
  namew = NULL;
  err = uv__convert_utf8_to_utf16(namebuf, &namew);
  if (err)
    return err;
  hr = pSetThreadDescription(GetCurrentThread(), namew);
  uv__free(namew);
  if (FAILED(hr))
    return uv_translate_sys_error(HRESULT_CODE(hr));
  return 0;
}
/* Fetch the name of thread {tid} into {name} (at most {size} bytes,
 * truncating if necessary). Returns UV_ENOSYS when GetThreadDescription is
 * unavailable, UV_EINVAL for bad arguments, UV_ENOENT when the thread has
 * already exited, or a translated error. */
int uv_thread_getname(uv_thread_t* tid, char* name, size_t size) {
  HRESULT hr;
  WCHAR* namew;
  char* thread_name;
  size_t buf_size;
  int r;
  DWORD exit_code;
  uv_once(&uv__thread_name_once, uv__thread_name_init_once);
  if (pGetThreadDescription == NULL)
    return UV_ENOSYS;
  if (name == NULL || size == 0)
    return UV_EINVAL;
  if (tid == NULL || *tid == NULL)
    return UV_EINVAL;
  /* Check if the thread handle is valid */
  if (!GetExitCodeThread(*tid, &exit_code) || exit_code != STILL_ACTIVE)
    return UV_ENOENT;
  namew = NULL;
  thread_name = NULL;
  hr = pGetThreadDescription(*tid, &namew);
  if (FAILED(hr))
    return uv_translate_sys_error(HRESULT_CODE(hr));
  buf_size = size;
  r = uv__copy_utf16_to_utf8(namew, -1, name, &buf_size);
  /* On overflow, fall back to converting the whole name to a temporary
   * buffer and copying a truncated prefix into the caller's buffer. */
  if (r == UV_ENOBUFS) {
    r = uv__convert_utf16_to_utf8(namew, wcslen(namew), &thread_name);
    if (r == 0) {
      uv__strscpy(name, thread_name, size);
      uv__free(thread_name);
    }
  }
  /* GetThreadDescription allocates via LocalAlloc. */
  LocalFree(namew);
  return r;
}
/* Mutex API: thin wrappers over Win32 critical sections. */

/* Initialize a mutex; never fails on Windows. */
int uv_mutex_init(uv_mutex_t* mutex) {
  InitializeCriticalSection(mutex);
  return 0;
}
/* Critical sections are recursive by nature, so this is the same init. */
int uv_mutex_init_recursive(uv_mutex_t* mutex) {
  return uv_mutex_init(mutex);
}
void uv_mutex_destroy(uv_mutex_t* mutex) {
  DeleteCriticalSection(mutex);
}
void uv_mutex_lock(uv_mutex_t* mutex) {
  EnterCriticalSection(mutex);
}
/* Non-blocking acquire: UV_EBUSY when the mutex is already held. */
int uv_mutex_trylock(uv_mutex_t* mutex) {
  if (TryEnterCriticalSection(mutex))
    return 0;
  else
    return UV_EBUSY;
}
void uv_mutex_unlock(uv_mutex_t* mutex) {
  LeaveCriticalSection(mutex);
}
/* Ensure that the ABI for this type remains stable in v1.x */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif

/* Read-write lock API: wrappers over Win32 slim reader/writer locks. The
 * struct is zeroed first to keep the (larger, ABI-frozen) uv_rwlock_t
 * layout deterministic. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  memset(rwlock, 0, sizeof(*rwlock));
  InitializeSRWLock(&rwlock->read_write_lock_);
  return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  /* SRWLock does not need explicit destruction so long as there are no waiting threads
  See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
/* Shared (reader) acquire/release. */
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockShared(&rwlock->read_write_lock_);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
    return UV_EBUSY;
  return 0;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockShared(&rwlock->read_write_lock_);
}
/* Exclusive (writer) acquire/release. */
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
    return UV_EBUSY;
  return 0;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
/* Semaphore API: wrappers over Win32 semaphore handles. Post/wait/destroy
 * abort() on failure because their POSIX counterparts cannot report errors. */

/* Create a semaphore with the given initial count. */
int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
  if (*sem == NULL)
    return uv_translate_sys_error(GetLastError());
  else
    return 0;
}
void uv_sem_destroy(uv_sem_t* sem) {
  if (!CloseHandle(*sem))
    abort();
}
/* Increment the count by one, waking one waiter if any. */
void uv_sem_post(uv_sem_t* sem) {
  if (!ReleaseSemaphore(*sem, 1, NULL))
    abort();
}
/* Block until the count is positive, then decrement it. */
void uv_sem_wait(uv_sem_t* sem) {
  if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
    abort();
}
/* Non-blocking wait: 0 on success, UV_EAGAIN when the count is zero. */
int uv_sem_trywait(uv_sem_t* sem) {
  DWORD r = WaitForSingleObject(*sem, 0);
  if (r == WAIT_OBJECT_0)
    return 0;
  if (r == WAIT_TIMEOUT)
    return UV_EAGAIN;
  abort();
  return -1; /* Satisfy the compiler. */
}
/* Condition variable API: wrappers over Win32 condition variables, used
 * together with uv_mutex_t (critical sections). */

int uv_cond_init(uv_cond_t* cond) {
  InitializeConditionVariable(&cond->cond_var);
  return 0;
}
void uv_cond_destroy(uv_cond_t* cond) {
  /* nothing to do */
  (void) &cond;
}
/* Wake one waiter. */
void uv_cond_signal(uv_cond_t* cond) {
  WakeConditionVariable(&cond->cond_var);
}
/* Wake all waiters. */
void uv_cond_broadcast(uv_cond_t* cond) {
  WakeAllConditionVariable(&cond->cond_var);
}
/* Atomically release {mutex} and wait; the mutex is re-held on return. */
void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
    abort();
}
/* Timed wait; {timeout} is in nanoseconds, converted here to the
 * millisecond granularity the Win32 API accepts. Returns 0 when signaled
 * or UV_ETIMEDOUT; any other failure aborts. */
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
    return 0;
  if (GetLastError() != ERROR_TIMEOUT)
    abort();
  return UV_ETIMEDOUT;
}
/* Thread-local storage key API: wrappers over Win32 TLS slots. */

/* Allocate a TLS slot; UV_ENOMEM when the per-process slot table is full. */
int uv_key_create(uv_key_t* key) {
  key->tls_index = TlsAlloc();
  if (key->tls_index == TLS_OUT_OF_INDEXES)
    return UV_ENOMEM;
  return 0;
}
void uv_key_delete(uv_key_t* key) {
  if (TlsFree(key->tls_index) == FALSE)
    abort();
  /* Poison the index so accidental reuse of a deleted key is detectable. */
  key->tls_index = TLS_OUT_OF_INDEXES;
}
/* Read the slot value. TlsGetValue returns NULL both for an unset slot and
 * on error, so GetLastError() disambiguates: only a real error aborts. */
void* uv_key_get(uv_key_t* key) {
  void* value;
  value = TlsGetValue(key->tls_index);
  if (value == NULL)
    if (GetLastError() != ERROR_SUCCESS)
      abort();
  return value;
}
void uv_key_set(uv_key_t* key, void* value) {
  if (TlsSetValue(key->tls_index, value) == FALSE)
    abort();
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More