// src/unix/core.c: line 304
// uv_run
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    // Update the loop's notion of "now".
    uv__update_time(loop);
    // Run the callbacks of timers that have expired.
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    // Run idle callbacks.
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    // epoll wait & thread pool: run I/O callbacks.
    uv__io_poll(loop, timeout);
    // Run check callbacks.
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
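Before digging into the helpers, it may help to see those phases from the caller's side. The sketch below uses only the ordinary public libuv API, but assumes the v1.x single-argument callback signatures (in v0.10.x the callbacks take an extra int status argument). It drives uv_run with one idle handle and one timer, so the idle, poll and timer phases each get exercised:

#include <stdio.h>
#include <uv.h>

static void on_idle(uv_idle_t* handle) {
  // Runs in the uv__run_idle() phase; stop it so the loop may block in
  // uv__io_poll() instead of spinning with a zero timeout.
  printf("idle phase\n");
  uv_idle_stop(handle);
}

static void on_timer(uv_timer_t* handle) {
  // Runs in the uv__run_timers() phase once the 100 ms timeout expires.
  printf("timer phase\n");
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_idle_t idle;
  uv_timer_t timer;

  uv_idle_init(loop, &idle);
  uv_idle_start(&idle, on_idle);
  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, on_timer, 100, 0);  // 100 ms timeout, no repeat

  // Returns once uv__loop_alive() reports no more active handles.
  return uv_run(loop, UV_RUN_DEFAULT);
}

Because the idle handle is stopped during the first iteration, uv_backend_timeout() is then free to return a nonzero value and the loop sleeps in epoll until the timer is due.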
// src/unix/core.c: line 697
// uv__run_pending
static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  while (!QUEUE_EMPTY(&loop->pending_queue)) {
    q = QUEUE_HEAD(&loop->pending_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, UV__POLLOUT);
  }

  return 1;
}
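Who puts watchers on pending_queue? Deferred work, mostly writes that could not complete in one go, which as I understand it is why the callback above is invoked with UV__POLLOUT. The stream and udp code defers a watcher to the next loop turn through uv__io_feed(); a from-memory sketch of that helper:

void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  /* Queue the watcher for the next uv__run_pending() pass, at most once. */
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}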
// src/unix/core.c: line 250
// uv__run_closing_handles
static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}
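closing_handles is a singly linked list threaded through handle->next_closing. uv_close() pushes onto its head, which is why close callbacks always run at the end of the loop iteration in which uv_close() was called. A sketch, from memory and not verbatim, of the tail end of uv_close():

  /* End of uv_close(), sketched: mark the handle as closing and prepend it
   * to the list that uv__run_closing_handles() drains above. */
  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;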
// src/unix/linux-core.c: line 138
// uv__io_poll
// Most of the event loop's mysteries can be answered in this function:
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct uv__epoll_event events[1024];
  struct uv__epoll_event* pe;
  struct uv__epoll_event e;
  QUEUE* q;
  uv__io_t* w;
  uint64_t sigmask;
  uint64_t base;
  uint64_t diff;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  static int no_epoll_wait;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data = w->fd;

    if (w->events == 0)
      op = UV__EPOLL_CTL_ADD;
    else
      op = UV__EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == UV__EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  sigmask = 0;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF)
    sigmask |= 1 << (SIGPROF - 1);

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  for (;;) {
    if (no_epoll_wait || sigmask) {
      nfds = uv__epoll_pwait(loop->backend_fd,
                             events,
                             ARRAY_SIZE(events),
                             timeout,
                             sigmask);
    } else {
      nfds = uv__epoll_wait(loop->backend_fd,
                            events,
                            ARRAY_SIZE(events),
                            timeout);
      if (nfds == -1 && errno == ENOSYS) {
        no_epoll_wait = 1;
        continue;
      }
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also, filters out events that the user has not
       * requested us to watch.
       */
      pe->events &= w->pevents | UV__POLLERR | UV__POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again. By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read. If anything, libuv is to blame here. The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event. We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == UV__EPOLLERR || pe->events == UV__EPOLLHUP)
        pe->events |= w->pevents & (UV__EPOLLIN | UV__EPOLLOUT);

      if (pe->events != 0) {
        w->cb(loop, w, pe->events);
        nevents++;
      }
    }
    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
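Stripped of libuv's bookkeeping, uv__io_poll follows the classic epoll pattern: flush pending epoll_ctl changes, block in epoll_wait up to the computed timeout, then dispatch each ready fd to its callback. A minimal self-contained version of that skeleton (using plain sys/epoll.h here, unlike libuv, as explained below) looks like this:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void) {
  struct epoll_event e, events[64];
  int epfd, nfds, i;

  epfd = epoll_create1(0);                   /* cf. loop->backend_fd */
  if (epfd == -1)
    return 1;

  memset(&e, 0, sizeof(e));
  e.events = EPOLLIN;                        /* cf. w->pevents */
  e.data.fd = STDIN_FILENO;                  /* cf. e.data = w->fd */
  /* Registering interest, cf. draining loop->watcher_queue above. */
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &e))
    return 1;

  /* Block up to 5 s, cf. the timeout from uv_backend_timeout(). */
  nfds = epoll_wait(epfd, events, 64, 5000);
  for (i = 0; i < nfds; i++)                 /* cf. dispatching to w->cb */
    printf("fd %d ready, events 0x%x\n",
           events[i].data.fd, (unsigned) events[i].events);

  close(epfd);
  return 0;
}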
This post is a continuation of https://github.com/abbshr/abbshr.github.io/issues/26; a few important topics were left out there, such as how epoll is actually used inside libuv and how the watcher functions are defined.

Let's start by revisiting the event loop's entry function, uv_run, quoted above (note: libuv v0.10.x).
Calling uv_run requires a uv_loop_t* argument. In the official examples this loop comes from uv_default_loop, which lazily initializes a statically allocated loop through the internal uv__loop_init function.
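From memory, uv_default_loop is shaped roughly like this (a sketch, not the verbatim v0.10.x source; in that version uv__loop_init takes a second flag marking the default loop):

static uv_loop_t default_loop_struct;
static uv_loop_t* default_loop_ptr;

uv_loop_t* uv_default_loop(void) {
  if (default_loop_ptr != NULL)
    return default_loop_ptr;  /* already initialized by an earlier call */

  if (uv__loop_init(&default_loop_struct, /* default_loop */ 1))
    return NULL;              /* initialization failed */

  default_loop_ptr = &default_loop_struct;
  return default_loop_ptr;
}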
src/unix/linux-syscalls.c implements the uv__epoll_* functions, and src/unix/linux-core.c builds the higher-level wrapper (uv__io_poll above) on top of them. The macros defined in the Linux kernel headers are prefixed with __NR_; the system call numbers live in /usr/include/i386-linux-gnu/asm/unistd_32.h (try: cat -n unistd_32.h | grep epoll). From this we can see that libuv does not reach epoll through #include <sys/epoll.h> at all: it issues the system calls directly!
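Concretely, each uv__epoll_* wrapper in linux-syscalls.c is a thin shim over syscall(2). Roughly, as a from-memory sketch rather than the verbatim file (struct uv__epoll_event is libuv's own declaration from linux-syscalls.h):

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

int uv__epoll_wait(int epfd,
                   struct uv__epoll_event* events,
                   int nevents,
                   int timeout) {
#if defined(__NR_epoll_wait)
  return syscall(__NR_epoll_wait, epfd, events, nevents, timeout);
#else
  /* uv__io_poll() sees ENOSYS, sets no_epoll_wait and switches to
   * uv__epoll_pwait() on the next pass. */
  errno = ENOSYS;
  return -1;
#endif
}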
What remains are the watcher-related functions involved in uv_run, all quoted above:

src/unix/core.c: 697, uv__run_pending
src/unix/core.c: 250, uv__run_closing_handles
src/unix/linux-core.c: 138, uv__io_poll
src/unix/loop-watcher.c, which defines the operations on the loop watchers (idle/prepare/check; see the sketch after this list)
src/queue.h, which defines the queue operations used throughout libuv (also sketched below)
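Two of those files are worth a closer look. src/queue.h implements an intrusive circular doubly-linked list where a QUEUE is just a pair of pointers; a condensed from-memory sketch of the core macros (simplified, not the verbatim header):

#include <stddef.h>

typedef void *QUEUE[2];

#define QUEUE_NEXT(q)   (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q)   (*(QUEUE **) &((*(q))[1]))
#define QUEUE_HEAD(q)   (QUEUE_NEXT(q))
#define QUEUE_EMPTY(q)  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))

/* An empty queue points at itself. */
#define QUEUE_INIT(q)                                                         \
  do {                                                                        \
    QUEUE_NEXT(q) = (q);                                                      \
    QUEUE_PREV(q) = (q);                                                      \
  } while (0)

#define QUEUE_INSERT_TAIL(h, q)                                               \
  do {                                                                        \
    QUEUE_NEXT(q) = (h);                                                      \
    QUEUE_PREV(q) = QUEUE_PREV(h);                                            \
    QUEUE_NEXT(QUEUE_PREV(q)) = (q);                                          \
    QUEUE_PREV(h) = (q);                                                      \
  } while (0)

#define QUEUE_REMOVE(q)                                                       \
  do {                                                                        \
    QUEUE_NEXT(QUEUE_PREV(q)) = QUEUE_NEXT(q);                                \
    QUEUE_PREV(QUEUE_NEXT(q)) = QUEUE_PREV(q);                                \
  } while (0)

/* Recover the struct embedding the QUEUE field: the container_of trick
 * behind QUEUE_DATA(q, uv__io_t, pending_queue) seen earlier. */
#define QUEUE_DATA(ptr, type, field)                                          \
  ((type *) ((char *) (ptr) - offsetof(type, field)))

And src/unix/loop-watcher.c stamps out the idle, prepare and check handle types from a single macro, so uv__run_idle, uv__run_prepare and uv__run_check in uv_run are all generated code. A condensed sketch of that part (again from memory; init/start/stop bookkeeping omitted):

#define UV_LOOP_WATCHER_DEFINE(name, type)                                    \
  void uv__run_##name(uv_loop_t* loop) {                                      \
    uv_##name##_t* h;                                                         \
    QUEUE* q;                                                                 \
    /* Walk the loop's list of started name-watchers and fire each cb. */     \
    QUEUE_FOREACH(q, &loop->name##_handles) {                                 \
      h = QUEUE_DATA(q, uv_##name##_t, queue);                                \
      h->name##_cb(h);                                                        \
    }                                                                         \
  }

UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
UV_LOOP_WATCHER_DEFINE(check, CHECK)
UV_LOOP_WATCHER_DEFINE(idle, IDLE)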
(End)