ctx 60 src/fs-poll.c struct poll_ctx* ctx;
ctx 69 src/fs-poll.c ctx = calloc(1, sizeof(*ctx) + len);
ctx 71 src/fs-poll.c if (ctx == NULL)
ctx 74 src/fs-poll.c ctx->loop = loop;
ctx 75 src/fs-poll.c ctx->poll_cb = cb;
ctx 76 src/fs-poll.c ctx->interval = interval ? interval : 1;
ctx 77 src/fs-poll.c ctx->start_time = uv_now(loop);
ctx 78 src/fs-poll.c ctx->parent_handle = handle;
ctx 79 src/fs-poll.c memcpy(ctx->path, path, len + 1);
ctx 81 src/fs-poll.c if (uv_timer_init(loop, &ctx->timer_handle))
ctx 84 src/fs-poll.c ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
ctx 85 src/fs-poll.c uv__handle_unref(&ctx->timer_handle);
ctx 87 src/fs-poll.c if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb))
ctx 90 src/fs-poll.c handle->poll_ctx = ctx;
ctx 98 src/fs-poll.c struct poll_ctx* ctx;
ctx 103 src/fs-poll.c ctx = handle->poll_ctx;
ctx 104 src/fs-poll.c assert(ctx != NULL);
ctx 105 src/fs-poll.c assert(ctx->parent_handle != NULL);
ctx 106 src/fs-poll.c ctx->parent_handle = NULL;
ctx 112 src/fs-poll.c if (uv__is_active(&ctx->timer_handle))
ctx 113 src/fs-poll.c uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
ctx 127 src/fs-poll.c struct poll_ctx* ctx;
ctx 129 src/fs-poll.c ctx = container_of(timer, struct poll_ctx, timer_handle);
ctx 130 src/fs-poll.c assert(ctx->parent_handle != NULL);
ctx 131 src/fs-poll.c assert(ctx->parent_handle->poll_ctx == ctx);
ctx 132 src/fs-poll.c ctx->start_time = uv_now(ctx->loop);
ctx 134 src/fs-poll.c if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
ctx 141 src/fs-poll.c struct poll_ctx* ctx;
ctx 144 src/fs-poll.c ctx = container_of(req, struct poll_ctx, fs_req);
ctx 146 src/fs-poll.c if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */
ctx 147 src/fs-poll.c uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
ctx 153 src/fs-poll.c if (ctx->busy_polling != req->result) {
ctx 154 src/fs-poll.c ctx->poll_cb(ctx->parent_handle,
ctx 156 src/fs-poll.c &ctx->statbuf,
ctx 158 src/fs-poll.c ctx->busy_polling = req->result;
ctx 165 src/fs-poll.c if (ctx->busy_polling != 0)
ctx 166 src/fs-poll.c if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
ctx 167 src/fs-poll.c ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
ctx 169 src/fs-poll.c ctx->statbuf = *statbuf;
ctx 170 src/fs-poll.c ctx->busy_polling = 1;
ctx 175 src/fs-poll.c if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */
ctx 176 src/fs-poll.c uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
ctx 181 src/fs-poll.c interval = ctx->interval;
ctx 182 src/fs-poll.c interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
ctx 184 src/fs-poll.c if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
ctx 236 src/unix/fsevents.c FSEventStreamContext ctx;
ctx 242 src/unix/fsevents.c ctx.version = 0;
ctx 243 src/unix/fsevents.c ctx.info = loop;
ctx 244 src/unix/fsevents.c ctx.retain = NULL;
ctx 245 src/unix/fsevents.c ctx.release = NULL;
ctx 246 src/unix/fsevents.c ctx.copyDescription = NULL;
ctx 261 src/unix/fsevents.c &ctx,
ctx 368 src/unix/fsevents.c CFRunLoopSourceContext ctx;
ctx 403 src/unix/fsevents.c memset(&ctx, 0, sizeof(ctx));
ctx 404 src/unix/fsevents.c ctx.info = loop;
ctx 405 src/unix/fsevents.c ctx.perform = uv__cf_loop_cb;
ctx 406 src/unix/fsevents.c state->signal_source = CFRunLoopSourceCreate(NULL, 0, &ctx);
ctx 307 src/uv-common.c struct thread_ctx ctx;
ctx 310 src/uv-common.c ctx = *ctx_p;
ctx 312 src/uv-common.c ctx.entry(ctx.arg);
ctx 319 src/uv-common.c struct thread_ctx* ctx;
ctx 322 src/uv-common.c ctx = malloc(sizeof(*ctx));
ctx 323 src/uv-common.c if (ctx == NULL)
ctx 326 src/uv-common.c ctx->entry = entry;
ctx 327 src/uv-common.c ctx->arg = arg;
ctx 330 src/uv-common.c *tid = (HANDLE) _beginthreadex(NULL, 0, uv__thread_start, ctx, 0, NULL);
ctx 333 src/uv-common.c err = pthread_create(tid, NULL, uv__thread_start, ctx);
ctx 337 src/uv-common.c free(ctx);
ctx 44 test/benchmark-async.c struct ctx* ctx = container_of(handle, struct ctx, worker_async);
ctx 46 test/benchmark-async.c ASSERT(0 == uv_async_send(&ctx->main_async));
ctx 47 test/benchmark-async.c ctx->worker_sent++;
ctx 48 test/benchmark-async.c ctx->worker_seen++;
ctx 50 test/benchmark-async.c if (ctx->worker_sent >= NUM_PINGS)
ctx 51 test/benchmark-async.c uv_close((uv_handle_t*) &ctx->worker_async, NULL);
ctx 56 test/benchmark-async.c struct ctx* ctx = container_of(handle, struct ctx, main_async);
ctx 58 test/benchmark-async.c ASSERT(0 == uv_async_send(&ctx->worker_async));
ctx 59 test/benchmark-async.c ctx->main_sent++;
ctx 60 test/benchmark-async.c ctx->main_seen++;
ctx 62 test/benchmark-async.c if (ctx->main_sent >= NUM_PINGS)
ctx 63 test/benchmark-async.c uv_close((uv_handle_t*) &ctx->main_async, NULL);
ctx 68 test/benchmark-async.c struct ctx* ctx = arg;
ctx 69 test/benchmark-async.c ASSERT(0 == uv_async_send(&ctx->main_async));
ctx 70 test/benchmark-async.c ASSERT(0 == uv_run(ctx->loop, UV_RUN_DEFAULT));
ctx 75 test/benchmark-async.c struct ctx* threads;
ctx 76 test/benchmark-async.c struct ctx* ctx;
ctx 84 test/benchmark-async.c ctx = threads + i;
ctx 85 test/benchmark-async.c ctx->nthreads = nthreads;
ctx 86 test/benchmark-async.c ctx->loop = uv_loop_new();
ctx 87 test/benchmark-async.c ASSERT(ctx->loop != NULL);
ctx 88 test/benchmark-async.c ASSERT(0 == uv_async_init(ctx->loop, &ctx->worker_async, worker_async_cb));
ctx 89 test/benchmark-async.c ASSERT(0 == uv_async_init(uv_default_loop(), &ctx->main_async, main_async_cb));
ctx 90 test/benchmark-async.c ASSERT(0 == uv_thread_create(&ctx->thread, worker, ctx));
ctx 103 test/benchmark-async.c ctx = threads + i;
ctx 104 test/benchmark-async.c ASSERT(ctx->worker_sent == NUM_PINGS);
ctx 105 test/benchmark-async.c ASSERT(ctx->worker_seen == NUM_PINGS);
ctx 106 test/benchmark-async.c ASSERT(ctx->main_sent == (unsigned int) NUM_PINGS);
ctx 107 test/benchmark-async.c ASSERT(ctx->main_seen == (unsigned int) NUM_PINGS);
ctx 137 test/benchmark-multi-accept.c struct ipc_peer_ctx* ctx;
ctx 138 test/benchmark-multi-accept.c ctx = container_of(req, struct ipc_peer_ctx, write_req);
ctx 139 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->peer_handle, ipc_close_cb);
ctx 144 test/benchmark-multi-accept.c struct ipc_peer_ctx* ctx;
ctx 145 test/benchmark-multi-accept.c ctx = container_of(handle, struct ipc_peer_ctx, peer_handle);
ctx 146 test/benchmark-multi-accept.c free(ctx);
ctx 151 test/benchmark-multi-accept.c struct ipc_client_ctx* ctx;
ctx 152 test/benchmark-multi-accept.c ctx = container_of(req, struct ipc_client_ctx, connect_req);
ctx 154 test/benchmark-multi-accept.c ASSERT(0 == uv_read2_start((uv_stream_t*) &ctx->ipc_pipe,
ctx 161 test/benchmark-multi-accept.c struct ipc_client_ctx* ctx;
ctx 162 test/benchmark-multi-accept.c ctx = container_of(handle, struct ipc_client_ctx, ipc_pipe);
ctx 163 test/benchmark-multi-accept.c return uv_buf_init(ctx->scratch, sizeof(ctx->scratch));
ctx 171 test/benchmark-multi-accept.c struct ipc_client_ctx* ctx;
ctx 174 test/benchmark-multi-accept.c ctx = container_of(ipc_pipe, struct ipc_client_ctx, ipc_pipe);
ctx 178 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) ctx->server_handle));
ctx 180 test/benchmark-multi-accept.c ASSERT(0 == uv_pipe_init(loop, (uv_pipe_t*) ctx->server_handle, 0));
ctx 184 test/benchmark-multi-accept.c ASSERT(0 == uv_accept((uv_stream_t*) &ctx->ipc_pipe, ctx->server_handle));
ctx 185 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->ipc_pipe, NULL);
ctx 196 test/benchmark-multi-accept.c struct ipc_server_ctx ctx;
ctx 201 test/benchmark-multi-accept.c ctx.num_connects = num_servers;
ctx 204 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_init(loop, (uv_tcp_t*) &ctx.server_handle));
ctx 205 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_bind((uv_tcp_t*) &ctx.server_handle, listen_addr));
ctx 210 test/benchmark-multi-accept.c ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 1));
ctx 211 test/benchmark-multi-accept.c ASSERT(0 == uv_pipe_bind(&ctx.ipc_pipe, IPC_PIPE_NAME));
ctx 212 test/benchmark-multi-accept.c ASSERT(0 == uv_listen((uv_stream_t*) &ctx.ipc_pipe, 128, ipc_connection_cb));
ctx 218 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx.server_handle, NULL);
ctx 227 test/benchmark-multi-accept.c struct ipc_client_ctx ctx;
ctx 229 test/benchmark-multi-accept.c ctx.server_handle = server_handle;
ctx 230 test/benchmark-multi-accept.c ctx.server_handle->data = "server handle";
ctx 232 test/benchmark-multi-accept.c ASSERT(0 == uv_pipe_init(loop, &ctx.ipc_pipe, 1));
ctx 233 test/benchmark-multi-accept.c uv_pipe_connect(&ctx.connect_req,
ctx 234 test/benchmark-multi-accept.c &ctx.ipc_pipe,
ctx 242 test/benchmark-multi-accept.c struct server_ctx *ctx;
ctx 245 test/benchmark-multi-accept.c ctx = arg;
ctx 249 test/benchmark-multi-accept.c ASSERT(0 == uv_async_init(loop, &ctx->async_handle, sv_async_cb));
ctx 250 test/benchmark-multi-accept.c uv_unref((uv_handle_t*) &ctx->async_handle);
ctx 253 test/benchmark-multi-accept.c uv_sem_wait(&ctx->semaphore);
ctx 254 test/benchmark-multi-accept.c get_listen_handle(loop, (uv_stream_t*) &ctx->server_handle);
ctx 255 test/benchmark-multi-accept.c uv_sem_post(&ctx->semaphore);
ctx 258 test/benchmark-multi-accept.c ASSERT(0 == uv_listen((uv_stream_t*) &ctx->server_handle,
ctx 268 test/benchmark-multi-accept.c struct server_ctx* ctx;
ctx 269 test/benchmark-multi-accept.c ctx = container_of(handle, struct server_ctx, async_handle);
ctx 270 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->server_handle, NULL);
ctx 271 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->async_handle, NULL);
ctx 277 test/benchmark-multi-accept.c struct server_ctx* ctx;
ctx 279 test/benchmark-multi-accept.c ctx = container_of(server_handle, struct server_ctx, server_handle);
ctx 294 test/benchmark-multi-accept.c ctx->num_connects++;
ctx 311 test/benchmark-multi-accept.c struct client_ctx* ctx = container_of(req, struct client_ctx, connect_req);
ctx 312 test/benchmark-multi-accept.c uv_idle_start(&ctx->idle_handle, cl_idle_cb);
ctx 318 test/benchmark-multi-accept.c struct client_ctx* ctx = container_of(handle, struct client_ctx, idle_handle);
ctx 319 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->client_handle, cl_close_cb);
ctx 320 test/benchmark-multi-accept.c uv_idle_stop(&ctx->idle_handle);
ctx 325 test/benchmark-multi-accept.c struct client_ctx* ctx;
ctx 327 test/benchmark-multi-accept.c ctx = container_of(handle, struct client_ctx, client_handle);
ctx 329 test/benchmark-multi-accept.c if (--ctx->num_connects == 0) {
ctx 330 test/benchmark-multi-accept.c uv_close((uv_handle_t*) &ctx->idle_handle, NULL);
ctx 334 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_init(handle->loop, (uv_tcp_t*) &ctx->client_handle));
ctx 335 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
ctx 336 test/benchmark-multi-accept.c (uv_tcp_t*) &ctx->client_handle,
ctx 363 test/benchmark-multi-accept.c struct server_ctx* ctx = servers + i;
ctx 364 test/benchmark-multi-accept.c ASSERT(0 == uv_sem_init(&ctx->semaphore, 0));
ctx 365 test/benchmark-multi-accept.c ASSERT(0 == uv_thread_create(&ctx->thread_id, server_cb, ctx));
ctx 371 test/benchmark-multi-accept.c struct client_ctx* ctx = clients + i;
ctx 372 test/benchmark-multi-accept.c ctx->num_connects = NUM_CONNECTS / num_clients;
ctx 373 test/benchmark-multi-accept.c handle = (uv_tcp_t*) &ctx->client_handle;
ctx 376 test/benchmark-multi-accept.c ASSERT(0 == uv_tcp_connect(&ctx->connect_req,
ctx 380 test/benchmark-multi-accept.c ASSERT(0 == uv_idle_init(loop, &ctx->idle_handle));
ctx 391 test/benchmark-multi-accept.c struct server_ctx* ctx = servers + i;
ctx 392 test/benchmark-multi-accept.c uv_async_send(&ctx->async_handle);
ctx 393 test/benchmark-multi-accept.c ASSERT(0 == uv_thread_join(&ctx->thread_id));
ctx 394 test/benchmark-multi-accept.c uv_sem_destroy(&ctx->semaphore);
ctx 403 test/benchmark-multi-accept.c struct server_ctx* ctx = servers + i;
ctx 406 test/benchmark-multi-accept.c ctx->num_connects / time,
ctx 407 test/benchmark-multi-accept.c ctx->num_connects,
ctx 408 test/benchmark-multi-accept.c ctx->num_connects * 100.0 / NUM_CONNECTS);
ctx 49 test/test-ipc-send-recv.c static struct echo_ctx ctx;
ctx 66 test/test-ipc-send-recv.c ASSERT(pending == ctx.expected_type);
ctx 67 test/test-ipc-send-recv.c ASSERT(handle == &ctx.channel);
ctx 71 test/test-ipc-send-recv.c r = uv_pipe_init(ctx.channel.loop, &ctx.recv.pipe, 0);
ctx 73 test/test-ipc-send-recv.c r = uv_tcp_init(ctx.channel.loop, &ctx.recv.tcp);
ctx 78 test/test-ipc-send-recv.c r = uv_accept((uv_stream_t*)&ctx.channel, &ctx.recv.stream);
ctx 81 test/test-ipc-send-recv.c uv_close((uv_handle_t*)&ctx.channel, NULL);
ctx 82 test/test-ipc-send-recv.c uv_close(&ctx.send.handle, NULL);
ctx 83 test/test-ipc-send-recv.c uv_close(&ctx.recv.handle, NULL);
ctx 93 test/test-ipc-send-recv.c spawn_helper(&ctx.channel, &process, "ipc_send_recv_helper");
ctx 96 test/test-ipc-send-recv.c r = uv_write2(&ctx.write_req,
ctx 97 test/test-ipc-send-recv.c (uv_stream_t*)&ctx.channel,
ctx 99 test/test-ipc-send-recv.c &ctx.send.stream,
ctx 103 test/test-ipc-send-recv.c r = uv_read2_start((uv_stream_t*)&ctx.channel, alloc_cb, recv_cb);
ctx 118 test/test-ipc-send-recv.c ctx.expected_type = UV_NAMED_PIPE;
ctx 120 test/test-ipc-send-recv.c r = uv_pipe_init(uv_default_loop(), &ctx.send.pipe, 1);
ctx 123 test/test-ipc-send-recv.c r = uv_pipe_bind(&ctx.send.pipe, TEST_PIPENAME);
ctx 137 test/test-ipc-send-recv.c ctx.expected_type = UV_TCP;
ctx 139 test/test-ipc-send-recv.c r = uv_tcp_init(uv_default_loop(), &ctx.send.tcp);
ctx 142 test/test-ipc-send-recv.c r = uv_tcp_bind(&ctx.send.tcp, uv_ip4_addr("127.0.0.1", TEST_PORT));
ctx 157 test/test-ipc-send-recv.c uv_close(&ctx.recv.handle, NULL);
ctx 158 test/test-ipc-send-recv.c uv_close((uv_handle_t*)&ctx.channel, NULL);
ctx 169 test/test-ipc-send-recv.c ASSERT(handle == &ctx.channel);
ctx 175 test/test-ipc-send-recv.c r = uv_pipe_init(ctx.channel.loop, &ctx.recv.pipe, 0);
ctx 177 test/test-ipc-send-recv.c r = uv_tcp_init(ctx.channel.loop, &ctx.recv.tcp);
ctx 182 test/test-ipc-send-recv.c r = uv_accept((uv_stream_t*)handle, &ctx.recv.stream);
ctx 185 test/test-ipc-send-recv.c r = uv_write2(&ctx.write_req,
ctx 186 test/test-ipc-send-recv.c (uv_stream_t*)&ctx.channel,
ctx 188 test/test-ipc-send-recv.c &ctx.recv.stream,
ctx 200 test/test-ipc-send-recv.c memset(&ctx, 0, sizeof(ctx));
ctx 202 test/test-ipc-send-recv.c r = uv_pipe_init(uv_default_loop(), &ctx.channel, 1);
ctx 205 test/test-ipc-send-recv.c uv_pipe_open(&ctx.channel, 0);
ctx 206 test/test-ipc-send-recv.c ASSERT(uv_is_readable((uv_stream_t*)&ctx.channel));
ctx 207 test/test-ipc-send-recv.c ASSERT(uv_is_writable((uv_stream_t*)&ctx.channel));
ctx 208 test/test-ipc-send-recv.c ASSERT(!uv_is_closing((uv_handle_t*)&ctx.channel));
ctx 210 test/test-ipc-send-recv.c r = uv_read2_start((uv_stream_t*)&ctx.channel, alloc_cb, read2_cb);
ctx 54 test/test-signal.c struct signal_ctx* ctx = container_of(handle, struct signal_ctx, handle);
ctx 55 test/test-signal.c ASSERT(signum == ctx->signum);
ctx 57 test/test-signal.c if (++ctx->ncalls == NSIGNALS) {
ctx 58 test/test-signal.c if (ctx->stop_or_close == STOP)
ctx 60 test/test-signal.c else if (ctx->stop_or_close == CLOSE)
ctx 69 test/test-signal.c struct timer_ctx* ctx = container_of(handle, struct timer_ctx, handle);
ctx 71 test/test-signal.c raise(ctx->signum);
ctx 73 test/test-signal.c if (++ctx->ncalls == NSIGNALS)
ctx 78 test/test-signal.c static void start_watcher(uv_loop_t* loop, int signum, struct signal_ctx* ctx) {
ctx 79 test/test-signal.c ctx->ncalls = 0;
ctx 80 test/test-signal.c ctx->signum = signum;
ctx 81 test/test-signal.c ctx->stop_or_close = CLOSE;
ctx 82 test/test-signal.c ASSERT(0 == uv_signal_init(loop, &ctx->handle));
ctx 83 test/test-signal.c ASSERT(0 == uv_signal_start(&ctx->handle, signal_cb, signum));
ctx 87 test/test-signal.c static void start_timer(uv_loop_t* loop, int signum, struct timer_ctx* ctx) {
ctx 88 test/test-signal.c ctx->ncalls = 0;
ctx 89 test/test-signal.c ctx->signum = signum;
ctx 90 test/test-signal.c ASSERT(0 == uv_timer_init(loop, &ctx->handle));
ctx 91 test/test-signal.c ASSERT(0 == uv_timer_start(&ctx->handle, timer_cb, 5, 5));
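The pattern that recurs throughout these hits is a context struct that embeds the uv handle (or request) it owns; the callback receives only a pointer to the embedded member and recovers the enclosing struct with container_of, as in src/fs-poll.c line 129 or test/test-signal.c line 54. Below is a minimal, self-contained sketch of that pattern in isolation. It is not taken from libuv: the struct and function names are invented for illustration, and the container_of macro is spelled the way the libuv test sources define it.

/* container_of sketch: recover a context struct from a pointer to a member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
  ((type*) ((char*) (ptr) - offsetof(type, member)))

struct fake_handle {        /* stand-in for a uv_timer_t, uv_signal_t, ... */
  int id;
};

struct my_ctx {             /* stand-in for poll_ctx, signal_ctx, ... */
  int ncalls;
  struct fake_handle handle;  /* embedded member, like ctx->timer_handle */
};

/* The callback sees only the embedded member, not the context. */
static void on_event(struct fake_handle* handle) {
  struct my_ctx* ctx = container_of(handle, struct my_ctx, handle);
  ctx->ncalls++;
  printf("handle %d seen %d time(s)\n", handle->id, ctx->ncalls);
}

int main(void) {
  struct my_ctx ctx = { 0, { 42 } };
  on_event(&ctx.handle);    /* prints: handle 42 seen 1 time(s) */
  return 0;
}

The payoff, visible in the listing above, is that one callback signature serves many independent contexts without any global lookup table: whoever owns the context simply registers the address of the embedded handle and gets the whole struct back for free inside the callback.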