/* Configuration and feature-selection macros for the thread subsystem.
 * NOTE(review): the leading integer on each line below appears to be a
 * residual source line number left over from extraction, not a C token;
 * this whole file is a sparse excerpt of a larger thread implementation. */
52 #ifndef USE_NATIVE_THREAD_PRIORITY
/* Default: emulate thread priority inside the VM rather than delegating
 * to the platform scheduler; Ruby-level priority is clamped to [-3, 3]. */
53 #define USE_NATIVE_THREAD_PRIORITY 0
54 #define RUBY_THREAD_PRIORITY_MAX 3
55 #define RUBY_THREAD_PRIORITY_MIN -3
/* Thread debug tracing is compiled out by default (see thread_debug below). */
59 #define THREAD_DEBUG 0
/* Internal pseudo-signal values (tagged fixnums) used to ask a thread to
 * die; compared against the thread's pending "signal" slot elsewhere. */
73 #define eKillSignal INT2FIX(0)
74 #define eTerminateSignal INT2FIX(1)
/* Pre-allocated exception raised when a stream is closed while a thread
 * is blocked on it. */
77 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
87 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
/* GVL (global VM lock) helper macros.
 * RB_GC_SAVE_MACHINE_CONTEXT snapshots machine registers and the stack
 * end so the GC can scan this thread's stack while it runs without the
 * GVL. GVL_UNLOCK_BEGIN releases the GVL around a blocking region.
 * NOTE(review): these multi-line macros are truncated in this excerpt —
 * their do/while wrappers and closing lines are not visible here, so the
 * text below is not complete as written. */
100 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
102 rb_gc_save_machine_context(th); \
103 SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
106 #define GVL_UNLOCK_BEGIN() do { \
107 rb_thread_t *_th_stored = GET_THREAD(); \
108 RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
109 gvl_release(_th_stored->vm);
/* GVL_UNLOCK_END re-acquires the GVL and restores the current-thread
 * pointer saved by GVL_UNLOCK_BEGIN. blocking_region_begin records the
 * previous thread status, installs the unblock function, marks the thread
 * STOPPED, saves the machine context, and drops the GVL; BLOCKING_REGION
 * wraps a blocking call between begin/end and re-checks interrupts. */
111 #define GVL_UNLOCK_END() \
112 gvl_acquire(_th_stored->vm, _th_stored); \
113 rb_thread_set_current(_th_stored); \
116 #define blocking_region_begin(th, region, func, arg) \
118 (region)->prev_status = (th)->status; \
119 set_unblock_function((th), (func), (arg), &(region)->oldubf); \
120 (th)->blocking_region_buffer = (region); \
121 (th)->status = THREAD_STOPPED; \
122 thread_debug("enter blocking region (%p)\n", (void *)(th)); \
123 RB_GC_SAVE_MACHINE_CONTEXT(th); \
124 gvl_release((th)->vm); \
127 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
128 rb_thread_t *__th = GET_THREAD(); \
129 struct rb_blocking_region_buffer __region; \
130 blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
132 blocking_region_end(__th, &__region); \
133 RUBY_VM_CHECK_INTS(); \
/* Debug-logging front end. With variadic-macro support, thread_debug()
 * forwards __FILE__/__LINE__ so messages carry their call site. */
137 #ifdef HAVE_VA_ARGS_MACRO
138 void rb_thread_debug(
const char *file,
int line,
const char *fmt, ...);
139 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
140 #define POSITION_FORMAT "%s:%d:"
141 #define POSITION_ARGS ,file, line
/* Fallback without variadic macros: no file/line information, so the
 * POSITION_* macros expand to nothing. NOTE(review): the #else separating
 * the two branches is not visible in this excerpt. */
143 void rb_thread_debug(
const char *fmt, ...);
144 #define thread_debug rb_thread_debug
145 #define POSITION_FORMAT
146 #define POSITION_ARGS
/* THREAD_DEBUG < 0 selects a runtime-togglable flag (with Ruby-level
 * accessors, fragments below); otherwise the flag is a compile-time
 * constant and thread_debug compiles to dead code. */
149 # if THREAD_DEBUG < 0
150 static int rb_thread_debug_enabled;
161 rb_thread_s_debug(
void)
163 return INT2NUM(rb_thread_debug_enabled);
177 rb_thread_debug_enabled =
RTEST(val) ?
NUM2INT(val) : 0;
181 # define rb_thread_debug_enabled THREAD_DEBUG
/* if(0)printf: the compiler type-checks the format args but emits nothing. */
184 #define thread_debug if(0)printf
/* Non-IA64 builds drop the register-stack argument from
 * thread_start_func_2 via this macro rename. */
188 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
191 VALUE *register_stack_start));
/* Platform-specific serialized debug output: each variant takes a mutex,
 * prints the thread id plus the pre-formatted buffer `buf`, and releases
 * the mutex. Win32 variant (WaitForSingleObject/ReleaseMutex): */
197 #define DEBUG_OUT() \
198 WaitForSingleObject(&debug_mutex, INFINITE); \
199 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
201 ReleaseMutex(&debug_mutex);
/* pthreads variant; pthread_self() is printed as a VALUE-width hex id. */
203 #elif defined(HAVE_PTHREAD_H)
206 #define DEBUG_OUT() \
207 pthread_mutex_lock(&debug_mutex); \
208 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
210 pthread_mutex_unlock(&debug_mutex);
/* No recognized threading API: refuse to build. */
213 #error "unsupported thread type"
/* One-shot lazy initialization flag for debug_mutex: starts at 1 and is
 * cleared the first time rb_thread_debug runs (see below).
 * NOTE(review): this lazy init is itself unsynchronized — safe only if
 * the first debug call happens before additional threads exist. */
217 static int debug_mutex_initialized = 1;
/* rb_thread_debug body fragments: early-out when disabled, then
 * initialize the mutex exactly once. */
222 #ifdef HAVE_VA_ARGS_MACRO
223 const char *file,
int line,
225 const char *fmt, ...)
230 if (!rb_thread_debug_enabled)
return;
232 if (debug_mutex_initialized == 1) {
233 debug_mutex_initialized = 0;
234 native_mutex_initialize(&debug_mutex);
/* Fragments of separate low-level lock helpers (unlock/destroy on a
 * native mutex passed in as `lock`). */
255 native_mutex_unlock(lock);
261 native_mutex_destroy(lock);
/* Fragments of the thread-termination routines (terminate_i and
 * rb_thread_terminate_all); intervening lines are missing from this
 * excerpt. terminate_i treats the main thread specially. */
313 if (th != main_thread) {
320 thread_debug(
"terminate_i: main thread (%p)\n", (
void *)th);
/* Held mutexes must have been released before termination; a leftover
 * keeping_mutexes entry is a VM bug. */
350 if (err)
rb_bug(
"invalid keeping_mutexes: %s", err);
/* rb_thread_terminate_all may only run on the main thread. */
361 rb_bug(
"rb_thread_terminate_all: called by child thread (%p, %p)",
368 thread_debug(
"rb_thread_terminate_all (main thread: %p)\n", (
void *)th);
/* Fragments of thread cleanup / creation / startup. The register-stack
 * fields are IA64-specific (backing store pointer bookkeeping). */
391 th->machine_register_stack_start = th->machine_register_stack_end = 0;
412 native_thread_destroy(th);
/* thread_start_func_2 fragments: set up the native stack, optionally an
 * alternate signal stack, bind the rb_thread_t to the native thread, then
 * take the GVL before running Ruby code. */
420 native_thread_init_stack(th);
432 # ifdef USE_SIGALTSTACK
435 rb_register_sigaltstack(th);
438 ruby_thread_set_native(th);
442 th->machine_register_stack_start = register_stack_start;
446 gvl_acquire(th->
vm, th);
448 thread_debug(
"thread start (get lock): %p\n", (
void *)th);
/* A dying thread must not still be parked on a Mutex#lock. */
505 rb_bug(
"thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
/* Waking joiners: the main thread gets a nil errinfo so the process does
 * not re-raise the dying thread's exception through join. */
517 if (join_th == main_th) errinfo =
Qnil;
519 switch (join_th->
status) {
/* thread creation fragments. */
554 "can't start a new thread (frozen ThreadGroup)");
572 err = native_thread_create(th);
588 if (
GET_VM()->inhibit_thread_creation)
/* Sentinel "wait forever" duration for join/sleep timeouts (seconds). */
650 #define DELAY_INFTY 1E30
/* Thread#join result fragment: returns the joined thread's self VALUE. */
745 return target_th->
self;
/* Sleep-interval fragment: split a double into a timeval's microseconds. */
835 time.
tv_usec = (int)((d - (
int)d) * 1e6);
860 }
while (th->
status == status);
/* Monotonic clock is preferred for timeouts when available, so wall-clock
 * adjustments do not distort sleep durations. */
867 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
870 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
895 native_sleep(th, &tv);
929 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
932 if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
/* Thread.pass / scheduling fragments: yield then re-take the GVL. */
1025 gvl_yield(th->
vm, th);
1047 gvl_acquire(th->
vm, th);
1049 thread_debug(
"leave blocking region (%p)\n", (
void *)th);
1050 remove_signal_thread_list(th);
/* rb_thread_blocking_region-style wrappers: each saves errno across the
 * GVL release/re-acquire, because taking the GVL can run code that
 * clobbers errno before the caller inspects it. */
1070 int saved_errno =
errno;
1075 errno = saved_errno;
1121 int saved_errno = 0;
1131 saved_errno =
errno;
1133 errno = saved_errno;
1143 int saved_errno = 0;
1148 saved_errno =
errno;
1151 errno = saved_errno;
/* rb_thread_call_with_gvl error paths: must be called from a registered
 * Ruby thread that does NOT currently hold the GVL. */
1212 fprintf(stderr,
"[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1220 rb_bug(
"rb_thread_call_with_gvl: called by a thread which has GVL.");
/* Interrupt-flag decoding: bit 0x01 = timer tick, bit 0x04 = finalizer
 * request (other bits handled in lines not shown here). */
1281 int timer_interrupt = interrupt & 0x01;
1282 int finalizer_interrupt = interrupt & 0x04;
1310 if (finalizer_interrupt) {
1314 if (timer_interrupt) {
/* NOTE(review): 250 * 1000 us = 250 ms — presumably the base time-slice
 * budget before forcing a thread switch; confirm against full source. */
1315 unsigned long limits_us = 250 * 1000;
/* Deprecated entry point kept only to fail loudly. */
1349 rb_bug(
"deprecated function rb_gc_mark_threads is called");
/* Alternate signal stack support requires POSIX signals, SIGSEGV and
 * sigaltstack(2); used to survive stack-overflow SEGVs. */
1401 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1402 #define USE_SIGALTSTACK
1409 #ifdef USE_SIGALTSTACK
/* True when a thread is stopped inside a blocking region whose unblock
 * function is ubf_select, i.e. it is parked in select() on a fd.
 * NOTE(review): the final conjunct of this macro is not visible here. */
1437 #define THREAD_IO_WAITING_P(th) ( \
1438 ((th)->status == THREAD_STOPPED || \
1439 (th)->status == THREAD_STOPPED_FOREVER) && \
1440 (th)->blocking_region_buffer && \
1441 (th)->unblock.func == ubf_select && \
/* Deadlock-ish warning when the only thread stops itself. */
1671 "stopping only thread\n\tnote: use sleep to stop forever");
/* Thread#inspect fragment: "#<ClassName:0xaddr status>". */
2032 str =
rb_sprintf(
"#<%s:%p %s>", cname, (
void *)thread, status);
/* Priority is forwarded to the native scheduler only when
 * USE_NATIVE_THREAD_PRIORITY is enabled (default is VM emulation). */
2264 #if USE_NATIVE_THREAD_PRIORITY
2266 native_thread_apply_priority(th);
/* rb_fd_* family (HAVE_RB_FD_INIT branch): a heap-allocated, growable
 * fd_set replacement so select() can handle descriptors >= FD_SETSIZE.
 * Sizes are computed in fd_mask words via howmany(), but never below
 * sizeof(fd_set) so the buffer is always usable as a plain fd_set.
 * NOTE(review): every function here is fragmentary in this excerpt. */
2282 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
/* rb_fd_init fragment: allocate and zero one fd_set to start. */
2315 fds->fdset =
ALLOC(fd_set);
2316 FD_ZERO(fds->fdset);
/* rb_fd_init_copy fragment: size from the source's max fd, clamped up. */
2322 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2324 if (size <
sizeof(fd_set))
2325 size =
sizeof(fd_set);
2326 dst->maxfd = src->maxfd;
2328 memcpy(dst->fdset, src->fdset, size);
/* rb_fd_term fragment: release the heap buffer. */
2334 if (fds->fdset)
xfree(fds->fdset);
/* rb_fd_zero fragment: clear only the words actually in use. */
2343 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
/* rb_fd_set fragment: grow (m = needed, o = current), zero the newly
 * added tail, and bump maxfd. */
2349 size_t m = howmany(n + 1, NFDBITS) *
sizeof(fd_mask);
2350 size_t o = howmany(fds->maxfd, NFDBITS) *
sizeof(fd_mask);
2352 if (m <
sizeof(fd_set)) m =
sizeof(fd_set);
2353 if (o <
sizeof(fd_set)) o =
sizeof(fd_set);
2356 fds->fdset =
xrealloc(fds->fdset, m);
2357 memset((
char *)fds->fdset + o, 0, m - o);
2359 if (n >= fds->maxfd) fds->maxfd = n + 1;
/* rb_fd_clr / rb_fd_isset fragments: out-of-range fds are simply absent. */
2372 if (n >= fds->maxfd)
return;
2379 if (n >= fds->maxfd)
return 0;
2380 return FD_ISSET(n, fds->fdset) != 0;
/* rb_fd_copy (from a plain fd_set, bounded by max) fragment. */
2386 size_t size = howmany(max, NFDBITS) *
sizeof(fd_mask);
2388 if (size <
sizeof(fd_set)) size =
sizeof(fd_set);
2390 dst->fdset =
xrealloc(dst->fdset, size);
2391 memcpy(dst->fdset, src, size);
/* rb_fd_rcopy fragment: copy back into a caller's fixed-size fd_set;
 * the oversize case is handled in lines not shown here. */
2397 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2399 if (size >
sizeof(fd_set)) {
2402 memcpy(dst,
rb_fd_ptr(src),
sizeof(fd_set));
/* rb_fd_dup fragment. */
2408 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
2410 if (size <
sizeof(fd_set))
2411 size =
sizeof(fd_set);
2412 dst->maxfd = src->maxfd;
2413 dst->fdset =
xrealloc(dst->fdset, size);
2414 memcpy(dst->fdset, src->fdset, size);
/* rb_fd_select fragment: delegate to the real select(2). */
2433 return select(n, r, w, e, timeout);
/* Route the standard FD_* macros through the growable implementation
 * for the rest of this translation unit. */
2441 #define FD_ZERO(f) rb_fd_zero(f)
2442 #define FD_SET(i, f) rb_fd_set((i), (f))
2443 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2444 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
/* Win32 variant: Winsock fd_set is an array of SOCKETs plus fd_count, so
 * the growable set reallocates the fd_array by capacity instead of using
 * bitmask words. Fragmentary in this excerpt. */
2446 #elif defined(_WIN32)
2452 set->fdset =
ALLOC(fd_set);
2453 FD_ZERO(set->fdset);
/* rb_fd_rcopy fragment: refuse / clamp when the source exceeds the
 * destination's FD_SETSIZE or current count (handling not shown). */
2470 if (max >
FD_SETSIZE || max > dst->fd_count) {
2474 memcpy(dst->fd_array, src->fdset->fd_array, max);
2475 dst->fd_count =
max;
/* rb_fd_set fragment: linear scan for an existing entry, grow capacity
 * when full, then append the socket. */
2492 for (i = 0; i <
set->fdset->fd_count; i++) {
2493 if (set->fdset->fd_array[i] == s) {
2497 if (set->fdset->fd_count >= (
unsigned)set->capa) {
2499 set->fdset =
xrealloc(set->fdset,
sizeof(
unsigned int) +
sizeof(SOCKET) * set->capa);
2501 set->fdset->fd_array[
set->fdset->fd_count++] = s;
/* Redirect FD_* macros to the growable Win32 implementation. */
2509 #define FD_ZERO(f) rb_fd_zero(f)
2510 #define FD_SET(i, f) rb_fd_set((i), (f))
2511 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2512 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
/* Default branch (plain fd_set): copy-back is a struct assignment. */
2515 #define rb_fd_rcopy(d, s) (*(d) = *(s))
/* do_select and friends — fragmentary. On Cygwin, select is not reliably
 * interruptible, so the code polls in 100 ms slices and recomputes the
 * remaining timeout against a wall-clock limit. */
2518 #if defined(__CYGWIN__)
2555 # if defined(__CYGWIN__)
2560 # if defined(__CYGWIN__)
/* Compute the absolute deadline (seconds as double) = start + timeout. */
2562 limit = (double)start_time.
tv_sec + (
double)start_time.
tv_usec*1e-6;
2566 limit += (double)timeout->
tv_sec+(
double)timeout->
tv_usec*1e-6;
/* Work on a local copy so the caller's timeout is not mutated. */
2567 wait_rest = *timeout;
2568 timeout = &wait_rest;
2581 #if defined(__CYGWIN__)
/* Poll in at most 100 ms slices so interrupts are noticed promptly. */
2587 wait_100ms.
tv_usec = 100 * 1000;
2590 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
/* Capture errno immediately — later calls may overwrite it. */
2594 if (result < 0) lerrno =
errno;
2595 if (result != 0)
break;
/* Subtract elapsed time from the remaining timeout; bail out when the
 * deadline has passed. */
2606 subtract_tv(&elapsed, &start_time);
2608 if (!subtract_tv(timeout, &elapsed)) {
2612 if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2614 }
while (__th->interrupt_flag == 0);
2616 }
while (result == 0 && !finish);
/* Non-Cygwin paths: Win32 uses a native helper; others call select
 * (via rb_fd_select) directly, preserving errno the same way. */
2618 #elif defined(_WIN32)
2622 result = native_fd_select(n, read, write, except, timeout, th);
2623 if (result < 0) lerrno =
errno;
2628 result =
rb_fd_select(n, read, write, except, timeout);
2629 if (result < 0) lerrno =
errno;
/* Recompute a timeval from a remaining-seconds double after a retry. */
2651 wait_rest.
tv_sec = (
unsigned int)d;
2652 wait_rest.
tv_usec = (int)((d-(
double)wait_rest.
tv_sec)*1e6);
/* rb_thread_wait_fd_rw fragments: trace entry/exit, retry until ready. */
2679 thread_debug(
"rb_thread_wait_fd_rw(%d, %s)\n", fd, read ?
"read" :
"write");
2685 while (result <= 0) {
2693 thread_debug(
"rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ?
"read" :
"write");
/* rb_thread_select fragments: with no fd sets this is just a sleep;
 * otherwise delegate to do_select. */
2757 if (!read && !write && !except) {
2775 return do_select(max, read, write, except, timeout);
/* poll(2)-based single-fd wait, used on Linux where poll handles fds
 * beyond FD_SETSIZE. Fragmentary in this excerpt. */
2783 #if defined(HAVE_POLL) && defined(linux)
/* Event masks: readable / writable / exceptional conditions. HUP and ERR
 * are folded into both read and write sets so callers wake on errors. */
2790 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2791 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2792 #define POLLEX_SET (POLLPRI)
/* Extremes of time_t regardless of its signedness. */
2794 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2795 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
/* ppoll emulation for systems lacking it: convert the timespec to
 * milliseconds and fall back to plain poll (sigmask is ignored;
 * overflow clamping handled in lines not shown). */
2799 int ppoll(
struct pollfd *fds, nfds_t nfds,
2800 const struct timespec *ts,
const sigset_t *sigmask)
2811 tmp2 = ts->
tv_nsec / (1000 * 1000);
2815 timeout_ms = tmp + tmp2;
2820 return poll(fds, nfds, timeout_ms);
/* rb_wait_for_single_fd fragments: one pollfd, requested events, errno
 * captured immediately after the call. */
2845 fds.events = (short)events;
2850 result = ppoll(&fds, 1, timeout, NULL);
2851 if (result < 0) lerrno =
errno;
/* POLLNVAL means the fd was closed/invalid while waiting. */
2876 if (fds.revents & POLLNVAL) {
/* Translate revents back into the caller's event bits. */
2886 if (fds.revents & POLLIN_SET)
2888 if (fds.revents & POLLOUT_SET)
2890 if (fds.revents & POLLEX_SET)
/* Conservative GC stack-end capture: the address of a local approximates
 * the current stack extent; IA64 also records the register backing store. */
2975 #ifdef USE_CONSERVATIVE_STACK_END
2980 *stack_end_p = &stack_end;
2989 th->machine_register_stack_end = rb_ia64_bsp();
/* Timer-interrupt delivery guards (profiling hook, GC in progress). */
3021 if (vm->prove_profile.enable) {
3024 if (vm->during_gc) {
/* Timer thread lifecycle: stop/reset around fork or shutdown, then
 * recreate. */
3034 if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3035 native_reset_timer_thread();
3042 native_reset_timer_thread();
3049 rb_thread_create_timer_thread();
/* Coverage-array reset fragment. */
3070 if (
RTEST(coverages)) {
/* ThreadGroup memsize callback fragment: size of struct thgroup or 0. */
3145 return ptr ?
sizeof(
struct thgroup) : 0;
/* ThreadGroup#add error when the source group is enclosed. */
3324 "can't move from the enclosed thread group");
/* Mutex implementation. GetMutexPtr unwraps the typed data struct. */
3357 #define GetMutexPtr(obj, tobj) \
3358 TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
/* No mark function: NOTE(review): presumably rb_mutex_t holds no VALUE
 * references that need GC marking — confirm against the struct. */
3362 #define mutex_mark NULL
/* mutex_free fragment: a mutex still held at free time is a bug; then
 * tear down the native mutex and condvar. */
3372 if (err)
rb_bug(
"%s", err);
3374 native_mutex_destroy(&mutex->
lock);
3375 native_cond_destroy(&mutex->
cond);
/* mutex_alloc fragment: monotonic-clock condvar for timed waits. */
3409 native_mutex_initialize(&mutex->
lock);
3410 native_cond_initialize(&mutex->
cond, RB_CONDATTR_CLOCK_MONOTONIC);
/* Mutex#try_lock fragment: take the native lock only to inspect/claim
 * the owner field, never blocking on the Ruby-level mutex. */
3472 native_mutex_lock(&mutex->
lock);
3473 if (mutex->
th == 0) {
3479 native_mutex_unlock(&mutex->
lock);
/* lock_func fragment: wait on the condvar (timed when timeout_ms is
 * given, converting ms to a relative timespec), reporting whether the
 * wait was interrupted. */
3487 int interrupted = 0;
3510 timeout_rel.
tv_nsec = timeout_ms * 1000 * 1000;
3511 timeout = native_cond_timeout(&mutex->
cond, timeout_rel);
3512 err = native_cond_timedwait(&mutex->
cond, &mutex->
lock, &timeout);
3515 native_cond_wait(&mutex->
cond, &mutex->
lock);
/* lock_interrupt fragment: broadcast so all waiters recheck state. */
3528 native_mutex_lock(&mutex->
lock);
3530 native_cond_broadcast(&mutex->
cond);
3531 native_mutex_unlock(&mutex->
lock);
/* Mutex#lock fragment: loop until this thread owns the mutex; the
 * patrol_thread bookkeeping is cleared when we stop being the watchdog. */
3561 while (mutex->
th != th) {
3571 native_mutex_lock(&mutex->
lock);
3585 interrupted =
lock_func(th, mutex, timeout_ms);
3586 native_mutex_unlock(&mutex->
lock);
3589 if (patrol_thread == th)
3590 patrol_thread =
NULL;
3595 if (mutex->
th && interrupted == 2) {
3599 th->
status = prev_status;
/* mutex_unlock fragment: validate ownership, signal one waiter, then
 * unlink the mutex from the thread's keeping_mutexes list. */
3619 native_mutex_lock(&mutex->
lock);
3621 if (mutex->
th == 0) {
3622 err =
"Attempt to unlock a mutex which is not locked";
3624 else if (mutex->
th != th) {
3625 err =
"Attempt to unlock a mutex which is locked by another thread";
3630 native_cond_signal(&mutex->
cond);
3633 native_mutex_unlock(&mutex->
lock);
3637 if (th_mutex == mutex) {
3644 if (tmp_mutex == mutex) {
3648 th_mutex = tmp_mutex;
/* Mutex#sleep fragment: nil timeout sleeps forever, otherwise sleep for
 * the given span; returns elapsed whole seconds. */
3711 if (!
NIL_P(timeout)) {
3716 if (
NIL_P(timeout)) {
3722 end = time(0) - beg;
/* Barrier: unwrap the typed-data pointer (a VALUE holding the mutex). */
3778 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
/* barrier_wait fragment: a destroyed barrier (no mutex) reports false. */
3794 if (!mutex)
return Qfalse;
/* Recursion-guard object-id comparison: when object ids fit in a long
 * they compare with ==; otherwise ids may be Bignums and need rb_big_eql. */
3858 #if SIZEOF_LONG == SIZEOF_VOIDP
3859 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3860 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3861 #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3862 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
/* recursive-list fragments: paired-object lookup for recursion checks. */
3868 if (paired_obj_id) {
3903 VALUE other_paired_obj = pair_list;
3926 if (pair_list ==
Qundef) {
3994 if (outer && !outermost) {
4006 if (result == p.list) {
/* Private event-flag bit marking a hook slated for removal. */
4053 #define RUBY_EVENT_REMOVED 0x1000000
/* Event-hook machinery fragments: each hook stores the event mask it
 * subscribed to; firing walks the list and invokes hooks whose mask
 * intersects the current event flag. */
4079 hook->
flag = events;
4149 for (; hook; hook = hook->
next) {
4154 if (flag & hook->
flag) {
4155 (*hook->
func)(flag, hook->
data,
self,
id, klass);
/* Clear the "running VM event" bit when hook execution finishes. */
4192 th->
tracing &= ~EVENT_RUNNING_VM;
/* Hook removal: func == 0 removes all hooks, otherwise match by func. */
4232 if (func == 0 || hook->
func == func) {
/* set_trace_func event dispatch fragments: resolve the reported class —
 * for singleton/attached objects, unwrap to the real receiver via the
 * hidden __attached__ ivar. */
4515 if (p->
klass != 0) {
4526 klass =
RBASIC(klass)->klass;
4529 klass =
rb_iv_get(klass,
"__attached__");
/* Build the 6-element argv passed to the trace proc; klass may be nil. */
4533 argv[0] = eventname;
4538 argv[5] = klass ? klass :
Qnil;
/* ruby_suppress_tracing fragments: guard against re-entry for the same
 * event unless `always` is set; save and restore th->state around the
 * callback so trace errors do not leak into the interpreter state. */
4568 volatile int raised;
4569 volatile int outer_state;
4572 if (running == ev && !always) {
4580 outer_state = th->
state;
4585 result = (*func)(
arg, running);
4600 th->
state = outer_state;
/* Within this file, intern only constant C strings (compile-time check). */
4645 #define rb_intern(str) rb_intern_const(str)
/* Init_Thread fragments: register the runtime debug toggle (only when
 * THREAD_DEBUG < 0), the recursion key, take the GVL for the main
 * thread, and start the timer thread. */
4662 #if THREAD_DEBUG < 0
4718 recursive_key =
rb_intern(
"__recursive_key__");
4732 gvl_acquire(th->
vm, th);
4737 rb_thread_create_timer_thread();
/* Reference native_mutex_trylock so it is not warned as unused. */
4740 (void)native_mutex_trylock;
/* Deadlock-detection fragments (ruby_native_thread_p / debug checks):
 * briefly take each mutex's native lock to inspect its state. */
4765 native_mutex_lock(&mutex->
lock);
4769 native_mutex_unlock(&mutex->
lock);
4775 #ifdef DEBUG_DEADLOCK_CHECK
4788 native_mutex_lock(&mutex->
lock);
4790 native_mutex_unlock(&mutex->
lock);
/* Only the designated patrol thread runs the deadlock check. */
4806 if (patrol_thread && patrol_thread !=
GET_THREAD())
return;
4814 #ifdef DEBUG_DEADLOCK_CHECK
/* Coverage support fragments: a coverage array is live only while its
 * hidden klass field is 0; accessors for the VM-wide coverage table. */
4827 if (coverage &&
RBASIC(coverage)->klass == 0) {
4843 return GET_VM()->coverages;
4849 GET_VM()->coverages = coverages;