#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond, int flags);
static void native_cond_destroy(rb_thread_cond_t *cond);

static pthread_t timer_thread_id;
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && defined(HAVE_CLOCK_GETTIME)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif
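
/*
 * Background: a timed wait measured against CLOCK_REALTIME moves with
 * wall-clock adjustments (NTP slews, settimeofday), so where the platform
 * offers pthread_condattr_setclock() the condition variables below are
 * bound to CLOCK_MONOTONIC and absolute deadlines are computed on that
 * clock instead (see native_cond_initialize and native_cond_timeout).
 */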
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* ... switch arbitration elided: when another thread is already waiting
     * for the GVL it is handed over via gvl.switch_cond; otherwise the CPU
     * is briefly given up as below ... */
    native_mutex_unlock(&vm->gvl.lock);
    native_thread_yield();
    native_mutex_lock(&vm->gvl.lock);

    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
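
/*
 * Usage sketch (illustrative, not from this file): code that may block
 * outside Ruby brackets the blocking call with the pair above, e.g.
 *
 *     gvl_release(vm);            // enter blocking region
 *     r = read(fd, buf, len);     // runs without the GVL
 *     gvl_acquire(vm, th);        // leave blocking region
 *
 * The real entry points live in thread.c (BLOCKING_REGION and friends);
 * the snippet only shows the intended pairing.
 */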
static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    /* ... remaining GVL bookkeeping (waiting count, yield flags) starts at zero ... */
}

static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}
#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}
static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        rb_bug_errno("pthread_mutex_trylock", r);
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}
static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
    int r;
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

#if USE_MONOTONIC_COND
    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }
#endif

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}
static void
native_cond_destroy(rb_thread_cond_t *cond)
{
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}

/*
 * On OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast can
 * spuriously return EAGAIN, so both are retried until they succeed.
 */
static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;
    /* Old Linux kernels could return EINTR here even though POSIX forbids it. */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);
    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }
    return r;
}
#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find integer type whose size is the same as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
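
/*
 * Worked example: ~(time_t)0 is all bits set.  If time_t is signed, that
 * value compares <= 0 and the maximum is all-ones shifted right by one,
 * e.g. 0x7fffffffffffffff for a signed 64-bit time_t; if time_t is
 * unsigned, the maximum is all-ones itself (0xffffffffffffffff).
 */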
static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %d", cond->clockid);
#endif

    {
        struct timeval tv;
        ret = gettimeofday(&tv, 0);
        if (ret != 0)
            rb_sys_fail(0);
        now.tv_sec = tv.tv_sec;
        now.tv_nsec = tv.tv_usec * 1000;
    }

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
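
/*
 * Worked example of the carry and the overflow clamp: with
 * now = {100, 900000000} and timeout_rel = {0, 200000000}, the raw sum is
 * {100, 1100000000}; the nanosecond overflow carries into tv_sec, giving
 * {101, 100000000}.  If adding tv_sec wraps past the end of time_t, the
 * result would sort before "now", so the deadline is clamped to TIMET_MAX.
 */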
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif
#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static pthread_mutex_t signal_thread_list_lock;
#endif
static pthread_key_t ruby_native_thread_key;

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
}

static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}
#define USE_THREAD_CACHE 0

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of the current thread's stack.
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;

    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;

    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* Mac OS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);

    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif /* STACKADDR_AVAILABLE */
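
/*
 * Usage sketch (illustrative; the real callers are ruby_init_stack and
 * native_thread_init_stack below):
 *
 *     void *base;
 *     size_t size;
 *     if (get_stack(&base, &size) == 0) {
 *         // base/size now describe this thread's stack region and can
 *         // seed the VM's stack-overflow checks
 *     }
 */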
static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
        size_t size = 0;
        size_t space = 0;
#if defined(STACKADDR_AVAILABLE)
        void *stackaddr;
        STACK_GROW_DIR_DETECTION;
        get_stack(&stackaddr, &size);
        space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr,
                                (char *)stackaddr - (char *)addr);
#elif defined(HAVE_GETRLIMIT)
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        space = size > 5 * 1024 * 1024 ? 1024 * 1024 : size / 5;
#endif
        native_main_thread.stack_maxsize = size - space;
    }
}
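
/*
 * Worked example: with the common 8 MiB RLIMIT_STACK, size/5 exceeds the
 * 1 MiB cap, so space = 1 MiB and stack_maxsize ends up 7 MiB; a 1 MiB
 * rlimit instead reserves 1 MiB / 5 = 204.8 KiB as safety margin.
 */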
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
#endif
    return 0;
}
#define USE_NATIVE_THREAD_INIT 1

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache this thread */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}
struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
        return 0; /* failed -> terminate thread immediately */
    }

    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        if (cached_thread_root) {
            cached_thread_root = entry->next;
            *entry->th_area = th;
            result = 1;
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
enum {
#ifdef __SYMBIAN32__
    RUBY_STACK_MIN_LIMIT = 64 * 1024,  /* 64KB: be more frugal on mobile platforms */
#else
    RUBY_STACK_MIN_LIMIT = 512 * 1024, /* 512KB */
#endif
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024
};

#ifdef PTHREAD_STACK_MIN
#define RUBY_STACK_MIN ((RUBY_STACK_MIN_LIMIT < PTHREAD_STACK_MIN) ? \
                        PTHREAD_STACK_MIN * 2 : RUBY_STACK_MIN_LIMIT)
#else
#define RUBY_STACK_MIN (RUBY_STACK_MIN_LIMIT)
#endif
#define RUBY_STACK_SPACE (RUBY_STACK_MIN/5 > RUBY_STACK_SPACE_LIMIT ? \
                          RUBY_STACK_SPACE_LIMIT : RUBY_STACK_MIN/5)
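
/*
 * Worked example: on a typical glibc system PTHREAD_STACK_MIN is 16 KiB,
 * below the 512 KiB RUBY_STACK_MIN_LIMIT, so RUBY_STACK_MIN is 512 KiB;
 * RUBY_STACK_MIN/5 = 102.4 KiB is under the 1 MiB cap, so RUBY_STACK_SPACE
 * is 102.4 KiB.
 */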
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = RUBY_STACK_MIN;
        const size_t space = RUBY_STACK_SPACE;

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

        CHECK_ERR(pthread_attr_init(&attr));

#ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
#endif

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
#endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
        CHECK_ERR(pthread_attr_destroy(&attr));
    }
    return err;
}
static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;

    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Solaris' cond_timedwait() rejects timeouts more than 100,000,000
         * seconds ahead, so clamp; the early return then looks like a
         * spurious wakeup, which callers must tolerate anyway. */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted; return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();
}
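
/*
 * The ordering above is the classic sleep/wakeup handshake: the unblock
 * function is registered and the interrupt flag is re-checked *after*
 * taking interrupt_lock, so a wakeup that arrives just before the wait
 * cannot be lost; it either flips the interrupt flag (seen by the check)
 * or signals sleep_cond (seen by the wait).
 */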
#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)
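
/*
 * FGLOCK usage sketch (illustrative): the macro simply brackets a
 * statement block with lock/unlock, e.g.
 *
 *     FGLOCK(&signal_thread_list_lock, {
 *         count++;
 *     });
 */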
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
      signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}

static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              (struct signal_thread_list *)
              th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list);
        });
    }
}
static void
ubf_select_each(rb_thread_t *th)
{
    thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
    if (th) {
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread(); /* activate timer thread */
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* USE_SIGNAL_THREAD_LIST */
static void add_signal_thread_list(rb_thread_t *th) { }
static void remove_signal_thread_list(rb_thread_t *th) { }
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_owner_process;

#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
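
/*
 * Note on WRITE_CONST: sizeof(str)-1 only works because str is a string
 * literal; sizeof measures the array including its NUL terminator, so
 * WRITE_CONST(2, "hi\n") writes exactly 3 bytes.  The (void)(... < 0)
 * wrapper silences "unused result" warnings from write(2).
 */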
void
rb_thread_wakeup_timer_thread(void)
{
    ssize_t result;

    /* already opened */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(timer_thread_pipe[1], buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
}
static void
consume_communication_pipe(void)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the buffer can be shared because nobody reads its contents */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

  retry:
    result = read(timer_thread_pipe[0], buff, CCP_READ_BUFF_SIZE);
    if (result < 0) {
        switch (errno) {
          case EINTR: goto retry;
          default:
            rb_async_bug_errno("consume_communication_pipe: read", errno);
        }
    }
}
static void
close_communication_pipe(void)
{
    if (close(timer_thread_pipe[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(timer_thread_pipe[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    timer_thread_pipe[0] = timer_thread_pipe[1] = -1;
}
/* 100ms; 10ms is too small for user-level thread scheduling on recent Linux. */
#define TIME_QUANTUM_USEC (100 * 1000)

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
    int result;
    int need_polling;
    struct timeval timeout;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    while (system_working > 0) {
        fd_set rfds;

        /* timer function */
        ping_signal_thread_list();
        timer_thread_function(0);
        need_polling = check_signal_thread_list();

        FD_ZERO(&rfds);
        FD_SET(timer_thread_pipe[0], &rfds);

        if (gvl->waiting > 0 || need_polling) {
            /* polling (TIME_QUANTUM_USEC usec) */
            timeout.tv_sec = 0;
            timeout.tv_usec = TIME_QUANTUM_USEC;
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, &timeout);
        }
        else {
            /* wait (infinite) */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, 0);
        }

        if (result == 0) {
            /* maybe timeout */
        }
        else if (result > 0) {
            consume_communication_pipe();
        }
        else { /* result < 0 */
            switch (errno) {
              case EBADF:
              case EINVAL:
              case ENOMEM: /* from Linux man */
              case EFAULT: /* from FreeBSD man */
                rb_async_bug_errno("thread_timer: select", errno);
              default:
                /* ignore */;
            }
        }
    }

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}
static void
rb_thread_create_timer_thread(void)
{
    if (!timer_thread_id) {
        pthread_attr_t attr;
        int err;

        pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
        if (PTHREAD_STACK_MIN < 4096 * 3) {
            /* Allocate the machine stack for the timer thread
             * at least 12KB (3 pages).  FreeBSD 8.2 AMD64 causes
             * machine stack overflow only with PTHREAD_STACK_MIN.
             */
            pthread_attr_setstacksize(&attr,
                                      4096 * 3 + (THREAD_DEBUG ? BUFSIZ : 0));
        }
        else {
            pthread_attr_setstacksize(&attr,
                                      PTHREAD_STACK_MIN + (THREAD_DEBUG ? BUFSIZ : 0));
        }
#endif

        /* communication pipe with timer thread and signal handler */
        if (timer_thread_pipe_owner_process != getpid()) {
            if (timer_thread_pipe[0] != -1) {
                /* close the pipe inherited from the parent process */
                close_communication_pipe();
            }

            err = pipe(timer_thread_pipe);
            if (err != 0) {
                rb_bug_errno("thread_timer: Failed to create communication pipe for timer thread", errno);
            }
#if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL)
            {
                int oflags;
#if defined(O_NONBLOCK)
                oflags = fcntl(timer_thread_pipe[1], F_GETFL);
                oflags |= O_NONBLOCK;
                fcntl(timer_thread_pipe[1], F_SETFL, oflags);
#endif /* defined(O_NONBLOCK) */
#if defined(FD_CLOEXEC)
                oflags = fcntl(timer_thread_pipe[0], F_GETFD);
                fcntl(timer_thread_pipe[0], F_SETFD, oflags | FD_CLOEXEC);
                oflags = fcntl(timer_thread_pipe[1], F_GETFD);
                fcntl(timer_thread_pipe[1], F_SETFD, oflags | FD_CLOEXEC);
#endif /* defined(FD_CLOEXEC) */
            }
#endif /* defined(HAVE_FCNTL) */

            /* validate pipe on this process */
            timer_thread_pipe_owner_process = getpid();
        }

        /* create timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
        pthread_attr_destroy(&attr);
    }
}
static void
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* join */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        if (close_anyway) {
            close_communication_pipe();
        }
    }
}
static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    if (size > water_mark) size = water_mark;
    if (STACK_DIR_UPPER(1, 0)) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif
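
/*
 * Worked example for a downward-growing stack: with base = 0x7f0000000000
 * and size capped at the 1 MiB water mark, any fault address in
 * (base - 1 MiB, base] is treated as a stack overflow, letting the
 * SIGSEGV handler (installed via sigaltstack) report SystemStackError
 * instead of crashing the process.
 */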
int
rb_reserved_fd_p(int fd)
{
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1]) {
        return 1;
    }
    else {
        return 0;
    }
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */