Ruby 1.9.3p392 (2013-02-22 revision 39386)
thread.c
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author: usa $
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as the traditional Ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Uses native threads (pthreads or Windows threads); Ruby threads run concurrently, but not in parallel.
19 
20  model 3: Native Thread with fine grain lock
21  Uses native threads; Ruby threads run concurrently or in parallel.
22 
23 ------------------------------------------------------------------------
24 
25  model 2:
26  Only the thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
27  At a scheduling point, the running thread releases the GVL. If the running
28  thread starts a blocking operation, it must release the GVL so that another
29  thread can continue running. After the blocking operation, the thread
30  must check for interrupts (RUBY_VM_CHECK_INTS).
31 
32  Separate VMs can run in parallel.
33 
34  Ruby threads are scheduled by OS thread scheduler.
35 
36 ------------------------------------------------------------------------
37 
38  model 3:
39  All threads run concurrently or in parallel, so exclusive access control
40  is needed for shared objects. For example, to access a String
41  object or an Array object, a fine-grained lock must be taken on every access.
42  */
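/*
 * Editor's illustration (not part of the original file): under model 2, a C
 * extension lets other Ruby threads run during a blocking call by releasing
 * the GVL around it, e.g. via rb_thread_blocking_region() defined later in
 * this file. A minimal sketch assuming <unistd.h>; the names ext_read,
 * blocking_read and struct read_args are hypothetical.
 */
#if 0 /* example only */
struct read_args { int fd; char buf[4096]; ssize_t result; };

static VALUE
blocking_read(void *p)          /* runs without the GVL */
{
    struct read_args *a = p;
    a->result = read(a->fd, a->buf, sizeof(a->buf));
    return Qnil;                /* must not touch Ruby objects here */
}

static VALUE
ext_read(VALUE self, VALUE fd)
{
    struct read_args a;
    a.fd = NUM2INT(fd);
    rb_thread_blocking_region(blocking_read, &a, RUBY_UBF_IO, 0);
    /* GVL re-acquired: the Ruby C API is safe to use again */
    return a.result < 0 ? Qnil : rb_str_new(a.buf, a.result);
}
#endif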
43 
44 
45 /* for model 2 */
46 
47 #include "eval_intern.h"
48 #include "gc.h"
49 #include "internal.h"
50 #include "ruby/io.h"
51 
52 #ifndef USE_NATIVE_THREAD_PRIORITY
53 #define USE_NATIVE_THREAD_PRIORITY 0
54 #define RUBY_THREAD_PRIORITY_MAX 3
55 #define RUBY_THREAD_PRIORITY_MIN -3
56 #endif
57 
58 #ifndef THREAD_DEBUG
59 #define THREAD_DEBUG 0
60 #endif
61 
62 VALUE rb_cMutex;
63 VALUE rb_cBarrier;
64 
65 static void sleep_timeval(rb_thread_t *th, struct timeval time);
66 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
67 static void sleep_forever(rb_thread_t *th, int nodeadlock);
68 static double timeofday(void);
69 static int rb_threadptr_dead(rb_thread_t *th);
70 
71 static void rb_check_deadlock(rb_vm_t *vm);
72 
73 #define eKillSignal INT2FIX(0)
74 #define eTerminateSignal INT2FIX(1)
75 static volatile int system_working = 1;
76 
77 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
78 
79 inline static void
80 st_delete_wrap(st_table *table, st_data_t key)
81 {
82  st_delete(table, &key, 0);
83 }
84 
85 /********************************************************************************/
86 
87 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
88 
88 
89 struct rb_blocking_region_buffer {
90  enum rb_thread_status prev_status;
91  struct rb_unblock_callback oldubf;
92 };
93 
94 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
95  struct rb_unblock_callback *old);
96 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
97 
98 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
99 
100 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
101  do { \
102  rb_gc_save_machine_context(th); \
103  SET_MACHINE_STACK_END(&(th)->machine_stack_end); \
104  } while (0)
105 
106 #define GVL_UNLOCK_BEGIN() do { \
107  rb_thread_t *_th_stored = GET_THREAD(); \
108  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
109  gvl_release(_th_stored->vm);
110 
111 #define GVL_UNLOCK_END() \
112  gvl_acquire(_th_stored->vm, _th_stored); \
113  rb_thread_set_current(_th_stored); \
114 } while(0)
115 
116 #define blocking_region_begin(th, region, func, arg) \
117  do { \
118  (region)->prev_status = (th)->status; \
119  set_unblock_function((th), (func), (arg), &(region)->oldubf); \
120  (th)->blocking_region_buffer = (region); \
121  (th)->status = THREAD_STOPPED; \
122  thread_debug("enter blocking region (%p)\n", (void *)(th)); \
123  RB_GC_SAVE_MACHINE_CONTEXT(th); \
124  gvl_release((th)->vm); \
125  } while (0)
126 
127 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
128  rb_thread_t *__th = GET_THREAD(); \
129  struct rb_blocking_region_buffer __region; \
130  blocking_region_begin(__th, &__region, (ubf), (ubfarg)); \
131  exec; \
132  blocking_region_end(__th, &__region); \
133  RUBY_VM_CHECK_INTS(); \
134 } while(0)
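/*
 * Editor's sketch of how BLOCKING_REGION() is meant to be used inside the
 * VM (compare rb_thread_blocking_region() below). It assumes the GVL is held
 * on entry; ubf_select comes from the platform thread implementation
 * included further down, so this fragment is illustrative only.
 */
#if 0 /* example only */
static ssize_t
example_blocking_read(int fd, void *buf, size_t len)
{
    ssize_t n;
    BLOCKING_REGION({
        n = read(fd, buf, len);      /* the GVL is released around this call */
    }, ubf_select, GET_THREAD());    /* ubf_select wakes the thread on interrupt */
    return n;                        /* RUBY_VM_CHECK_INTS() has already run */
}
#endif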
135 
136 #if THREAD_DEBUG
137 #ifdef HAVE_VA_ARGS_MACRO
138 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
139 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
140 #define POSITION_FORMAT "%s:%d:"
141 #define POSITION_ARGS ,file, line
142 #else
143 void rb_thread_debug(const char *fmt, ...);
144 #define thread_debug rb_thread_debug
145 #define POSITION_FORMAT
146 #define POSITION_ARGS
147 #endif
148 
149 # if THREAD_DEBUG < 0
150 static int rb_thread_debug_enabled;
151 
152 /*
153  * call-seq:
154  * Thread.DEBUG -> num
155  *
156  * Returns the thread debug level. Available only if compiled with
157  * THREAD_DEBUG=-1.
158  */
159 
160 static VALUE
161 rb_thread_s_debug(void)
162 {
163  return INT2NUM(rb_thread_debug_enabled);
164 }
165 
166 /*
167  * call-seq:
168  * Thread.DEBUG = num
169  *
170  * Sets the thread debug level. Available only if compiled with
171  * THREAD_DEBUG=-1.
172  */
173 
174 static VALUE
175 rb_thread_s_debug_set(VALUE self, VALUE val)
176 {
177  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
178  return val;
179 }
180 # else
181 # define rb_thread_debug_enabled THREAD_DEBUG
182 # endif
183 #else
184 #define thread_debug if(0)printf
185 #endif
186 
187 #ifndef __ia64
188 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
189 #endif
190 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
191  VALUE *register_stack_start));
192 static void timer_thread_function(void *);
193 
194 #if defined(_WIN32)
195 #include "thread_win32.c"
196 
197 #define DEBUG_OUT() \
198  WaitForSingleObject(&debug_mutex, INFINITE); \
199  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
200  fflush(stdout); \
201  ReleaseMutex(&debug_mutex);
202 
203 #elif defined(HAVE_PTHREAD_H)
204 #include "thread_pthread.c"
205 
206 #define DEBUG_OUT() \
207  pthread_mutex_lock(&debug_mutex); \
208  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
209  fflush(stdout); \
210  pthread_mutex_unlock(&debug_mutex);
211 
212 #else
213 #error "unsupported thread type"
214 #endif
215 
216 #if THREAD_DEBUG
217 static int debug_mutex_initialized = 1;
218 static rb_thread_lock_t debug_mutex;
219 
220 void
221 rb_thread_debug(
222 #ifdef HAVE_VA_ARGS_MACRO
223  const char *file, int line,
224 #endif
225  const char *fmt, ...)
226 {
227  va_list args;
228  char buf[BUFSIZ];
229 
230  if (!rb_thread_debug_enabled) return;
231 
232  if (debug_mutex_initialized == 1) {
233  debug_mutex_initialized = 0;
234  native_mutex_initialize(&debug_mutex);
235  }
236 
237  va_start(args, fmt);
238  vsnprintf(buf, BUFSIZ, fmt, args);
239  va_end(args);
240 
241  DEBUG_OUT();
242 }
243 #endif
244 
245 void
246 rb_vm_gvl_destroy(rb_vm_t *vm)
247 {
248  gvl_release(vm);
249  gvl_destroy(vm);
250 }
251 
252 void
253 rb_thread_lock_unlock(rb_thread_lock_t *lock)
254 {
255  native_mutex_unlock(lock);
256 }
257 
258 void
259 rb_thread_lock_destroy(rb_thread_lock_t *lock)
260 {
261  native_mutex_destroy(lock);
262 }
263 
264 static void
265 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
266  struct rb_unblock_callback *old)
267 {
268  check_ints:
269  RUBY_VM_CHECK_INTS(); /* check signal or so */
270  native_mutex_lock(&th->interrupt_lock);
271  if (th->interrupt_flag) {
272  native_mutex_unlock(&th->interrupt_lock);
273  goto check_ints;
274  }
275  else {
276  if (old) *old = th->unblock;
277  th->unblock.func = func;
278  th->unblock.arg = arg;
279  }
280  native_mutex_unlock(&th->interrupt_lock);
281 }
282 
283 static void
284 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
285 {
286  native_mutex_lock(&th->interrupt_lock);
287  th->unblock = *old;
288  native_mutex_unlock(&th->interrupt_lock);
289 }
290 
291 void
292 rb_threadptr_interrupt(rb_thread_t *th)
293 {
294  native_mutex_lock(&th->interrupt_lock);
295  RUBY_VM_SET_INTERRUPT(th);
296  if (th->unblock.func) {
297  (th->unblock.func)(th->unblock.arg);
298  }
299  else {
300  /* none */
301  }
302  native_mutex_unlock(&th->interrupt_lock);
303 }
304 
305 
306 static int
306 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
308 {
309  VALUE thval = key;
310  rb_thread_t *th;
311  GetThreadPtr(thval, th);
312 
313  if (th != main_thread) {
314  thread_debug("terminate_i: %p\n", (void *)th);
315  rb_threadptr_interrupt(th);
316  th->thrown_errinfo = eTerminateSignal;
317  th->status = THREAD_TO_KILL;
318  }
319  else {
320  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
321  }
322  return ST_CONTINUE;
323 }
324 
325 typedef struct rb_mutex_struct
326 {
327  rb_thread_lock_t lock;
328  rb_thread_cond_t cond;
329  struct rb_thread_struct volatile *th;
330  int cond_waiting;
331  struct rb_mutex_struct *next_mutex;
332 } rb_mutex_t;
333 
334 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
335 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
336 
337 void
338 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
339 {
340  const char *err;
341  rb_mutex_t *mutex;
342  rb_mutex_t *mutexes = th->keeping_mutexes;
343 
344  while (mutexes) {
345  mutex = mutexes;
346  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
347  mutexes); */
348  mutexes = mutex->next_mutex;
349  err = rb_mutex_unlock_th(mutex, th);
350  if (err) rb_bug("invalid keeping_mutexes: %s", err);
351  }
352 }
353 
354 void
355 rb_thread_terminate_all(void)
356 {
357  rb_thread_t *th = GET_THREAD(); /* main thread */
358  rb_vm_t *vm = th->vm;
359 
360  if (vm->main_thread != th) {
361  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
362  (void *)vm->main_thread, (void *)th);
363  }
364 
365  /* unlock all locking mutexes */
366  rb_threadptr_unlock_all_locking_mutexes(th);
367 
368  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
369  st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
370  vm->inhibit_thread_creation = 1;
371 
372  while (!rb_thread_alone()) {
373  PUSH_TAG();
374  if (EXEC_TAG() == 0) {
375  rb_thread_schedule();
376  }
377  else {
378  /* ignore exception */
379  }
380  POP_TAG();
381  }
382 }
383 
384 static void
385 thread_cleanup_func_before_exec(void *th_ptr)
386 {
387  rb_thread_t *th = th_ptr;
388  th->status = THREAD_KILLED;
389  th->machine_stack_start = th->machine_stack_end = 0;
390 #ifdef __ia64
391  th->machine_register_stack_start = th->machine_register_stack_end = 0;
392 #endif
393 }
394 
395 static void
396 thread_cleanup_func(void *th_ptr, int atfork)
397 {
398  rb_thread_t *th = th_ptr;
399 
400  th->locking_mutex = Qfalse;
401  thread_cleanup_func_before_exec(th_ptr);
402 
403  /*
404  * Unfortunately, we can't release native threading resources at fork
405  * because libc may be in an unstable locking state; touching
406  * a threading resource may then cause a deadlock.
407  */
408  if (atfork)
409  return;
410 
411  native_mutex_destroy(&th->interrupt_lock);
412  native_thread_destroy(th);
413 }
414 
415 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
416 
417 void
418 ruby_thread_init_stack(rb_thread_t *th)
419 {
420  native_thread_init_stack(th);
421 }
422 
423 static int
424 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
425 {
426  int state;
427  VALUE args = th->first_args;
428  rb_proc_t *proc;
429  rb_thread_t *join_th;
430  rb_thread_t *main_th;
431  VALUE errinfo = Qnil;
432 # ifdef USE_SIGALTSTACK
433  void rb_register_sigaltstack(rb_thread_t *th);
434 
435  rb_register_sigaltstack(th);
436 # endif
437 
438  ruby_thread_set_native(th);
439 
440  th->machine_stack_start = stack_start;
441 #ifdef __ia64
442  th->machine_register_stack_start = register_stack_start;
443 #endif
444  thread_debug("thread start: %p\n", (void *)th);
445 
446  gvl_acquire(th->vm, th);
447  {
448  thread_debug("thread start (get lock): %p\n", (void *)th);
449  rb_thread_set_current(th);
450 
451  TH_PUSH_TAG(th);
452  if ((state = EXEC_TAG()) == 0) {
453  SAVE_ROOT_JMPBUF(th, {
454  if (!th->first_func) {
455  GetProcPtr(th->first_proc, proc);
456  th->errinfo = Qnil;
457  th->local_lfp = proc->block.lfp;
458  th->local_svar = Qnil;
459  th->value = rb_vm_invoke_proc(th, proc, proc->block.self,
460  (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
461  }
462  else {
463  th->value = (*th->first_func)((void *)args);
464  }
465  });
466  }
467  else {
468  errinfo = th->errinfo;
469  if (NIL_P(errinfo)) errinfo = rb_errinfo();
470  if (state == TAG_FATAL) {
471  /* fatal error within this thread, need to stop whole script */
472  }
473  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
474  if (th->safe_level >= 4) {
475  th->errinfo = rb_exc_new3(rb_eSecurityError,
476  rb_sprintf("Insecure exit at level %d", th->safe_level));
477  errinfo = Qnil;
478  }
479  }
480  else if (th->safe_level < 4 &&
481  (th->vm->thread_abort_on_exception ||
482  th->abort_on_exception || RTEST(ruby_debug))) {
483  /* exit on main_thread */
484  }
485  else {
486  errinfo = Qnil;
487  }
488  th->value = Qnil;
489  }
490 
491  th->status = THREAD_KILLED;
492  thread_debug("thread end: %p\n", (void *)th);
493 
494  main_th = th->vm->main_thread;
495  if (th != main_th) {
496  if (TYPE(errinfo) == T_OBJECT) {
497  /* treat with normal error object */
498  rb_threadptr_raise(main_th, 1, &errinfo);
499  }
500  }
501  TH_POP_TAG();
502 
503  /* locking_mutex must be Qfalse */
504  if (th->locking_mutex != Qfalse) {
505  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
506  (void *)th, th->locking_mutex);
507  }
508 
509  /* delete self other than main thread from living_threads */
510  if (th != main_th) {
511  st_delete_wrap(th->vm->living_threads, th->self);
512  }
513 
514  /* wake up joining threads */
515  join_th = th->join_list_head;
516  while (join_th) {
517  if (join_th == main_th) errinfo = Qnil;
518  rb_threadptr_interrupt(join_th);
519  switch (join_th->status) {
520  case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
521  join_th->status = THREAD_RUNNABLE;
522  default: break;
523  }
524  join_th = join_th->join_list_next;
525  }
526 
527  rb_threadptr_unlock_all_locking_mutexes(th);
528  if (th != main_th) rb_check_deadlock(th->vm);
529 
530  if (!th->root_fiber) {
531  rb_thread_recycle_stack_release(th->stack);
532  th->stack = 0;
533  }
534  }
535  if (th->vm->main_thread == th) {
536  ruby_cleanup(state);
537  }
538  else {
539  thread_cleanup_func(th, FALSE);
540  gvl_release(th->vm);
541  }
542 
543  return 0;
544 }
545 
546 static VALUE
547 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
548 {
549  rb_thread_t *th;
550  int err;
551 
552  if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
554  "can't start a new thread (frozen ThreadGroup)");
555  }
556  GetThreadPtr(thval, th);
557 
558  /* setup thread environment */
559  th->first_func = fn;
560  th->first_proc = fn ? Qfalse : rb_block_proc();
561  th->first_args = args; /* GC: shouldn't put before above line */
562 
563  th->priority = GET_THREAD()->priority;
564  th->thgroup = GET_THREAD()->thgroup;
565 
566  native_mutex_initialize(&th->interrupt_lock);
567  if (GET_VM()->event_hooks != NULL)
568  th->event_flags |= RUBY_EVENT_VM;
569 
570  /* kick thread */
571  st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
572  err = native_thread_create(th);
573  if (err) {
574  st_delete_wrap(th->vm->living_threads, th->self);
575  th->status = THREAD_KILLED;
576  rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
577  }
578  return thval;
579 }
580 
581 /* :nodoc: */
582 static VALUE
583 thread_s_new(int argc, VALUE *argv, VALUE klass)
584 {
585  rb_thread_t *th;
586  VALUE thread = rb_thread_alloc(klass);
587 
588  if (GET_VM()->inhibit_thread_creation)
589  rb_raise(rb_eThreadError, "can't alloc thread");
590 
591  rb_obj_call_init(thread, argc, argv);
592  GetThreadPtr(thread, th);
593  if (!th->first_args) {
594  rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
595  rb_class2name(klass));
596  }
597  return thread;
598 }
599 
600 /*
601  * call-seq:
602  * Thread.start([args]*) {|args| block } -> thread
603  * Thread.fork([args]*) {|args| block } -> thread
604  *
605  * Basically the same as <code>Thread::new</code>. However, if class
606  * <code>Thread</code> is subclassed, then calling <code>start</code> in that
607  * subclass will not invoke the subclass's <code>initialize</code> method.
608  */
609 
610 static VALUE
611 thread_start(VALUE klass, VALUE args)
612 {
613  return thread_create_core(rb_thread_alloc(klass), args, 0);
614 }
615 
616 /* :nodoc: */
617 static VALUE
618 thread_initialize(VALUE thread, VALUE args)
619 {
620  rb_thread_t *th;
621  if (!rb_block_given_p()) {
622  rb_raise(rb_eThreadError, "must be called with a block");
623  }
624  GetThreadPtr(thread, th);
625  if (th->first_args) {
626  VALUE proc = th->first_proc, line, loc;
627  const char *file;
628  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
629  rb_raise(rb_eThreadError, "already initialized thread");
630  }
631  file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
632  if (NIL_P(line = RARRAY_PTR(loc)[1])) {
633  rb_raise(rb_eThreadError, "already initialized thread - %s",
634  file);
635  }
636  rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
637  file, NUM2INT(line));
638  }
639  return thread_create_core(thread, args, 0);
640 }
641 
642 VALUE
643 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
644 {
645  return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
646 }
647 
648 
649 /* +infty, for this purpose */
650 #define DELAY_INFTY 1E30
651 
652 struct join_arg {
653  rb_thread_t *target, *waiting;
654  double limit;
655  int forever;
656 };
657 
658 static VALUE
659 remove_from_join_list(VALUE arg)
660 {
661  struct join_arg *p = (struct join_arg *)arg;
662  rb_thread_t *target_th = p->target, *th = p->waiting;
663 
664  if (target_th->status != THREAD_KILLED) {
665  rb_thread_t **pth = &target_th->join_list_head;
666 
667  while (*pth) {
668  if (*pth == th) {
669  *pth = th->join_list_next;
670  break;
671  }
672  pth = &(*pth)->join_list_next;
673  }
674  }
675 
676  return Qnil;
677 }
678 
679 static VALUE
680 thread_join_sleep(VALUE arg)
681 {
682  struct join_arg *p = (struct join_arg *)arg;
683  rb_thread_t *target_th = p->target, *th = p->waiting;
684  double now, limit = p->limit;
685 
686  while (target_th->status != THREAD_KILLED) {
687  if (p->forever) {
688  sleep_forever(th, 1);
689  }
690  else {
691  now = timeofday();
692  if (now > limit) {
693  thread_debug("thread_join: timeout (thid: %p)\n",
694  (void *)target_th->thread_id);
695  return Qfalse;
696  }
697  sleep_wait_for_interrupt(th, limit - now);
698  }
699  thread_debug("thread_join: interrupted (thid: %p)\n",
700  (void *)target_th->thread_id);
701  }
702  return Qtrue;
703 }
704 
705 static VALUE
706 thread_join(rb_thread_t *target_th, double delay)
707 {
708  rb_thread_t *th = GET_THREAD();
709  struct join_arg arg;
710 
711  arg.target = target_th;
712  arg.waiting = th;
713  arg.limit = timeofday() + delay;
714  arg.forever = delay == DELAY_INFTY;
715 
716  thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
717 
718  if (target_th->status != THREAD_KILLED) {
719  th->join_list_next = target_th->join_list_head;
720  target_th->join_list_head = th;
721  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
722  remove_from_join_list, (VALUE)&arg)) {
723  return Qnil;
724  }
725  }
726 
727  thread_debug("thread_join: success (thid: %p)\n",
728  (void *)target_th->thread_id);
729 
730  if (target_th->errinfo != Qnil) {
731  VALUE err = target_th->errinfo;
732 
733  if (FIXNUM_P(err)) {
734  /* */
735  }
736  else if (TYPE(target_th->errinfo) == T_NODE) {
737  rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
738  GET_THROWOBJ_STATE(err), err));
739  }
740  else {
741  /* normal exception */
742  rb_exc_raise(err);
743  }
744  }
745  return target_th->self;
746 }
747 
748 /*
749  * call-seq:
750  * thr.join -> thr
751  * thr.join(limit) -> thr
752  *
753  * The calling thread will suspend execution and run <i>thr</i>. Does not
754  * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
755  * the time limit expires, <code>nil</code> will be returned, otherwise
756  * <i>thr</i> is returned.
757  *
758  * Any threads not joined will be killed when the main program exits. If
759  * <i>thr</i> had previously raised an exception and the
760  * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
761  * (so the exception has not yet been processed) it will be processed at this
762  * time.
763  *
764  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
765  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
766  * x.join # Let x thread finish, a will be killed on exit.
767  *
768  * <em>produces:</em>
769  *
770  * axyz
771  *
772  * The following example illustrates the <i>limit</i> parameter.
773  *
774  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
775  * puts "Waiting" until y.join(0.15)
776  *
777  * <em>produces:</em>
778  *
779  * tick...
780  * Waiting
781  * tick...
782  * Waitingtick...
783  *
784  *
785  * tick...
786  */
787 
788 static VALUE
789 thread_join_m(int argc, VALUE *argv, VALUE self)
790 {
791  rb_thread_t *target_th;
792  double delay = DELAY_INFTY;
793  VALUE limit;
794 
795  GetThreadPtr(self, target_th);
796 
797  rb_scan_args(argc, argv, "01", &limit);
798  if (!NIL_P(limit)) {
799  delay = rb_num2dbl(limit);
800  }
801 
802  return thread_join(target_th, delay);
803 }
804 
805 /*
806  * call-seq:
807  * thr.value -> obj
808  *
809  * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
810  * its value.
811  *
812  * a = Thread.new { 2 + 2 }
813  * a.value #=> 4
814  */
815 
816 static VALUE
817 thread_value(VALUE self)
818 {
819  rb_thread_t *th;
820  GetThreadPtr(self, th);
821  thread_join(th, DELAY_INFTY);
822  return th->value;
823 }
824 
825 /*
826  * Thread Scheduling
827  */
828 
829 static struct timeval
830 double2timeval(double d)
831 {
832  struct timeval time;
833 
834  time.tv_sec = (int)d;
835  time.tv_usec = (int)((d - (int)d) * 1e6);
836  if (time.tv_usec < 0) {
837  time.tv_usec += (int)1e6;
838  time.tv_sec -= 1;
839  }
840  return time;
841 }
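/*
 * Editor's note: two spot checks of the conversion above, including the
 * negative-fraction case handled by the tv_usec adjustment.
 */
#if 0 /* example only */
    double2timeval(1.25);   /* => { tv_sec = 1,  tv_usec = 250000 } */
    double2timeval(-0.5);   /* => { tv_sec = -1, tv_usec = 500000 },
                               keeping tv_usec in [0, 999999] */
#endif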
842 
843 static void
844 sleep_forever(rb_thread_t *th, int deadlockable)
845 {
846  enum rb_thread_status prev_status = th->status;
847  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
848 
849  th->status = status;
850  do {
851  if (deadlockable) {
852  th->vm->sleeper++;
853  rb_check_deadlock(th->vm);
854  }
855  native_sleep(th, 0);
856  if (deadlockable) {
857  th->vm->sleeper--;
858  }
859  RUBY_VM_CHECK_INTS();
860  } while (th->status == status);
861  th->status = prev_status;
862 }
863 
864 static void
865 getclockofday(struct timeval *tp)
866 {
867 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
868  struct timespec ts;
869 
870  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
871  tp->tv_sec = ts.tv_sec;
872  tp->tv_usec = ts.tv_nsec / 1000;
873  } else
874 #endif
875  {
876  gettimeofday(tp, NULL);
877  }
878 }
879 
880 static void
881 sleep_timeval(rb_thread_t *th, struct timeval tv)
882 {
883  struct timeval to, tvn;
884  enum rb_thread_status prev_status = th->status;
885 
886  getclockofday(&to);
887  to.tv_sec += tv.tv_sec;
888  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
889  to.tv_sec++;
890  to.tv_usec -= 1000000;
891  }
892 
893  th->status = THREAD_STOPPED;
894  do {
895  native_sleep(th, &tv);
896  RUBY_VM_CHECK_INTS();
897  getclockofday(&tvn);
898  if (to.tv_sec < tvn.tv_sec) break;
899  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
900  thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
901  (long)to.tv_sec, (long)to.tv_usec,
902  (long)tvn.tv_sec, (long)tvn.tv_usec);
903  tv.tv_sec = to.tv_sec - tvn.tv_sec;
904  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
905  --tv.tv_sec;
906  tv.tv_usec += 1000000;
907  }
908  } while (th->status == THREAD_STOPPED);
909  th->status = prev_status;
910 }
911 
912 void
913 rb_thread_sleep_forever(void)
914 {
915  thread_debug("rb_thread_sleep_forever\n");
916  sleep_forever(GET_THREAD(), 0);
917 }
918 
919 static void
920 rb_thread_sleep_deadly(void)
921 {
922  thread_debug("rb_thread_sleep_deadly\n");
923  sleep_forever(GET_THREAD(), 1);
924 }
925 
926 static double
927 timeofday(void)
928 {
929 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
930  struct timespec tp;
931 
932  if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
933  return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
934  } else
935 #endif
936  {
937  struct timeval tv;
938  gettimeofday(&tv, NULL);
939  return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
940  }
941 }
942 
943 static void
944 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
945 {
946  sleep_timeval(th, double2timeval(sleepsec));
947 }
948 
949 static void
950 sleep_for_polling(rb_thread_t *th)
951 {
952  struct timeval time;
953  time.tv_sec = 0;
954  time.tv_usec = 100 * 1000; /* 0.1 sec */
955  sleep_timeval(th, time);
956 }
957 
958 void
959 rb_thread_wait_for(struct timeval time)
960 {
961  rb_thread_t *th = GET_THREAD();
962  sleep_timeval(th, time);
963 }
964 
965 void
966 rb_thread_polling(void)
967 {
968  RUBY_VM_CHECK_INTS();
969  if (!rb_thread_alone()) {
970  rb_thread_t *th = GET_THREAD();
971  sleep_for_polling(th);
972  }
973 }
974 
975 /*
976  * CAUTION: This function causes thread switching.
977  * rb_thread_check_ints() checks Ruby's pending interrupts.
978  * Some interrupts need thread switching, invoking handlers,
979  * and so on.
980  */
981 
982 void
983 rb_thread_check_ints(void)
984 {
985  RUBY_VM_CHECK_INTS();
986 }
987 
988 /*
989  * Hidden API for tcl/tk wrapper.
990  * There is no guarantee that it will be kept.
991  */
992 int
993 rb_thread_check_trap_pending(void)
994 {
995  return rb_signal_buff_size() != 0;
996 }
997 
998 /* This function can be called in blocking region. */
999 int
1000 rb_thread_interrupted(VALUE thval)
1001 {
1002  rb_thread_t *th;
1003  GetThreadPtr(thval, th);
1004  return RUBY_VM_INTERRUPTED(th);
1005 }
1006 
1007 void
1008 rb_thread_sleep(int sec)
1009 {
1010  rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1011 }
1012 
1013 static void rb_threadptr_execute_interrupts_common(rb_thread_t *);
1014 
1015 static void
1016 rb_thread_schedule_limits(unsigned long limits_us)
1017 {
1018  thread_debug("rb_thread_schedule\n");
1019  if (!rb_thread_alone()) {
1020  rb_thread_t *th = GET_THREAD();
1021 
1022  if (th->running_time_us >= limits_us) {
1023  thread_debug("rb_thread_schedule/switch start\n");
1024  RB_GC_SAVE_MACHINE_CONTEXT(th);
1025  gvl_yield(th->vm, th);
1026  rb_thread_set_current(th);
1027  thread_debug("rb_thread_schedule/switch done\n");
1028  }
1029  }
1030 }
1031 
1032 void
1033 rb_thread_schedule(void)
1034 {
1035  rb_thread_schedule_limits(0);
1036 
1037  if (UNLIKELY(GET_THREAD()->interrupt_flag)) {
1038  rb_threadptr_execute_interrupts_common(GET_THREAD());
1039  }
1040 }
1041 
1042 /* blocking region */
1043 
1044 static inline void
1045 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1046 {
1047  gvl_acquire(th->vm, th);
1048  rb_thread_set_current(th);
1049  thread_debug("leave blocking region (%p)\n", (void *)th);
1050  remove_signal_thread_list(th);
1051  th->blocking_region_buffer = 0;
1052  reset_unblock_function(th, &region->oldubf);
1053  if (th->status == THREAD_STOPPED) {
1054  th->status = region->prev_status;
1055  }
1056 }
1057 
1060 {
1061  rb_thread_t *th = GET_THREAD();
1063  blocking_region_begin(th, region, ubf_select, th);
1064  return region;
1065 }
1066 
1067 void
1068 rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
1069 {
1070  int saved_errno = errno;
1071  rb_thread_t *th = GET_THREAD();
1072  blocking_region_end(th, region);
1073  xfree(region);
1074  RUBY_VM_CHECK_INTS();
1075  errno = saved_errno;
1076 }
1077 
1078 /*
1079  * rb_thread_blocking_region - permit concurrent/parallel execution.
1080  *
1081  * This function does:
1082  * (1) release GVL.
1083  * Other Ruby threads may run in parallel.
1084  * (2) call func with data1.
1085  * (3) acquire GVL.
1086  * Other Ruby threads can not run in parallel any more.
1087  *
1088  * If another thread interrupts this thread (Thread#kill, signal delivery,
1089  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1090  * "un-blocking function"). `ubf()' should interrupt `func()' execution.
1091  *
1092  * There are built-in ubfs which you can specify:
1093  * However, we cannot guarantee that the built-in ubfs will interrupt
1094  * your `func()' correctly. Be careful when using rb_thread_blocking_region().
1095  *
1096  * * RUBY_UBF_IO: ubf for IO operation
1097  * * RUBY_UBF_PROCESS: ubf for process operation
1098  *
1099  * NOTE: You cannot call most of the Ruby C API or touch Ruby
1100  * objects in `func()' and `ubf()', including raising an
1101  * exception, because the current thread does not hold the GVL
1102  * (doing so causes synchronization problems). If you need to do it,
1103  * read the source code of the C APIs and confirm safety by yourself.
1104  *
1105  * NOTE: In short, this API is difficult to use safely. I recommend you
1106  * use other ways if you have them. We lack experience with using this API.
1107  * Please report any problems related to it.
1108  *
1109  * Safe C API:
1110  * * rb_thread_interrupted() - check interrupt flag
1111  * * ruby_xalloc(), ruby_xrealloc(), ruby_xfree() -
1112  * if called without the GVL, they acquire it automatically.
1113  */
1114 VALUE
1115 rb_thread_blocking_region(
1116  rb_blocking_function_t *func, void *data1,
1117  rb_unblock_function_t *ubf, void *data2)
1118 {
1119  VALUE val;
1120  rb_thread_t *th = GET_THREAD();
1121  int saved_errno = 0;
1122 
1123  th->waiting_fd = -1;
1124  if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
1125  ubf = ubf_select;
1126  data2 = th;
1127  }
1128 
1129  BLOCKING_REGION({
1130  val = func(data1);
1131  saved_errno = errno;
1132  }, ubf, data2);
1133  errno = saved_errno;
1134 
1135  return val;
1136 }
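/*
 * Editor's usage sketch for rb_thread_blocking_region() with a custom ubf:
 * a self-pipe wake-up. The names are hypothetical; assumes <poll.h> and
 * <unistd.h>. The ubf runs in the interrupting thread, so it must be
 * async-safe and must not touch Ruby objects.
 */
#if 0 /* example only */
struct wait_arg { int fd; int pipe_r; int pipe_w; };

static VALUE
do_wait(void *p)                       /* called without the GVL */
{
    struct wait_arg *a = p;
    struct pollfd fds[2];
    fds[0].fd = a->fd;     fds[0].events = POLLIN;
    fds[1].fd = a->pipe_r; fds[1].events = POLLIN;
    poll(fds, 2, -1);                  /* wakes when fd or the pipe is readable */
    return Qnil;
}

static void
wake_up(void *p)                       /* the unblocking function */
{
    struct wait_arg *a = p;
    (void)write(a->pipe_w, "", 1);     /* async-signal-safe wake-up */
}

/* caller (GVL held): rb_thread_blocking_region(do_wait, &arg, wake_up, &arg); */
#endif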
1137 
1138 VALUE
1139 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1140 {
1141  VALUE val;
1142  rb_thread_t *th = GET_THREAD();
1143  int saved_errno = 0;
1144 
1145  th->waiting_fd = fd;
1146  BLOCKING_REGION({
1147  val = func(data1);
1148  saved_errno = errno;
1149  }, ubf_select, th);
1150  th->waiting_fd = -1;
1151  errno = saved_errno;
1152 
1153  return val;
1154 }
1155 
1156 /* alias of rb_thread_blocking_region() */
1157 
1158 VALUE
1159 rb_thread_call_without_gvl(
1160  rb_blocking_function_t *func, void *data1,
1161  rb_unblock_function_t *ubf, void *data2)
1162 {
1163  return rb_thread_blocking_region(func, data1, ubf, data2);
1164 }
1165 
1166 /*
1167  * rb_thread_call_with_gvl - re-enter into Ruby world while releasing GVL.
1168  *
1169  ***
1170  *** This API is EXPERIMENTAL!
1171  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1172  ***
1173  *
1174  * While releasing GVL using rb_thread_blocking_region() or
1175  * rb_thread_call_without_gvl(), you cannot access Ruby values or invoke methods.
1176  * If you need to do so, you must use this function, rb_thread_call_with_gvl().
1177  *
1178  * This function rb_thread_call_with_gvl() does:
1179  * (1) acquire GVL.
1180  * (2) call passed function `func'.
1181  * (3) release GVL.
1182  * (4) return a value which is returned at (2).
1183  *
1184  * NOTE: You should not return a Ruby object from (2) because such an object
1185  * will not be marked (the GC may reclaim it).
1186  *
1187  * NOTE: If an exception is raised in `func', this function "DOES NOT"
1188  * protect (catch) the exception. If you have any resources
1189  * which should be freed before the exception propagates, you need to use
1190  * rb_protect() in `func' and return a value which indicates that an
1191  * exception was raised.
1192  *
1193  * NOTE: This function should not be called by a thread which
1194  * was not created as a Ruby thread (i.e., by Thread.new or the like).
1195  * In other words, this function *DOES NOT* associate
1196  * NON-Ruby thread to Ruby thread.
1197  */
1198 void *
1199 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1200 {
1201  rb_thread_t *th = ruby_thread_from_native();
1202  struct rb_blocking_region_buffer *brb;
1203  struct rb_unblock_callback prev_unblock;
1204  void *r;
1205 
1206  if (th == 0) {
1207  /* An error occurred, but we can't use rb_bug()
1208  * because this thread is not Ruby's thread.
1209  * What should we do?
1210  */
1211 
1212  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1213  exit(EXIT_FAILURE);
1214  }
1215 
1216  brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1217  prev_unblock = th->unblock;
1218 
1219  if (brb == 0) {
1220  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1221  }
1222 
1223  blocking_region_end(th, brb);
1224  /* enter to Ruby world: You can access Ruby values, methods and so on. */
1225  r = (*func)(data1);
1226  /* leave from Ruby world: You can not access Ruby values, etc. */
1227  blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg);
1228  return r;
1229 }
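/*
 * Editor's usage sketch for rb_thread_call_with_gvl(): re-entering Ruby
 * from code that currently runs without the GVL. compute_first_half() and
 * compute_second_half() are hypothetical helpers.
 */
#if 0 /* example only */
static void *
report_progress(void *msg)             /* runs with the GVL re-acquired */
{
    rb_funcall(rb_stdout, rb_intern("puts"), 1, rb_str_new2((const char *)msg));
    return NULL;
}

static VALUE
long_computation(void *arg)            /* runs without the GVL */
{
    compute_first_half(arg);
    rb_thread_call_with_gvl(report_progress, (void *)"half way");
    compute_second_half(arg);
    return Qnil;
}
#endif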
1230 
1231 /*
1232  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1233  *
1234  ***
1235  *** This API is EXPERIMENTAL!
1236  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1237  ***
1238  */
1239 
1240 int
1241 ruby_thread_has_gvl_p(void)
1242 {
1243  rb_thread_t *th = ruby_thread_from_native();
1244 
1245  if (th && th->blocking_region_buffer == 0) {
1246  return 1;
1247  }
1248  else {
1249  return 0;
1250  }
1251 }
1252 
1253 /*
1254  * call-seq:
1255  * Thread.pass -> nil
1256  *
1257  * Give the thread scheduler a hint to pass execution to another thread.
1258  * Whether the running thread actually switches depends on the OS and processor.
1259  */
1260 
1261 static VALUE
1262 thread_s_pass(VALUE klass)
1263 {
1264  rb_thread_schedule();
1265  return Qnil;
1266 }
1267 
1268 /*
1269  *
1270  */
1271 
1272 static void
1273 rb_threadptr_execute_interrupts_common(rb_thread_t *th)
1274 {
1275  rb_atomic_t interrupt;
1276 
1277  if (th->raised_flag) return;
1278 
1279  while ((interrupt = ATOMIC_EXCHANGE(th->interrupt_flag, 0)) != 0) {
1280  enum rb_thread_status status = th->status;
1281  int timer_interrupt = interrupt & 0x01;
1282  int finalizer_interrupt = interrupt & 0x04;
1283  int sig;
1284 
1285  th->status = THREAD_RUNNABLE;
1286 
1287  /* signal handling */
1288  if (th == th->vm->main_thread) {
1289  while ((sig = rb_get_next_signal()) != 0) {
1290  rb_signal_exec(th, sig);
1291  }
1292  }
1293 
1294  /* exception from another thread */
1295  if (th->thrown_errinfo) {
1296  VALUE err = th->thrown_errinfo;
1297  th->thrown_errinfo = 0;
1298  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
1299 
1300  if (err == eKillSignal || err == eTerminateSignal) {
1301  th->errinfo = INT2FIX(TAG_FATAL);
1302  TH_JUMP_TAG(th, TAG_FATAL);
1303  }
1304  else {
1305  rb_exc_raise(err);
1306  }
1307  }
1308  th->status = status;
1309 
1310  if (finalizer_interrupt) {
1311  rb_gc_finalize_deferred();
1312  }
1313 
1314  if (timer_interrupt) {
1315  unsigned long limits_us = 250 * 1000;
1316 
1317  if (th->priority > 0)
1318  limits_us <<= th->priority;
1319  else
1320  limits_us >>= -th->priority;
1321 
1322  if (status == THREAD_RUNNABLE)
1323  th->running_time_us += TIME_QUANTUM_USEC;
1324 
1325  EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1326 
1327  rb_thread_schedule_limits(limits_us);
1328  }
1329  }
1330 }
1331 
1332 void
1333 rb_threadptr_execute_interrupts(rb_thread_t *th)
1334 {
1335  rb_threadptr_execute_interrupts_common(th);
1336 }
1337 
1338 void
1339 rb_thread_execute_interrupts(VALUE thval)
1340 {
1341  rb_thread_t *th;
1342  GetThreadPtr(thval, th);
1343  rb_threadptr_execute_interrupts_common(th);
1344 }
1345 
1346 void
1347 rb_gc_mark_threads(void)
1348 {
1349  rb_bug("deprecated function rb_gc_mark_threads is called");
1350 }
1351 
1352 /*****************************************************/
1353 
1354 static void
1355 rb_threadptr_ready(rb_thread_t *th)
1356 {
1357  rb_threadptr_interrupt(th);
1358 }
1359 
1360 static VALUE
1361 rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
1362 {
1363  VALUE exc;
1364 
1365  again:
1366  if (rb_threadptr_dead(th)) {
1367  return Qnil;
1368  }
1369 
1370  if (th->thrown_errinfo != 0 || th->raised_flag) {
1371  rb_thread_schedule();
1372  goto again;
1373  }
1374 
1375  exc = rb_make_exception(argc, argv);
1376  th->thrown_errinfo = exc;
1377  rb_threadptr_ready(th);
1378  return Qnil;
1379 }
1380 
1381 void
1382 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
1383 {
1384  VALUE argv[2];
1385 
1386  argv[0] = rb_eSignal;
1387  argv[1] = INT2FIX(sig);
1388  rb_threadptr_raise(th->vm->main_thread, 2, argv);
1389 }
1390 
1391 void
1392 rb_threadptr_signal_exit(rb_thread_t *th)
1393 {
1394  VALUE argv[2];
1395 
1396  argv[0] = rb_eSystemExit;
1397  argv[1] = rb_str_new2("exit");
1398  rb_threadptr_raise(th->vm->main_thread, 2, argv);
1399 }
1400 
1401 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1402 #define USE_SIGALTSTACK
1403 #endif
1404 
1405 void
1406 ruby_thread_stack_overflow(rb_thread_t *th)
1407 {
1408  th->raised_flag = 0;
1409 #ifdef USE_SIGALTSTACK
1410  rb_exc_raise(sysstack_error);
1411 #else
1412  th->errinfo = sysstack_error;
1413  TH_JUMP_TAG(th, TAG_RAISE);
1414 #endif
1415 }
1416 
1417 int
1418 rb_threadptr_set_raised(rb_thread_t *th)
1419 {
1420  if (th->raised_flag & RAISED_EXCEPTION) {
1421  return 1;
1422  }
1423  th->raised_flag |= RAISED_EXCEPTION;
1424  return 0;
1425 }
1426 
1427 int
1428 rb_threadptr_reset_raised(rb_thread_t *th)
1429 {
1430  if (!(th->raised_flag & RAISED_EXCEPTION)) {
1431  return 0;
1432  }
1433  th->raised_flag &= ~RAISED_EXCEPTION;
1434  return 1;
1435 }
1436 
1437 #define THREAD_IO_WAITING_P(th) ( \
1438  ((th)->status == THREAD_STOPPED || \
1439  (th)->status == THREAD_STOPPED_FOREVER) && \
1440  (th)->blocking_region_buffer && \
1441  (th)->unblock.func == ubf_select && \
1442  1)
1443 
1444 static int
1445 thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
1446 {
1447  int fd = (int)data;
1448  rb_thread_t *th;
1449  GetThreadPtr((VALUE)key, th);
1450 
1451  if (THREAD_IO_WAITING_P(th)) {
1452  native_mutex_lock(&th->interrupt_lock);
1453  if (THREAD_IO_WAITING_P(th) && th->waiting_fd == fd) {
1454  th->thrown_errinfo = th->vm->special_exceptions[ruby_error_closed_stream];
1455  RUBY_VM_SET_INTERRUPT(th);
1456  (th->unblock.func)(th->unblock.arg);
1457  }
1458  native_mutex_unlock(&th->interrupt_lock);
1459  }
1460  return ST_CONTINUE;
1461 }
1462 
1463 void
1464 rb_thread_fd_close(int fd)
1465 {
1466  st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
1467 }
1468 
1469 /*
1470  * call-seq:
1471  * thr.raise
1472  * thr.raise(string)
1473  * thr.raise(exception [, string [, array]])
1474  *
1475  * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1476  * caller does not have to be <i>thr</i>.
1477  *
1478  * Thread.abort_on_exception = true
1479  * a = Thread.new { sleep(200) }
1480  * a.raise("Gotcha")
1481  *
1482  * <em>produces:</em>
1483  *
1484  * prog.rb:3: Gotcha (RuntimeError)
1485  * from prog.rb:2:in `initialize'
1486  * from prog.rb:2:in `new'
1487  * from prog.rb:2
1488  */
1489 
1490 static VALUE
1491 thread_raise_m(int argc, VALUE *argv, VALUE self)
1492 {
1493  rb_thread_t *th;
1494  GetThreadPtr(self, th);
1495  rb_threadptr_raise(th, argc, argv);
1496  return Qnil;
1497 }
1498 
1499 
1500 /*
1501  * call-seq:
1502  * thr.exit -> thr or nil
1503  * thr.kill -> thr or nil
1504  * thr.terminate -> thr or nil
1505  *
1506  * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1507  * is already marked to be killed, <code>exit</code> returns the
1508  * <code>Thread</code>. If this is the main thread, or the last thread, exits
1509  * the process.
1510  */
1511 
1512 VALUE
1513 rb_thread_kill(VALUE thread)
1514 {
1515  rb_thread_t *th;
1516 
1517  GetThreadPtr(thread, th);
1518 
1519  if (th != GET_THREAD() && th->safe_level < 4) {
1520  rb_secure(4);
1521  }
1522  if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
1523  return thread;
1524  }
1525  if (th == th->vm->main_thread) {
1526  rb_exit(EXIT_SUCCESS);
1527  }
1528 
1529  thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);
1530 
1531  rb_threadptr_interrupt(th);
1532  th->thrown_errinfo = eKillSignal;
1533  th->status = THREAD_TO_KILL;
1534 
1535  return thread;
1536 }
1537 
1538 
1539 /*
1540  * call-seq:
1541  * Thread.kill(thread) -> thread
1542  *
1543  * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1544  *
1545  * count = 0
1546  * a = Thread.new { loop { count += 1 } }
1547  * sleep(0.1) #=> 0
1548  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1549  * count #=> 93947
1550  * a.alive? #=> false
1551  */
1552 
1553 static VALUE
1554 rb_thread_s_kill(VALUE obj, VALUE th)
1555 {
1556  return rb_thread_kill(th);
1557 }
1558 
1559 
1560 /*
1561  * call-seq:
1562  * Thread.exit -> thread
1563  *
1564  * Terminates the currently running thread and schedules another thread to be
1565  * run. If this thread is already marked to be killed, <code>exit</code>
1566  * returns the <code>Thread</code>. If this is the main thread, or the last
1567  * thread, exit the process.
1568  */
1569 
1570 static VALUE
1571 rb_thread_exit(void)
1572 {
1573  return rb_thread_kill(GET_THREAD()->self);
1574 }
1575 
1576 
1577 /*
1578  * call-seq:
1579  * thr.wakeup -> thr
1580  *
1581  * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1582  * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1583  *
1584  * c = Thread.new { Thread.stop; puts "hey!" }
1585  * sleep 0.1 while c.status!='sleep'
1586  * c.wakeup
1587  * c.join
1588  *
1589  * <em>produces:</em>
1590  *
1591  * hey!
1592  */
1593 
1594 VALUE
1595 rb_thread_wakeup(VALUE thread)
1596 {
1597  if (!RTEST(rb_thread_wakeup_alive(thread))) {
1598  rb_raise(rb_eThreadError, "killed thread");
1599  }
1600  return thread;
1601 }
1602 
1603 VALUE
1604 rb_thread_wakeup_alive(VALUE thread)
1605 {
1606  rb_thread_t *th;
1607  GetThreadPtr(thread, th);
1608 
1609  if (th->status == THREAD_KILLED) {
1610  return Qnil;
1611  }
1612  rb_threadptr_ready(th);
1613  if (th->status != THREAD_TO_KILL) {
1614  th->status = THREAD_RUNNABLE;
1615  }
1616  return thread;
1617 }
1618 
1619 
1620 /*
1621  * call-seq:
1622  * thr.run -> thr
1623  *
1624  * Wakes up <i>thr</i>, making it eligible for scheduling.
1625  *
1626  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1627  * sleep 0.1 while a.status!='sleep'
1628  * puts "Got here"
1629  * a.run
1630  * a.join
1631  *
1632  * <em>produces:</em>
1633  *
1634  * a
1635  * Got here
1636  * c
1637  */
1638 
1639 VALUE
1640 rb_thread_run(VALUE thread)
1641 {
1642  rb_thread_wakeup(thread);
1643  rb_thread_schedule();
1644  return thread;
1645 }
1646 
1647 
1648 /*
1649  * call-seq:
1650  * Thread.stop -> nil
1651  *
1652  * Stops execution of the current thread, putting it into a ``sleep'' state,
1653  * and schedules execution of another thread.
1654  *
1655  * a = Thread.new { print "a"; Thread.stop; print "c" }
1656  * sleep 0.1 while a.status!='sleep'
1657  * print "b"
1658  * a.run
1659  * a.join
1660  *
1661  * <em>produces:</em>
1662  *
1663  * abc
1664  */
1665 
1666 VALUE
1667 rb_thread_stop(void)
1668 {
1669  if (rb_thread_alone()) {
1670  rb_raise(rb_eThreadError,
1671  "stopping only thread\n\tnote: use sleep to stop forever");
1672  }
1673  rb_thread_sleep_deadly();
1674  return Qnil;
1675 }
1676 
1677 static int
1678 thread_list_i(st_data_t key, st_data_t val, void *data)
1679 {
1680  VALUE ary = (VALUE)data;
1681  rb_thread_t *th;
1682  GetThreadPtr((VALUE)key, th);
1683 
1684  switch (th->status) {
1685  case THREAD_RUNNABLE:
1686  case THREAD_STOPPED:
1687  case THREAD_STOPPED_FOREVER:
1688  case THREAD_TO_KILL:
1689  rb_ary_push(ary, th->self);
1690  default:
1691  break;
1692  }
1693  return ST_CONTINUE;
1694 }
1695 
1696 /********************************************************************/
1697 
1698 /*
1699  * call-seq:
1700  * Thread.list -> array
1701  *
1702  * Returns an array of <code>Thread</code> objects for all threads that are
1703  * either runnable or stopped.
1704  *
1705  * Thread.new { sleep(200) }
1706  * Thread.new { 1000000.times {|i| i*i } }
1707  * Thread.new { Thread.stop }
1708  * Thread.list.each {|t| p t}
1709  *
1710  * <em>produces:</em>
1711  *
1712  * #<Thread:0x401b3e84 sleep>
1713  * #<Thread:0x401b3f38 run>
1714  * #<Thread:0x401b3fb0 sleep>
1715  * #<Thread:0x401bdf4c run>
1716  */
1717 
1718 VALUE
1719 rb_thread_list(void)
1720 {
1721  VALUE ary = rb_ary_new();
1722  st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1723  return ary;
1724 }
1725 
1726 VALUE
1727 rb_thread_current(void)
1728 {
1729  return GET_THREAD()->self;
1730 }
1731 
1732 /*
1733  * call-seq:
1734  * Thread.current -> thread
1735  *
1736  * Returns the currently executing thread.
1737  *
1738  * Thread.current #=> #<Thread:0x401bdf4c run>
1739  */
1740 
1741 static VALUE
1742 thread_s_current(VALUE klass)
1743 {
1744  return rb_thread_current();
1745 }
1746 
1747 VALUE
1748 rb_thread_main(void)
1749 {
1750  return GET_THREAD()->vm->main_thread->self;
1751 }
1752 
1753 /*
1754  * call-seq:
1755  * Thread.main -> thread
1756  *
1757  * Returns the main thread.
1758  */
1759 
1760 static VALUE
1761 rb_thread_s_main(VALUE klass)
1762 {
1763  return rb_thread_main();
1764 }
1765 
1766 
1767 /*
1768  * call-seq:
1769  * Thread.abort_on_exception -> true or false
1770  *
1771  * Returns the status of the global ``abort on exception'' condition. The
1772  * default is <code>false</code>. When set to <code>true</code>, or if the
1773  * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1774  * command line option <code>-d</code> was specified) all threads will abort
1775  * (the process will <code>exit(0)</code>) if an exception is raised in any
1776  * thread. See also <code>Thread::abort_on_exception=</code>.
1777  */
1778 
1779 static VALUE
1780 rb_thread_s_abort_exc(void)
1781 {
1782  return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1783 }
1784 
1785 
1786 /*
1787  * call-seq:
1788  * Thread.abort_on_exception= boolean -> true or false
1789  *
1790  * When set to <code>true</code>, all threads will abort if an exception is
1791  * raised. Returns the new state.
1792  *
1793  * Thread.abort_on_exception = true
1794  * t1 = Thread.new do
1795  * puts "In new thread"
1796  * raise "Exception from thread"
1797  * end
1798  * sleep(1)
1799  * puts "not reached"
1800  *
1801  * <em>produces:</em>
1802  *
1803  * In new thread
1804  * prog.rb:4: Exception from thread (RuntimeError)
1805  * from prog.rb:2:in `initialize'
1806  * from prog.rb:2:in `new'
1807  * from prog.rb:2
1808  */
1809 
1810 static VALUE
1811 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1812 {
1813  rb_secure(4);
1814  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1815  return val;
1816 }
1817 
1818 
1819 /*
1820  * call-seq:
1821  * thr.abort_on_exception -> true or false
1822  *
1823  * Returns the status of the thread-local ``abort on exception'' condition for
1824  * <i>thr</i>. The default is <code>false</code>. See also
1825  * <code>Thread::abort_on_exception=</code>.
1826  */
1827 
1828 static VALUE
1829 rb_thread_abort_exc(VALUE thread)
1830 {
1831  rb_thread_t *th;
1832  GetThreadPtr(thread, th);
1833  return th->abort_on_exception ? Qtrue : Qfalse;
1834 }
1835 
1836 
1837 /*
1838  * call-seq:
1839  * thr.abort_on_exception= boolean -> true or false
1840  *
1841  * When set to <code>true</code>, causes all threads (including the main
1842  * program) to abort if an exception is raised in <i>thr</i>. The process will
1843  * effectively <code>exit(0)</code>.
1844  */
1845 
1846 static VALUE
1847 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1848 {
1849  rb_thread_t *th;
1850  rb_secure(4);
1851 
1852  GetThreadPtr(thread, th);
1853  th->abort_on_exception = RTEST(val);
1854  return val;
1855 }
1856 
1857 
1858 /*
1859  * call-seq:
1860  * thr.group -> thgrp or nil
1861  *
1862  * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1863  * the thread is not a member of any group.
1864  *
1865  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1866  */
1867 
1868 VALUE
1869 rb_thread_group(VALUE thread)
1870 {
1871  rb_thread_t *th;
1872  VALUE group;
1873  GetThreadPtr(thread, th);
1874  group = th->thgroup;
1875 
1876  if (!group) {
1877  group = Qnil;
1878  }
1879  return group;
1880 }
1881 
1882 static const char *
1883 thread_status_name(enum rb_thread_status status)
1884 {
1885  switch (status) {
1886  case THREAD_RUNNABLE:
1887  return "run";
1888  case THREAD_STOPPED:
1889  case THREAD_STOPPED_FOREVER:
1890  return "sleep";
1891  case THREAD_TO_KILL:
1892  return "aborting";
1893  case THREAD_KILLED:
1894  return "dead";
1895  default:
1896  return "unknown";
1897  }
1898 }
1899 
1900 static int
1901 rb_threadptr_dead(rb_thread_t *th)
1902 {
1903  return th->status == THREAD_KILLED;
1904 }
1905 
1906 
1907 /*
1908  * call-seq:
1909  * thr.status -> string, false or nil
1910  *
1911  * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1912  * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1913  * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1914  * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1915  * terminated with an exception.
1916  *
1917  * a = Thread.new { raise("die now") }
1918  * b = Thread.new { Thread.stop }
1919  * c = Thread.new { Thread.exit }
1920  * d = Thread.new { sleep }
1921  * d.kill #=> #<Thread:0x401b3678 aborting>
1922  * a.status #=> nil
1923  * b.status #=> "sleep"
1924  * c.status #=> false
1925  * d.status #=> "aborting"
1926  * Thread.current.status #=> "run"
1927  */
1928 
1929 static VALUE
1930 rb_thread_status(VALUE thread)
1931 {
1932  rb_thread_t *th;
1933  GetThreadPtr(thread, th);
1934 
1935  if (rb_threadptr_dead(th)) {
1936  if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1937  /* TODO */ ) {
1938  return Qnil;
1939  }
1940  return Qfalse;
1941  }
1942  return rb_str_new2(thread_status_name(th->status));
1943 }
1944 
1945 
1946 /*
1947  * call-seq:
1948  * thr.alive? -> true or false
1949  *
1950  * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1951  *
1952  * thr = Thread.new { }
1953  * thr.join #=> #<Thread:0x401b3fb0 dead>
1954  * Thread.current.alive? #=> true
1955  * thr.alive? #=> false
1956  */
1957 
1958 static VALUE
1959 rb_thread_alive_p(VALUE thread)
1960 {
1961  rb_thread_t *th;
1962  GetThreadPtr(thread, th);
1963 
1964  if (rb_threadptr_dead(th))
1965  return Qfalse;
1966  return Qtrue;
1967 }
1968 
1969 /*
1970  * call-seq:
1971  * thr.stop? -> true or false
1972  *
1973  * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1974  *
1975  * a = Thread.new { Thread.stop }
1976  * b = Thread.current
1977  * a.stop? #=> true
1978  * b.stop? #=> false
1979  */
1980 
1981 static VALUE
1982 rb_thread_stop_p(VALUE thread)
1983 {
1984  rb_thread_t *th;
1985  GetThreadPtr(thread, th);
1986 
1987  if (rb_threadptr_dead(th))
1988  return Qtrue;
1989  if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
1990  return Qtrue;
1991  return Qfalse;
1992 }
1993 
1994 /*
1995  * call-seq:
1996  * thr.safe_level -> integer
1997  *
1998  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1999  * levels can help when implementing sandboxes which run insecure code.
2000  *
2001  * thr = Thread.new { $SAFE = 3; sleep }
2002  * Thread.current.safe_level #=> 0
2003  * thr.safe_level #=> 3
2004  */
2005 
2006 static VALUE
2007 rb_thread_safe_level(VALUE thread)
2008 {
2009  rb_thread_t *th;
2010  GetThreadPtr(thread, th);
2011 
2012  return INT2NUM(th->safe_level);
2013 }
2014 
2015 /*
2016  * call-seq:
2017  * thr.inspect -> string
2018  *
2019  * Dump the name, id, and status of _thr_ to a string.
2020  */
2021 
2022 static VALUE
2023 rb_thread_inspect(VALUE thread)
2024 {
2025  const char *cname = rb_obj_classname(thread);
2026  rb_thread_t *th;
2027  const char *status;
2028  VALUE str;
2029 
2030  GetThreadPtr(thread, th);
2031  status = thread_status_name(th->status);
2032  str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
2033  OBJ_INFECT(str, thread);
2034 
2035  return str;
2036 }
2037 
2038 VALUE
2039 rb_thread_local_aref(VALUE thread, ID id)
2040 {
2041  rb_thread_t *th;
2042  st_data_t val;
2043 
2044  GetThreadPtr(thread, th);
2045  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2046  rb_raise(rb_eSecurityError, "Insecure: thread locals");
2047  }
2048  if (!th->local_storage) {
2049  return Qnil;
2050  }
2051  if (st_lookup(th->local_storage, id, &val)) {
2052  return (VALUE)val;
2053  }
2054  return Qnil;
2055 }
2056 
2057 /*
2058  * call-seq:
2059  * thr[sym] -> obj or nil
2060  *
2061  * Attribute Reference---Returns the value of a thread-local variable, using
2062  * either a symbol or a string name. If the specified variable does not exist,
2063  * returns <code>nil</code>.
2064  *
2065  * [
2066  * Thread.new { Thread.current["name"] = "A" },
2067  * Thread.new { Thread.current[:name] = "B" },
2068  * Thread.new { Thread.current["name"] = "C" }
2069  * ].each do |th|
2070  * th.join
2071  * puts "#{th.inspect}: #{th[:name]}"
2072  * end
2073  *
2074  * <em>produces:</em>
2075  *
2076  * #<Thread:0x00000002a54220 dead>: A
2077  * #<Thread:0x00000002a541a8 dead>: B
2078  * #<Thread:0x00000002a54130 dead>: C
2079  */
2080 
2081 static VALUE
2082 rb_thread_aref(VALUE thread, VALUE id)
2083 {
2084  return rb_thread_local_aref(thread, rb_to_id(id));
2085 }
2086 
2087 VALUE
2088 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
2089 {
2090  rb_thread_t *th;
2091  GetThreadPtr(thread, th);
2092 
2093  if (rb_safe_level() >= 4 && th != GET_THREAD()) {
2094  rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
2095  }
2096  if (OBJ_FROZEN(thread)) {
2097  rb_error_frozen("thread locals");
2098  }
2099  if (!th->local_storage) {
2100  th->local_storage = st_init_numtable();
2101  }
2102  if (NIL_P(val)) {
2103  st_delete_wrap(th->local_storage, id);
2104  return Qnil;
2105  }
2106  st_insert(th->local_storage, id, val);
2107  return val;
2108 }
2109 
2110 /*
2111  * call-seq:
2112  * thr[sym] = obj -> obj
2113  *
2114  * Attribute Assignment---Sets or creates the value of a thread-local variable,
2115  * using either a symbol or a string. See also <code>Thread#[]</code>.
2116  */
2117 
2118 static VALUE
2119 rb_thread_aset(VALUE self, VALUE id, VALUE val)
2120 {
2121  return rb_thread_local_aset(self, rb_to_id(id), val);
2122 }
2123 
2124 /*
2125  * call-seq:
2126  * thr.key?(sym) -> true or false
2127  *
2128  * Returns <code>true</code> if the given string (or symbol) exists as a
2129  * thread-local variable.
2130  *
2131  * me = Thread.current
2132  * me[:oliver] = "a"
2133  * me.key?(:oliver) #=> true
2134  * me.key?(:stanley) #=> false
2135  */
2136 
2137 static VALUE
2138 rb_thread_key_p(VALUE self, VALUE key)
2139 {
2140  rb_thread_t *th;
2141  ID id = rb_to_id(key);
2142 
2143  GetThreadPtr(self, th);
2144 
2145  if (!th->local_storage) {
2146  return Qfalse;
2147  }
2148  if (st_lookup(th->local_storage, id, 0)) {
2149  return Qtrue;
2150  }
2151  return Qfalse;
2152 }
2153 
2154 static int
2155 thread_keys_i(ID key, VALUE value, VALUE ary)
2156 {
2157  rb_ary_push(ary, ID2SYM(key));
2158  return ST_CONTINUE;
2159 }
2160 
2161 static int
2162 vm_living_thread_num(rb_vm_t *vm)
2163 {
2164  return vm->living_threads->num_entries;
2165 }
2166 
2167 int
2168 rb_thread_alone(void)
2169 {
2170  int num = 1;
2171  if (GET_THREAD()->vm->living_threads) {
2172  num = vm_living_thread_num(GET_THREAD()->vm);
2173  thread_debug("rb_thread_alone: %d\n", num);
2174  }
2175  return num == 1;
2176 }
2177 
2178 /*
2179  * call-seq:
2180  * thr.keys -> array
2181  *
2182  * Returns an array of the names of the thread-local variables (as Symbols).
2183  *
2184  * thr = Thread.new do
2185  * Thread.current[:cat] = 'meow'
2186  * Thread.current["dog"] = 'woof'
2187  * end
2188  * thr.join #=> #<Thread:0x401b3f10 dead>
2189  * thr.keys #=> [:dog, :cat]
2190  */
2191 
2192 static VALUE
2193 rb_thread_keys(VALUE self)
2194 {
2195  rb_thread_t *th;
2196  VALUE ary = rb_ary_new();
2197  GetThreadPtr(self, th);
2198 
2199  if (th->local_storage) {
2200  st_foreach(th->local_storage, thread_keys_i, ary);
2201  }
2202  return ary;
2203 }
2204 
2205 /*
2206  * call-seq:
2207  * thr.priority -> integer
2208  *
2209  * Returns the priority of <i>thr</i>. The default is inherited from the
2210  * thread which created the new thread, or zero for the
2211  * initial main thread; higher-priority threads will run more frequently
2212  * than lower-priority threads (but lower-priority threads can also run).
2213  *
2214  * This is just a hint for the Ruby thread scheduler. It may be ignored on
2215  * some platforms.
2216  *
2217  * Thread.current.priority #=> 0
2218  */
2219 
2220 static VALUE
2221 rb_thread_priority(VALUE thread)
2222 {
2223  rb_thread_t *th;
2224  GetThreadPtr(thread, th);
2225  return INT2NUM(th->priority);
2226 }
2227 
2228 
2229 /*
2230  * call-seq:
2231  * thr.priority= integer -> thr
2232  *
2233  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
2234  * will run more frequently than lower-priority threads (but lower-priority
2235  * threads can also run).
2236  *
2237  * This is just a hint for the Ruby thread scheduler. It may be ignored on
2238  * some platforms.
2239  *
2240  * count1 = count2 = 0
2241  * a = Thread.new do
2242  * loop { count1 += 1 }
2243  * end
2244  * a.priority = -1
2245  *
2246  * b = Thread.new do
2247  * loop { count2 += 1 }
2248  * end
2249  * b.priority = -2
2250  * sleep 1 #=> 1
2251  * count1 #=> 622504
2252  * count2 #=> 5832
2253  */
2254 
2255 static VALUE
2256 rb_thread_priority_set(VALUE thread, VALUE prio)
2257 {
2258  rb_thread_t *th;
2259  int priority;
2260  GetThreadPtr(thread, th);
2261 
2262  rb_secure(4);
2263 
2264 #if USE_NATIVE_THREAD_PRIORITY
2265  th->priority = NUM2INT(prio);
2266  native_thread_apply_priority(th);
2267 #else
2268  priority = NUM2INT(prio);
2269  if (priority > RUBY_THREAD_PRIORITY_MAX) {
2270  priority = RUBY_THREAD_PRIORITY_MAX;
2271  }
2272  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
2273  priority = RUBY_THREAD_PRIORITY_MIN;
2274  }
2275  th->priority = priority;
2276 #endif
2277  return INT2NUM(th->priority);
2278 }
2279 
2280 /* for IO */
2281 
2282 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
2283 
2284 /*
2285  * several Unix platforms support file descriptors bigger than FD_SETSIZE
2286  * in select(2) system call.
2287  *
2288  * - Linux 2.2.12 (?)
2289  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
2290  * select(2) documents how to allocate fd_set dynamically.
2291  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
2292  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
2293  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
2294  * select(2) documents how to allocate fd_set dynamically.
2295  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
2296  * - HP-UX documents how to allocate fd_set dynamically.
2297  * http://docs.hp.com/en/B2355-60105/select.2.html
2298  * - Solaris 8 has select_large_fdset
2299  *
2300  * When fd_set is not big enough to hold big file descriptors,
2301  * it should be allocated dynamically.
2302  * Note that this assumes fd_set is structured as a bitmap.
2303  *
2304  * rb_fd_init allocates the memory.
2305  * rb_fd_term frees the memory.
2306  * rb_fd_set may re-allocate the bitmap.
2307  *
2308  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
2309  */
2310 
2311 void
2312 rb_fd_init(rb_fdset_t *fds)
2313 {
2314  fds->maxfd = 0;
2315  fds->fdset = ALLOC(fd_set);
2316  FD_ZERO(fds->fdset);
2317 }
2318 
2319 void
2320 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
2321 {
2322  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2323 
2324  if (size < sizeof(fd_set))
2325  size = sizeof(fd_set);
2326  dst->maxfd = src->maxfd;
2327  dst->fdset = xmalloc(size);
2328  memcpy(dst->fdset, src->fdset, size);
2329 }
2330 
2331 void
2332 rb_fd_term(rb_fdset_t *fds)
2333 {
2334  if (fds->fdset) xfree(fds->fdset);
2335  fds->maxfd = 0;
2336  fds->fdset = 0;
2337 }
2338 
2339 void
2340 rb_fd_zero(rb_fdset_t *fds)
2341 {
2342  if (fds->fdset)
2343  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
2344 }
2345 
2346 static void
2347 rb_fd_resize(int n, rb_fdset_t *fds)
2348 {
2349  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
2350  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
2351 
2352  if (m < sizeof(fd_set)) m = sizeof(fd_set);
2353  if (o < sizeof(fd_set)) o = sizeof(fd_set);
2354 
2355  if (m > o) {
2356  fds->fdset = xrealloc(fds->fdset, m);
2357  memset((char *)fds->fdset + o, 0, m - o);
2358  }
2359  if (n >= fds->maxfd) fds->maxfd = n + 1;
2360 }
2361 
2362 void
2363 rb_fd_set(int n, rb_fdset_t *fds)
2364 {
2365  rb_fd_resize(n, fds);
2366  FD_SET(n, fds->fdset);
2367 }
2368 
2369 void
2370 rb_fd_clr(int n, rb_fdset_t *fds)
2371 {
2372  if (n >= fds->maxfd) return;
2373  FD_CLR(n, fds->fdset);
2374 }
2375 
2376 int
2377 rb_fd_isset(int n, const rb_fdset_t *fds)
2378 {
2379  if (n >= fds->maxfd) return 0;
2380  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
2381 }
2382 
2383 void
2384 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
2385 {
2386  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
2387 
2388  if (size < sizeof(fd_set)) size = sizeof(fd_set);
2389  dst->maxfd = max;
2390  dst->fdset = xrealloc(dst->fdset, size);
2391  memcpy(dst->fdset, src, size);
2392 }
2393 
2394 static void
2395 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2396 {
2397  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2398 
2399  if (size > sizeof(fd_set)) {
2400  rb_raise(rb_eArgError, "too large fdsets");
2401  }
2402  memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
2403 }
2404 
2405 void
2406 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
2407 {
2408  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
2409 
2410  if (size < sizeof(fd_set))
2411  size = sizeof(fd_set);
2412  dst->maxfd = src->maxfd;
2413  dst->fdset = xrealloc(dst->fdset, size);
2414  memcpy(dst->fdset, src->fdset, size);
2415 }
2416 
2417 int
2418 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
2419 {
2420  fd_set *r = NULL, *w = NULL, *e = NULL;
2421  if (readfds) {
2422  rb_fd_resize(n - 1, readfds);
2423  r = rb_fd_ptr(readfds);
2424  }
2425  if (writefds) {
2426  rb_fd_resize(n - 1, writefds);
2427  w = rb_fd_ptr(writefds);
2428  }
2429  if (exceptfds) {
2430  rb_fd_resize(n - 1, exceptfds);
2431  e = rb_fd_ptr(exceptfds);
2432  }
2433  return select(n, r, w, e, timeout);
2434 }
2435 
2436 #undef FD_ZERO
2437 #undef FD_SET
2438 #undef FD_CLR
2439 #undef FD_ISSET
2440 
2441 #define FD_ZERO(f) rb_fd_zero(f)
2442 #define FD_SET(i, f) rb_fd_set((i), (f))
2443 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2444 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2445 
2446 #elif defined(_WIN32)
2447 
2448 void
2449 rb_fd_init(rb_fdset_t *set)
2450 {
2451  set->capa = FD_SETSIZE;
2452  set->fdset = ALLOC(fd_set);
2453  FD_ZERO(set->fdset);
2454 }
2455 
2456 void
2457 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
2458 {
2459  rb_fd_init(dst);
2460  rb_fd_dup(dst, src);
2461 }
2462 
2463 static void
2464 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
2465 {
2466  int max = rb_fd_max(src);
2467 
2468  /* we assume src is the result of select() with dst, so dst should be
2469  * larger than or equal to src. */
2470  if (max > FD_SETSIZE || max > dst->fd_count) {
2471  rb_raise(rb_eArgError, "too large fdsets");
2472  }
2473 
2474  memcpy(dst->fd_array, src->fdset->fd_array, max);
2475  dst->fd_count = max;
2476 }
2477 
2478 void
2479 rb_fd_term(rb_fdset_t *set)
2480 {
2481  xfree(set->fdset);
2482  set->fdset = NULL;
2483  set->capa = 0;
2484 }
2485 
2486 void
2487 rb_fd_set(int fd, rb_fdset_t *set)
2488 {
2489  unsigned int i;
2490  SOCKET s = rb_w32_get_osfhandle(fd);
2491 
2492  for (i = 0; i < set->fdset->fd_count; i++) {
2493  if (set->fdset->fd_array[i] == s) {
2494  return;
2495  }
2496  }
2497  if (set->fdset->fd_count >= (unsigned)set->capa) {
2498  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
2499  set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
2500  }
2501  set->fdset->fd_array[set->fdset->fd_count++] = s;
2502 }
2503 
2504 #undef FD_ZERO
2505 #undef FD_SET
2506 #undef FD_CLR
2507 #undef FD_ISSET
2508 
2509 #define FD_ZERO(f) rb_fd_zero(f)
2510 #define FD_SET(i, f) rb_fd_set((i), (f))
2511 #define FD_CLR(i, f) rb_fd_clr((i), (f))
2512 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
2513 
2514 #else
2515 #define rb_fd_rcopy(d, s) (*(d) = *(s))
2516 #endif
2517 
2518 #if defined(__CYGWIN__)
2519 static long
2520 cmp_tv(const struct timeval *a, const struct timeval *b)
2521 {
2522  long d = (a->tv_sec - b->tv_sec);
2523  return (d != 0) ? d : (a->tv_usec - b->tv_usec);
2524 }
2525 
2526 static int
2527 subtract_tv(struct timeval *rest, const struct timeval *wait)
2528 {
2529  if (rest->tv_sec < wait->tv_sec) {
2530  return 0;
2531  }
2532  while (rest->tv_usec < wait->tv_usec) {
2533  if (rest->tv_sec <= wait->tv_sec) {
2534  return 0;
2535  }
2536  rest->tv_sec -= 1;
2537  rest->tv_usec += 1000 * 1000;
2538  }
2539  rest->tv_sec -= wait->tv_sec;
2540  rest->tv_usec -= wait->tv_usec;
2541  return rest->tv_sec != 0 || rest->tv_usec != 0;
2542 }
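/*
 * Illustrative semantics (not part of thread.c): subtract_tv() computes
 * rest -= wait in place and returns non-zero while time remains. For
 * example, rest = {1, 200000} minus wait = {0, 700000} borrows one
 * second, leaves rest = {0, 500000} and returns 1; subtracting the same
 * wait again hits the tv_sec guard and returns 0. cmp_tv() is the
 * matching three-way comparison used to pick the shorter interval below.
 */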
2543 #endif
2544 
2545 static int
2546 do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
2547  struct timeval *timeout)
2548 {
2549  int result, lerrno;
2550  rb_fdset_t UNINITIALIZED_VAR(orig_read);
2551  rb_fdset_t UNINITIALIZED_VAR(orig_write);
2552  rb_fdset_t UNINITIALIZED_VAR(orig_except);
2553  double limit = 0;
2554  struct timeval wait_rest;
2555 # if defined(__CYGWIN__)
2556  struct timeval start_time;
2557 # endif
2558 
2559  if (timeout) {
2560 # if defined(__CYGWIN__)
2561  gettimeofday(&start_time, NULL);
2562  limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
2563 # else
2564  limit = timeofday();
2565 # endif
2566  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
2567  wait_rest = *timeout;
2568  timeout = &wait_rest;
2569  }
2570 
2571  if (read)
2572  rb_fd_init_copy(&orig_read, read);
2573  if (write)
2574  rb_fd_init_copy(&orig_write, write);
2575  if (except)
2576  rb_fd_init_copy(&orig_except, except);
2577 
2578  retry:
2579  lerrno = 0;
2580 
2581 #if defined(__CYGWIN__)
2582  {
2583  int finish = 0;
2584  /* polling duration: 100ms */
2585  struct timeval wait_100ms, *wait;
2586  wait_100ms.tv_sec = 0;
2587  wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
2588 
2589  do {
2590  wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) < 0) ? &wait_100ms : timeout;
2591  BLOCKING_REGION({
2592  do {
2593  result = rb_fd_select(n, read, write, except, wait);
2594  if (result < 0) lerrno = errno;
2595  if (result != 0) break;
2596 
2597  if (read)
2598  rb_fd_dup(read, &orig_read);
2599  if (write)
2600  rb_fd_dup(write, &orig_write);
2601  if (except)
2602  rb_fd_dup(except, &orig_except);
2603  if (timeout) {
2604  struct timeval elapsed;
2605  gettimeofday(&elapsed, NULL);
2606  subtract_tv(&elapsed, &start_time);
2607  gettimeofday(&start_time, NULL);
2608  if (!subtract_tv(timeout, &elapsed)) {
2609  finish = 1;
2610  break;
2611  }
2612  if (cmp_tv(&wait_100ms, timeout) > 0) wait = timeout;
2613  }
2614  } while (__th->interrupt_flag == 0);
2615  }, 0, 0);
2616  } while (result == 0 && !finish);
2617  }
2618 #elif defined(_WIN32)
2619  {
2620  rb_thread_t *th = GET_THREAD();
2621  BLOCKING_REGION({
2622  result = native_fd_select(n, read, write, except, timeout, th);
2623  if (result < 0) lerrno = errno;
2624  }, ubf_select, th);
2625  }
2626 #else
2627  BLOCKING_REGION({
2628  result = rb_fd_select(n, read, write, except, timeout);
2629  if (result < 0) lerrno = errno;
2630  }, ubf_select, GET_THREAD());
2631 #endif
2632 
2633  errno = lerrno;
2634 
2635  if (result < 0) {
2636  switch (errno) {
2637  case EINTR:
2638 #ifdef ERESTART
2639  case ERESTART:
2640 #endif
2641  if (read)
2642  rb_fd_dup(read, &orig_read);
2643  if (write)
2644  rb_fd_dup(write, &orig_write);
2645  if (except)
2646  rb_fd_dup(except, &orig_except);
2647 
2648  if (timeout) {
2649  double d = limit - timeofday();
2650 
2651  wait_rest.tv_sec = (unsigned int)d;
2652  wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
2653  if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
2654  if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
2655  }
2656 
2657  goto retry;
2658  default:
2659  break;
2660  }
2661  }
2662 
2663  if (read)
2664  rb_fd_term(&orig_read);
2665  if (write)
2666  rb_fd_term(&orig_write);
2667  if (except)
2668  rb_fd_term(&orig_except);
2669 
2670  return result;
2671 }
2672 
2673 static void
2674 rb_thread_wait_fd_rw(int fd, int read)
2675 {
2676  int result = 0;
2677  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
2678 
2679  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2680 
2681  if (fd < 0) {
2682  rb_raise(rb_eIOError, "closed stream");
2683  }
2684  if (rb_thread_alone()) return;
2685  while (result <= 0) {
2686  result = rb_wait_for_single_fd(fd, events, NULL);
2687 
2688  if (result < 0) {
2689  rb_sys_fail(0);
2690  }
2691  }
2692 
2693  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
2694 }
2695 
2696 void
2697 rb_thread_wait_fd(int fd)
2698 {
2699  rb_thread_wait_fd_rw(fd, 1);
2700 }
2701 
2702 int
2703 rb_thread_fd_writable(int fd)
2704 {
2705  rb_thread_wait_fd_rw(fd, 0);
2706  return TRUE;
2707 }
2708 
2709 int
2710 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2711  struct timeval *timeout)
2712 {
2713  rb_fdset_t fdsets[3];
2714  rb_fdset_t *rfds = NULL;
2715  rb_fdset_t *wfds = NULL;
2716  rb_fdset_t *efds = NULL;
2717  int retval;
2718 
2719  if (read) {
2720  rfds = &fdsets[0];
2721  rb_fd_init(rfds);
2722  rb_fd_copy(rfds, read, max);
2723  }
2724  if (write) {
2725  wfds = &fdsets[1];
2726  rb_fd_init(wfds);
2727  rb_fd_copy(wfds, write, max);
2728  }
2729  if (except) {
2730  efds = &fdsets[2];
2731  rb_fd_init(efds);
2732  rb_fd_copy(efds, except, max);
2733  }
2734 
2735  retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);
2736 
2737  if (rfds) {
2738  rb_fd_rcopy(read, rfds);
2739  rb_fd_term(rfds);
2740  }
2741  if (wfds) {
2742  rb_fd_rcopy(write, wfds);
2743  rb_fd_term(wfds);
2744  }
2745  if (efds) {
2746  rb_fd_rcopy(except, efds);
2747  rb_fd_term(efds);
2748  }
2749 
2750  return retval;
2751 }
2752 
2753 int
2754 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
2755  struct timeval *timeout)
2756 {
2757  if (!read && !write && !except) {
2758  if (!timeout) {
2759  rb_thread_sleep_forever();
2760  return 0;
2761  }
2762  rb_thread_wait_for(*timeout);
2763  return 0;
2764  }
2765 
2766  if (read) {
2767  rb_fd_resize(max - 1, read);
2768  }
2769  if (write) {
2770  rb_fd_resize(max - 1, write);
2771  }
2772  if (except) {
2773  rb_fd_resize(max - 1, except);
2774  }
2775  return do_select(max, read, write, except, timeout);
2776 }
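/*
 * Usage sketch (illustrative; the fd and timeout values are hypothetical):
 * waiting up to 1.5 seconds for fd 7 to become readable through the
 * rb_fdset_t-based API above.
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 1, 500000 };
 *   rb_fd_init(&rfds);
 *   rb_fd_set(7, &rfds);
 *   if (rb_thread_fd_select(8, &rfds, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(7, &rfds)) {
 *       ...                          (fd 7 is readable)
 *   }
 *   rb_fd_term(&rfds);
 */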
2777 
2778 /*
2779  * poll() is supported by many OSes, but so far Linux is the only
2780  * one we know of that supports using poll() in all places select()
2781  * would work.
2782  */
2783 #if defined(HAVE_POLL) && defined(linux)
2784 # define USE_POLL
2785 #endif
2786 
2787 #ifdef USE_POLL
2788 
2789 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
2790 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
2791 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
2792 #define POLLEX_SET (POLLPRI)
2793 
2794 #define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
2795 #define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
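/*
 * Worked example (illustrative): with a signed 64-bit time_t,
 * ~(time_t)0 == -1 <= 0, so TIMET_MAX is (~(unsigned_time_t)0) >> 1,
 * i.e. 2**63 - 1, and TIMET_MIN is (unsigned_time_t)1 << 63, the most
 * negative value. With an unsigned time_t the other branches yield
 * 2**64 - 1 and 0 respectively.
 */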
2796 
2797 #ifndef HAVE_PPOLL
2798 /* TODO: don't ignore sigmask */
2799 int ppoll(struct pollfd *fds, nfds_t nfds,
2800  const struct timespec *ts, const sigset_t *sigmask)
2801 {
2802  int timeout_ms;
2803 
2804  if (ts) {
2805  int tmp, tmp2;
2806 
2807  if (ts->tv_sec > TIMET_MAX/1000)
2808  timeout_ms = -1;
2809  else {
2810  tmp = ts->tv_sec * 1000;
2811  tmp2 = ts->tv_nsec / (1000 * 1000);
2812  if (TIMET_MAX - tmp < tmp2)
2813  timeout_ms = -1;
2814  else
2815  timeout_ms = tmp + tmp2;
2816  }
2817  } else
2818  timeout_ms = -1;
2819 
2820  return poll(fds, nfds, timeout_ms);
2821 }
2822 #endif
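/*
 * Worked example (illustrative) for the ppoll() fallback above: a
 * timespec of {1, 500000000} gives tmp = 1000 and tmp2 = 500, so poll()
 * runs with timeout_ms = 1500, while any overflowing tv_sec degrades to
 * timeout_ms = -1, i.e. an infinite timeout.
 */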
2823 
2824 /*
2825  * returns a mask of events
2826  */
2827 int
2828 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2829 {
2830  struct pollfd fds;
2831  int result, lerrno;
2832  double limit = 0;
2833  struct timespec ts;
2834  struct timespec *timeout = NULL;
2835 
2836  if (tv) {
2837  ts.tv_sec = tv->tv_sec;
2838  ts.tv_nsec = tv->tv_usec * 1000;
2839  limit = timeofday();
2840  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
2841  timeout = &ts;
2842  }
2843 
2844  fds.fd = fd;
2845  fds.events = (short)events;
2846 
2847 retry:
2848  lerrno = 0;
2849  BLOCKING_REGION({
2850  result = ppoll(&fds, 1, timeout, NULL);
2851  if (result < 0) lerrno = errno;
2852  }, ubf_select, GET_THREAD());
2853 
2854  if (result < 0) {
2855  errno = lerrno;
2856  switch (errno) {
2857  case EINTR:
2858 #ifdef ERESTART
2859  case ERESTART:
2860 #endif
2861  if (timeout) {
2862  double d = limit - timeofday();
2863 
2864  ts.tv_sec = (long)d;
2865  ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
2866  if (ts.tv_sec < 0)
2867  ts.tv_sec = 0;
2868  if (ts.tv_nsec < 0)
2869  ts.tv_nsec = 0;
2870  }
2871  goto retry;
2872  }
2873  return -1;
2874  }
2875 
2876  if (fds.revents & POLLNVAL) {
2877  errno = EBADF;
2878  return -1;
2879  }
2880 
2881  /*
2882  * POLLIN and POLLOUT have different meanings from select(2)'s read/write
2883  * bits, so we need to fix them up.
2884  */
2885  result = 0;
2886  if (fds.revents & POLLIN_SET)
2887  result |= RB_WAITFD_IN;
2888  if (fds.revents & POLLOUT_SET)
2889  result |= RB_WAITFD_OUT;
2890  if (fds.revents & POLLEX_SET)
2891  result |= RB_WAITFD_PRI;
2892 
2893  return result;
2894 }
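/*
 * Usage sketch (illustrative; fd is a hypothetical open descriptor):
 * poll a single fd for readability with a 2-second timeout and decode
 * the returned event mask.
 *
 *   struct timeval tv = { 2, 0 };
 *   int revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (revents < 0)
 *       rb_sys_fail(0);              (error, errno is set)
 *   else if (revents & RB_WAITFD_IN)
 *       ...                          (readable)
 *   else
 *       ...                          (revents == 0: timed out)
 */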
2895 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
2896 static rb_fdset_t *init_set_fd(int fd, rb_fdset_t *fds)
2897 {
2898  rb_fd_init(fds);
2899  rb_fd_set(fd, fds);
2900 
2901  return fds;
2902 }
2903 
2904 struct select_args {
2905  union {
2906  int fd;
2907  int error;
2908  } as;
2909  rb_fdset_t *read;
2910  rb_fdset_t *write;
2911  rb_fdset_t *except;
2912  struct timeval *tv;
2913 };
2914 
2915 static VALUE
2916 select_single(VALUE ptr)
2917 {
2918  struct select_args *args = (struct select_args *)ptr;
2919  int r;
2920 
2921  r = rb_thread_fd_select(args->as.fd + 1,
2922  args->read, args->write, args->except, args->tv);
2923  if (r == -1)
2924  args->as.error = errno;
2925  if (r > 0) {
2926  r = 0;
2927  if (args->read && rb_fd_isset(args->as.fd, args->read))
2928  r |= RB_WAITFD_IN;
2929  if (args->write && rb_fd_isset(args->as.fd, args->write))
2930  r |= RB_WAITFD_OUT;
2931  if (args->except && rb_fd_isset(args->as.fd, args->except))
2932  r |= RB_WAITFD_PRI;
2933  }
2934  return (VALUE)r;
2935 }
2936 
2937 static VALUE
2938 select_single_cleanup(VALUE ptr)
2939 {
2940  struct select_args *args = (struct select_args *)ptr;
2941 
2942  if (args->read) rb_fd_term(args->read);
2943  if (args->write) rb_fd_term(args->write);
2944  if (args->except) rb_fd_term(args->except);
2945 
2946  return (VALUE)-1;
2947 }
2948 
2949 int
2950 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
2951 {
2952  rb_fdset_t rfds, wfds, efds;
2953  struct select_args args;
2954  int r;
2955  VALUE ptr = (VALUE)&args;
2956 
2957  args.as.fd = fd;
2958  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
2959  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
2960  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
2961  args.tv = tv;
2962 
2963  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
2964  if (r == -1)
2965  errno = args.as.error;
2966 
2967  return r;
2968 }
2969 #endif /* ! USE_POLL */
2970 
2971 /*
2972  * for GC
2973  */
2974 
2975 #ifdef USE_CONSERVATIVE_STACK_END
2976 void
2977 rb_gc_set_stack_end(VALUE **stack_end_p)
2978 {
2979  VALUE stack_end;
2980  *stack_end_p = &stack_end;
2981 }
2982 #endif
2983 
2984 void
2985 rb_gc_save_machine_context(rb_thread_t *th)
2986 {
2987  FLUSH_REGISTER_WINDOWS;
2988 #ifdef __ia64
2989  th->machine_register_stack_end = rb_ia64_bsp();
2990 #endif
2991  setjmp(th->machine_regs);
2992 }
2993 
2994 /*
2995  *
2996  */
2997 
2998 void
2999 rb_threadptr_check_signal(rb_thread_t *mth)
3000 {
3001  /* mth must be main_thread */
3002  if (rb_signal_buff_size() > 0) {
3003  /* wakeup main thread */
3004  rb_threadptr_interrupt(mth);
3005  }
3006 }
3007 
3008 static void
3009 timer_thread_function(void *arg)
3010 {
3011  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
3012 
3013  /* for time slice */
3014  RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
3015 
3016  /* check signal */
3017  rb_threadptr_check_signal(vm->main_thread);
3018 
3019 #if 0
3020  /* prove profiler */
3021  if (vm->prove_profile.enable) {
3022  rb_thread_t *th = vm->running_thread;
3023 
3024  if (vm->during_gc) {
3025  /* GC prove profiling */
3026  }
3027  }
3028 #endif
3029 }
3030 
3031 void
3032 rb_thread_stop_timer_thread(int close_anyway)
3033 {
3034  if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
3035  native_reset_timer_thread();
3036  }
3037 }
3038 
3039 void
3040 rb_thread_reset_timer_thread(void)
3041 {
3042  native_reset_timer_thread();
3043 }
3044 
3045 void
3046 rb_thread_start_timer_thread(void)
3047 {
3048  system_working = 1;
3049  rb_thread_create_timer_thread();
3050 }
3051 
3052 static int
3053 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
3054 {
3055  int i;
3056  VALUE lines = (VALUE)val;
3057 
3058  for (i = 0; i < RARRAY_LEN(lines); i++) {
3059  if (RARRAY_PTR(lines)[i] != Qnil) {
3060  RARRAY_PTR(lines)[i] = INT2FIX(0);
3061  }
3062  }
3063  return ST_CONTINUE;
3064 }
3065 
3066 static void
3067 clear_coverage(void)
3068 {
3069  VALUE coverages = rb_get_coverages();
3070  if (RTEST(coverages)) {
3071  st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
3072  }
3073 }
3074 
3075 static void
3076 rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
3077 {
3078  rb_thread_t *th = GET_THREAD();
3079  rb_vm_t *vm = th->vm;
3080  VALUE thval = th->self;
3081  vm->main_thread = th;
3082 
3083  gvl_atfork(th->vm);
3084  st_foreach(vm->living_threads, atfork, (st_data_t)th);
3085  st_clear(vm->living_threads);
3086  st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
3087  vm->sleeper = 0;
3088  clear_coverage();
3089 }
3090 
3091 static int
3092 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
3093 {
3094  VALUE thval = key;
3095  rb_thread_t *th;
3096  GetThreadPtr(thval, th);
3097 
3098  if (th != (rb_thread_t *)current_th) {
3099  if (th->keeping_mutexes) {
3100  rb_mutex_abandon_all(th->keeping_mutexes);
3101  }
3102  th->keeping_mutexes = NULL;
3103  thread_cleanup_func(th, TRUE);
3104  }
3105  return ST_CONTINUE;
3106 }
3107 
3108 void
3109 rb_thread_atfork(void)
3110 {
3111  rb_thread_atfork_internal(terminate_atfork_i);
3112  GET_THREAD()->join_list_head = 0;
3113 
3114  /* We don't want to reproduce CVE-2003-0900. */
3115  rb_reset_random_seed();
3116 }
3117 
3118 static int
3119 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
3120 {
3121  VALUE thval = key;
3122  rb_thread_t *th;
3123  GetThreadPtr(thval, th);
3124 
3125  if (th != (rb_thread_t *)current_th) {
3126  thread_cleanup_func_before_exec(th);
3127  }
3128  return ST_CONTINUE;
3129 }
3130 
3131 void
3132 rb_thread_atfork_before_exec(void)
3133 {
3134  rb_thread_atfork_internal(terminate_atfork_before_exec_i);
3135 }
3136 
3137 struct thgroup {
3138  int enclosed;
3139  VALUE group;
3140 };
3141 
3142 static size_t
3143 thgroup_memsize(const void *ptr)
3144 {
3145  return ptr ? sizeof(struct thgroup) : 0;
3146 }
3147 
3148 static const rb_data_type_t thgroup_data_type = {
3149  "thgroup",
3150  {NULL, NULL, thgroup_memsize,},
3151 };
3152 
3153 /*
3154  * Document-class: ThreadGroup
3155  *
3156  * <code>ThreadGroup</code> provides a means of keeping track of a number of
3157  * threads as a group. A <code>Thread</code> can belong to only one
3158  * <code>ThreadGroup</code> at a time; adding a thread to a new group will
3159  * remove it from any previous group.
3160  *
3161  * Newly created threads belong to the same group as the thread from which they
3162  * were created.
3163  */
3164 
3165 static VALUE
3166 thgroup_s_alloc(VALUE klass)
3167 {
3168  VALUE group;
3169  struct thgroup *data;
3170 
3171  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
3172  data->enclosed = 0;
3173  data->group = group;
3174 
3175  return group;
3176 }
3177 
3178 struct thgroup_list_params {
3179  VALUE ary;
3180  VALUE group;
3181 };
3182 
3183 static int
3184 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
3185 {
3186  VALUE thread = (VALUE)key;
3187  VALUE ary = ((struct thgroup_list_params *)data)->ary;
3188  VALUE group = ((struct thgroup_list_params *)data)->group;
3189  rb_thread_t *th;
3190  GetThreadPtr(thread, th);
3191 
3192  if (th->thgroup == group) {
3193  rb_ary_push(ary, thread);
3194  }
3195  return ST_CONTINUE;
3196 }
3197 
3198 /*
3199  * call-seq:
3200  * thgrp.list -> array
3201  *
3202  * Returns an array of all existing <code>Thread</code> objects that belong to
3203  * this group.
3204  *
3205  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
3206  */
3207 
3208 static VALUE
3209 thgroup_list(VALUE group)
3210 {
3211  VALUE ary = rb_ary_new();
3212  struct thgroup_list_params param;
3213 
3214  param.ary = ary;
3215  param.group = group;
3216  st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
3217  return ary;
3218 }
3219 
3220 
3221 /*
3222  * call-seq:
3223  * thgrp.enclose -> thgrp
3224  *
3225  * Prevents threads from being added to or removed from the receiving
3226  * <code>ThreadGroup</code>. New threads can still be started in an enclosed
3227  * <code>ThreadGroup</code>.
3228  *
3229  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
3230  * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
3231  * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
3232  * tg.add thr
3233  *
3234  * <em>produces:</em>
3235  *
3236  * ThreadError: can't move from the enclosed thread group
3237  */
3238 
3239 static VALUE
3240 thgroup_enclose(VALUE group)
3241 {
3242  struct thgroup *data;
3243 
3244  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3245  data->enclosed = 1;
3246 
3247  return group;
3248 }
3249 
3250 
3251 /*
3252  * call-seq:
3253  * thgrp.enclosed? -> true or false
3254  *
3255  * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
3256  * ThreadGroup#enclose.
3257  */
3258 
3259 static VALUE
3260 thgroup_enclosed_p(VALUE group)
3261 {
3262  struct thgroup *data;
3263 
3264  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3265  if (data->enclosed)
3266  return Qtrue;
3267  return Qfalse;
3268 }
3269 
3270 
3271 /*
3272  * call-seq:
3273  * thgrp.add(thread) -> thgrp
3274  *
3275  * Adds the given <em>thread</em> to this group, removing it from any other
3276  * group to which it may have previously belonged.
3277  *
3278  * puts "Initial group is #{ThreadGroup::Default.list}"
3279  * tg = ThreadGroup.new
3280  * t1 = Thread.new { sleep }
3281  * t2 = Thread.new { sleep }
3282  * puts "t1 is #{t1}"
3283  * puts "t2 is #{t2}"
3284  * tg.add(t1)
3285  * puts "Initial group now #{ThreadGroup::Default.list}"
3286  * puts "tg group now #{tg.list}"
3287  *
3288  * <em>produces:</em>
3289  *
3290  * Initial group is #<Thread:0x401bdf4c>
3291  * t1 is #<Thread:0x401b3c90>
3292  * t2 is #<Thread:0x401b3c18>
3293  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
3294  * tg group now #<Thread:0x401b3c90>
3295  */
3296 
3297 static VALUE
3298 thgroup_add(VALUE group, VALUE thread)
3299 {
3300  rb_thread_t *th;
3301  struct thgroup *data;
3302 
3303  rb_secure(4);
3304  GetThreadPtr(thread, th);
3305 
3306  if (OBJ_FROZEN(group)) {
3307  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
3308  }
3309  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
3310  if (data->enclosed) {
3311  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
3312  }
3313 
3314  if (!th->thgroup) {
3315  return Qnil;
3316  }
3317 
3318  if (OBJ_FROZEN(th->thgroup)) {
3319  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
3320  }
3321  TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
3322  if (data->enclosed) {
3323  rb_raise(rb_eThreadError,
3324  "can't move from the enclosed thread group");
3325  }
3326 
3327  th->thgroup = group;
3328  return group;
3329 }
3330 
3331 
3332 /*
3333  * Document-class: Mutex
3334  *
3335  * Mutex implements a simple semaphore that can be used to coordinate access to
3336  * shared data from multiple concurrent threads.
3337  *
3338  * Example:
3339  *
3340  * require 'thread'
3341  * semaphore = Mutex.new
3342  *
3343  * a = Thread.new {
3344  * semaphore.synchronize {
3345  * # access shared resource
3346  * }
3347  * }
3348  *
3349  * b = Thread.new {
3350  * semaphore.synchronize {
3351  * # access shared resource
3352  * }
3353  * }
3354  *
3355  */
3356 
3357 #define GetMutexPtr(obj, tobj) \
3358  TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
3359 
3360 static const char *rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
3361 
3362 #define mutex_mark NULL
3363 
3364 static void
3365 mutex_free(void *ptr)
3366 {
3367  if (ptr) {
3368  rb_mutex_t *mutex = ptr;
3369  if (mutex->th) {
3370  /* rb_warn("free locked mutex"); */
3371  const char *err = rb_mutex_unlock_th(mutex, mutex->th);
3372  if (err) rb_bug("%s", err);
3373  }
3374  native_mutex_destroy(&mutex->lock);
3375  native_cond_destroy(&mutex->cond);
3376  }
3377  ruby_xfree(ptr);
3378 }
3379 
3380 static size_t
3381 mutex_memsize(const void *ptr)
3382 {
3383  return ptr ? sizeof(rb_mutex_t) : 0;
3384 }
3385 
3386 static const rb_data_type_t mutex_data_type = {
3387  "mutex",
3388  {mutex_mark, mutex_free, mutex_memsize,},
3389 };
3390 
3391 VALUE
3392 rb_obj_is_mutex(VALUE obj)
3393 {
3394  if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
3395  return Qtrue;
3396  }
3397  else {
3398  return Qfalse;
3399  }
3400 }
3401 
3402 static VALUE
3403 mutex_alloc(VALUE klass)
3404 {
3405  VALUE volatile obj;
3406  rb_mutex_t *mutex;
3407 
3408  obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
3409  native_mutex_initialize(&mutex->lock);
3410  native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
3411  return obj;
3412 }
3413 
3414 /*
3415  * call-seq:
3416  * Mutex.new -> mutex
3417  *
3418  * Creates a new Mutex
3419  */
3420 static VALUE
3421 mutex_initialize(VALUE self)
3422 {
3423  return self;
3424 }
3425 
3426 VALUE
3427 rb_mutex_new(void)
3428 {
3429  return mutex_alloc(rb_cMutex);
3430 }
3431 
3432 /*
3433  * call-seq:
3434  * mutex.locked? -> true or false
3435  *
3436  * Returns +true+ if this lock is currently held by some thread.
3437  */
3438 VALUE
3439 rb_mutex_locked_p(VALUE self)
3440 {
3441  rb_mutex_t *mutex;
3442  GetMutexPtr(self, mutex);
3443  return mutex->th ? Qtrue : Qfalse;
3444 }
3445 
3446 static void
3447 mutex_locked(rb_thread_t *th, VALUE self)
3448 {
3449  rb_mutex_t *mutex;
3450  GetMutexPtr(self, mutex);
3451 
3452  if (th->keeping_mutexes) {
3453  mutex->next_mutex = th->keeping_mutexes;
3454  }
3455  th->keeping_mutexes = mutex;
3456 }
3457 
3458 /*
3459  * call-seq:
3460  * mutex.try_lock -> true or false
3461  *
3462  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
3463  * lock was granted.
3464  */
3465 VALUE
3466 rb_mutex_trylock(VALUE self)
3467 {
3468  rb_mutex_t *mutex;
3469  VALUE locked = Qfalse;
3470  GetMutexPtr(self, mutex);
3471 
3472  native_mutex_lock(&mutex->lock);
3473  if (mutex->th == 0) {
3474  mutex->th = GET_THREAD();
3475  locked = Qtrue;
3476 
3477  mutex_locked(GET_THREAD(), self);
3478  }
3479  native_mutex_unlock(&mutex->lock);
3480 
3481  return locked;
3482 }
3483 
3484 static int
3485 lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
3486 {
3487  int interrupted = 0;
3488  int err = 0;
3489 
3490  mutex->cond_waiting++;
3491  for (;;) {
3492  if (!mutex->th) {
3493  mutex->th = th;
3494  break;
3495  }
3496  if (RUBY_VM_INTERRUPTED(th)) {
3497  interrupted = 1;
3498  break;
3499  }
3500  if (err == ETIMEDOUT) {
3501  interrupted = 2;
3502  break;
3503  }
3504 
3505  if (timeout_ms) {
3506  struct timespec timeout_rel;
3507  struct timespec timeout;
3508 
3509  timeout_rel.tv_sec = 0;
3510  timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
3511  timeout = native_cond_timeout(&mutex->cond, timeout_rel);
3512  err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
3513  }
3514  else {
3515  native_cond_wait(&mutex->cond, &mutex->lock);
3516  err = 0;
3517  }
3518  }
3519  mutex->cond_waiting--;
3520 
3521  return interrupted;
3522 }
3523 
3524 static void
3525 lock_interrupt(void *ptr)
3526 {
3527  rb_mutex_t *mutex = (rb_mutex_t *)ptr;
3528  native_mutex_lock(&mutex->lock);
3529  if (mutex->cond_waiting > 0)
3530  native_cond_broadcast(&mutex->cond);
3531  native_mutex_unlock(&mutex->lock);
3532 }
3533 
3534 /*
3535  * At most one thread can use cond_timedwait and watch for deadlock
3536  * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
3537  * introduce new race conditions. [Bug #6278] [ruby-core:44275]
3538  */
3539 static rb_thread_t *patrol_thread = NULL;
3540 
3541 /*
3542  * call-seq:
3543  * mutex.lock -> self
3544  *
3545  * Attempts to grab the lock and waits if it isn't available.
3546  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
3547  */
3548 VALUE
3549 rb_mutex_lock(VALUE self)
3550 {
3551 
3552  if (rb_mutex_trylock(self) == Qfalse) {
3553  rb_mutex_t *mutex;
3554  rb_thread_t *th = GET_THREAD();
3555  GetMutexPtr(self, mutex);
3556 
3557  if (mutex->th == GET_THREAD()) {
3558  rb_raise(rb_eThreadError, "deadlock; recursive locking");
3559  }
3560 
3561  while (mutex->th != th) {
3562  int interrupted;
3563  enum rb_thread_status prev_status = th->status;
3564  int timeout_ms = 0;
3565  struct rb_unblock_callback oldubf;
3566 
3567  set_unblock_function(th, lock_interrupt, mutex, &oldubf);
3568  th->status = THREAD_STOPPED_FOREVER;
3569  th->locking_mutex = self;
3570 
3571  native_mutex_lock(&mutex->lock);
3572  th->vm->sleeper++;
3573  /*
3574  * Careful! While some contended threads are in lock_func(),
3575  * vm->sleeper is an unstable value. We have to avoid both
3576  * deadlock and busy looping.
3577  */
3578  if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
3579  !patrol_thread) {
3580  timeout_ms = 100;
3581  patrol_thread = th;
3582  }
3583 
3584  GVL_UNLOCK_BEGIN();
3585  interrupted = lock_func(th, mutex, timeout_ms);
3586  native_mutex_unlock(&mutex->lock);
3587  GVL_UNLOCK_END();
3588 
3589  if (patrol_thread == th)
3590  patrol_thread = NULL;
3591 
3592  reset_unblock_function(th, &oldubf);
3593 
3594  th->locking_mutex = Qfalse;
3595  if (mutex->th && interrupted == 2) {
3596  rb_check_deadlock(th->vm);
3597  }
3598  if (th->status == THREAD_STOPPED_FOREVER) {
3599  th->status = prev_status;
3600  }
3601  th->vm->sleeper--;
3602 
3603  if (mutex->th == th) mutex_locked(th, self);
3604 
3605  if (interrupted) {
3606  RUBY_VM_CHECK_INTS();
3607  }
3608  }
3609  }
3610  return self;
3611 }
3612 
3613 static const char *
3614 rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
3615 {
3616  const char *err = NULL;
3617  rb_mutex_t *th_mutex;
3618 
3619  native_mutex_lock(&mutex->lock);
3620 
3621  if (mutex->th == 0) {
3622  err = "Attempt to unlock a mutex which is not locked";
3623  }
3624  else if (mutex->th != th) {
3625  err = "Attempt to unlock a mutex which is locked by another thread";
3626  }
3627  else {
3628  mutex->th = 0;
3629  if (mutex->cond_waiting > 0)
3630  native_cond_signal(&mutex->cond);
3631  }
3632 
3633  native_mutex_unlock(&mutex->lock);
3634 
3635  if (!err) {
3636  th_mutex = th->keeping_mutexes;
3637  if (th_mutex == mutex) {
3638  th->keeping_mutexes = mutex->next_mutex;
3639  }
3640  else {
3641  while (1) {
3642  rb_mutex_t *tmp_mutex;
3643  tmp_mutex = th_mutex->next_mutex;
3644  if (tmp_mutex == mutex) {
3645  th_mutex->next_mutex = tmp_mutex->next_mutex;
3646  break;
3647  }
3648  th_mutex = tmp_mutex;
3649  }
3650  }
3651  mutex->next_mutex = NULL;
3652  }
3653 
3654  return err;
3655 }
3656 
3657 /*
3658  * call-seq:
3659  * mutex.unlock -> self
3660  *
3661  * Releases the lock.
3662  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
3663  */
3664 VALUE
3665 rb_mutex_unlock(VALUE self)
3666 {
3667  const char *err;
3668  rb_mutex_t *mutex;
3669  GetMutexPtr(self, mutex);
3670 
3671  err = rb_mutex_unlock_th(mutex, GET_THREAD());
3672  if (err) rb_raise(rb_eThreadError, "%s", err);
3673 
3674  return self;
3675 }
3676 
3677 static void
3678 rb_mutex_abandon_all(rb_mutex_t *mutexes)
3679 {
3680  rb_mutex_t *mutex;
3681 
3682  while (mutexes) {
3683  mutex = mutexes;
3684  mutexes = mutex->next_mutex;
3685  mutex->th = 0;
3686  mutex->next_mutex = 0;
3687  }
3688 }
3689 
3690 static VALUE
3691 rb_mutex_sleep_forever(VALUE time)
3692 {
3693  sleep_forever(GET_THREAD(), 1); /* permit spurious wakeup check */
3694  return Qnil;
3695 }
3696 
3697 static VALUE
3698 rb_mutex_wait_for(VALUE time)
3699 {
3700  const struct timeval *t = (struct timeval *)time;
3701  rb_thread_wait_for(*t);
3702  return Qnil;
3703 }
3704 
3705 VALUE
3706 rb_mutex_sleep(VALUE self, VALUE timeout)
3707 {
3708  time_t beg, end;
3709  struct timeval t;
3710 
3711  if (!NIL_P(timeout)) {
3712  t = rb_time_interval(timeout);
3713  }
3714  rb_mutex_unlock(self);
3715  beg = time(0);
3716  if (NIL_P(timeout)) {
3717  rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
3718  }
3719  else {
3720  rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
3721  }
3722  end = time(0) - beg;
3723  return INT2FIX(end);
3724 }
3725 
3726 /*
3727  * call-seq:
3728  * mutex.sleep(timeout = nil) -> number
3729  *
3730  * Releases the lock and sleeps +timeout+ seconds if it is given and
3731  * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
3732  * the current thread.
3733  */
3734 static VALUE
3735 mutex_sleep(int argc, VALUE *argv, VALUE self)
3736 {
3737  VALUE timeout;
3738 
3739  rb_scan_args(argc, argv, "01", &timeout);
3740  return rb_mutex_sleep(self, timeout);
3741 }
3742 
3743 /*
3744  * call-seq:
3745  * mutex.synchronize { ... } -> result of the block
3746  *
3747  * Obtains a lock, runs the block, and releases the lock when the block
3748  * completes. See the example under +Mutex+.
3749  */
3750 
3751 VALUE
3752 rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
3753 {
3754  rb_mutex_lock(mutex);
3755  return rb_ensure(func, arg, rb_mutex_unlock, mutex);
3756 }
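/*
 * Usage sketch (illustrative; update_shared() and data are hypothetical):
 * running a C callback under a Mutex. Because rb_mutex_synchronize() is
 * built on rb_ensure(), the lock is released even if the callback raises.
 *
 *   static VALUE
 *   update_shared(VALUE arg)
 *   {
 *       ...                          (touch the shared resource)
 *       return Qnil;
 *   }
 *
 *   rb_mutex_synchronize(rb_mutex_new(), update_shared, (VALUE)data);
 */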
3757 
3758 /*
3759  * Document-class: Barrier
3760  */
3761 static void
3762 barrier_mark(void *ptr)
3763 {
3764  rb_gc_mark((VALUE)ptr);
3765 }
3766 
3767 static const rb_data_type_t barrier_data_type = {
3768  "barrier",
3769  {barrier_mark, 0, 0,},
3770 };
3771 
3772 static VALUE
3773 barrier_alloc(VALUE klass)
3774 {
3775  return TypedData_Wrap_Struct(klass, &barrier_data_type, (void *)mutex_alloc(0));
3776 }
3777 
3778 #define GetBarrierPtr(obj) ((VALUE)rb_check_typeddata((obj), &barrier_data_type))
3779 
3780 VALUE
3781 rb_barrier_new(void)
3782 {
3783  VALUE barrier = barrier_alloc(rb_cBarrier);
3784  rb_mutex_lock((VALUE)DATA_PTR(barrier));
3785  return barrier;
3786 }
3787 
3788 VALUE
3789 rb_barrier_wait(VALUE self)
3790 {
3791  VALUE mutex = GetBarrierPtr(self);
3792  rb_mutex_t *m;
3793 
3794  if (!mutex) return Qfalse;
3795  GetMutexPtr(mutex, m);
3796  if (m->th == GET_THREAD()) return Qfalse;
3797  rb_mutex_lock(mutex);
3798  if (DATA_PTR(self)) return Qtrue;
3799  rb_mutex_unlock(mutex);
3800  return Qfalse;
3801 }
3802 
3803 VALUE
3804 rb_barrier_release(VALUE self)
3805 {
3806  return rb_mutex_unlock(GetBarrierPtr(self));
3807 }
3808 
3809 VALUE
3810 rb_barrier_destroy(VALUE self)
3811 {
3812  VALUE mutex = GetBarrierPtr(self);
3813  DATA_PTR(self) = 0;
3814  return rb_mutex_unlock(mutex);
3815 }
3816 
3817 /* variables for recursive traversals */
3818 static ID recursive_key;
3819 
3820 /*
3821  * Returns the current "recursive list" used to detect recursion.
3822  * This list is a hash table, unique for the current thread and for
3823  * the current __callee__.
3824  */
3825 
3826 static VALUE
3827 recursive_list_access(void)
3828 {
3829  volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3830  VALUE sym = ID2SYM(rb_frame_this_func());
3831  VALUE list;
3832  if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3833  hash = rb_hash_new();
3834  OBJ_UNTRUST(hash);
3835  rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3836  list = Qnil;
3837  }
3838  else {
3839  list = rb_hash_aref(hash, sym);
3840  }
3841  if (NIL_P(list) || TYPE(list) != T_HASH) {
3842  list = rb_hash_new();
3843  OBJ_UNTRUST(list);
3844  rb_hash_aset(hash, sym, list);
3845  }
3846  return list;
3847 }
3848 
3849 /*
3850  * Returns Qtrue iff obj_id (or the pair <obj_id, paired_obj_id>) is already
3851  * in the recursion list.
3852  * Assumes the recursion list is valid.
3853  */
3854 
3855 static VALUE
3856 recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
3857 {
3858 #if SIZEOF_LONG == SIZEOF_VOIDP
3859  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
3860 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3861  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
3862  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
3863 #endif
3864 
3865  VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
3866  if (pair_list == Qundef)
3867  return Qfalse;
3868  if (paired_obj_id) {
3869  if (TYPE(pair_list) != T_HASH) {
3870  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
3871  return Qfalse;
3872  }
3873  else {
3874  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
3875  return Qfalse;
3876  }
3877  }
3878  return Qtrue;
3879 }
3880 
3881 /*
3882  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
3883  * For a single obj_id, it sets list[obj_id] to Qtrue.
3884  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
3885  * otherwise list[obj_id] becomes a hash like:
3886  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
3887  * Assumes the recursion list is valid.
3888  */
3889 
3890 static void
3891 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
3892 {
3893  VALUE pair_list;
3894 
3895  if (!paired_obj) {
3896  rb_hash_aset(list, obj, Qtrue);
3897  }
3898  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
3899  rb_hash_aset(list, obj, paired_obj);
3900  }
3901  else {
3902  if (TYPE(pair_list) != T_HASH){
3903  VALUE other_paired_obj = pair_list;
3904  pair_list = rb_hash_new();
3905  OBJ_UNTRUST(pair_list);
3906  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
3907  rb_hash_aset(list, obj, pair_list);
3908  }
3909  rb_hash_aset(pair_list, paired_obj, Qtrue);
3910  }
3911 }
3912 
3913 /*
3914  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
3915  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
3916  * removed from the hash and no attempt is made to simplify
3917  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
3918  * Assumes the recursion list is valid.
3919  */
3920 
3921 static void
3922 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
3923 {
3924  if (paired_obj) {
3925  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
3926  if (pair_list == Qundef) {
3927  VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
3928  VALUE thrname = rb_inspect(rb_thread_current());
3929  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
3930  StringValuePtr(symname), StringValuePtr(thrname));
3931  }
3932  if (TYPE(pair_list) == T_HASH) {
3933  rb_hash_delete(pair_list, paired_obj);
3934  if (!RHASH_EMPTY_P(pair_list)) {
3935  return; /* keep the hash until it is empty */
3936  }
3937  }
3938  }
3939  rb_hash_delete(list, obj);
3940 }
3941 
3942 struct exec_recursive_params {
3943  VALUE (*func) (VALUE, VALUE, int);
3944  VALUE list;
3945  VALUE obj;
3946  VALUE objid;
3947  VALUE pairid;
3948  VALUE arg;
3949 };
3950 
3951 static VALUE
3952 exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
3953 {
3954  VALUE result = Qundef;
3955  int state;
3956 
3957  recursive_push(p->list, p->objid, p->pairid);
3958  PUSH_TAG();
3959  if ((state = EXEC_TAG()) == 0) {
3960  result = (*p->func)(p->obj, p->arg, FALSE);
3961  }
3962  POP_TAG();
3963  recursive_pop(p->list, p->objid, p->pairid);
3964  if (state)
3965  JUMP_TAG(state);
3966  return result;
3967 }
3968 
3969 /*
3970  * Calls func(obj, arg, recursive), where recursive is non-zero if the
3971  * current method is called recursively on obj, or on the pair <obj, pairid>
3972  * If outer is 0, then the innermost func will be called with recursive set
3973  * to Qtrue, otherwise the outermost func will be called. In the latter case,
3974  * all inner func are short-circuited by throw.
3975  * Implementation details: the value thrown is the recursive list which is
3976  * proper to the current method and unlikely to be caught anywhere else.
3977  * list[recursive_key] is used as a flag for the outermost call.
3978  */
3979 
3980 static VALUE
3981 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
3982 {
3983  VALUE result = Qundef;
3984  struct exec_recursive_params p;
3985  int outermost;
3986  p.list = recursive_list_access();
3987  p.objid = rb_obj_id(obj);
3988  p.obj = obj;
3989  p.pairid = pairid;
3990  p.arg = arg;
3991  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
3992 
3993  if (recursive_check(p.list, p.objid, pairid)) {
3994  if (outer && !outermost) {
3995  rb_throw_obj(p.list, p.list);
3996  }
3997  return (*func)(obj, arg, TRUE);
3998  }
3999  else {
4000  p.func = func;
4001 
4002  if (outermost) {
4003  recursive_push(p.list, ID2SYM(recursive_key), 0);
4004  result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
4005  recursive_pop(p.list, ID2SYM(recursive_key), 0);
4006  if (result == p.list) {
4007  result = (*func)(obj, arg, TRUE);
4008  }
4009  }
4010  else {
4011  result = exec_recursive_i(0, &p);
4012  }
4013  }
4014  *(volatile struct exec_recursive_params *)&p;
4015  return result;
4016 }
4017 
4018 /*
4019  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4020  * current method is called recursively on obj
4021  */
4022 
4023 VALUE
4024 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4025 {
4026  return exec_recursive(func, obj, 0, arg, 0);
4027 }
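/*
 * Usage sketch (illustrative; inspect_i() and its helper are
 * hypothetical): a callback passed to rb_exec_recursive() receives
 * recursive != 0 when it is re-entered for an obj already being
 * traversed, and can cut the cycle there.
 *
 *   static VALUE
 *   inspect_i(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new2("[...]");
 *       return real_inspect(obj, arg);
 *   }
 *
 *   rb_exec_recursive(inspect_i, obj, 0);
 */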
4028 
4029 /*
4030  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4031  * current method is called recursively on the ordered pair <obj, paired_obj>
4032  */
4033 
4034 VALUE
4035 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
4036 {
4037  return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
4038 }
4039 
4040 /*
4041  * If recursion is detected on the current method and obj, the outermost
4042  * func will be called with (obj, arg, Qtrue). All inner func will be
4043  * short-circuited using throw.
4044  */
4045 
4046 VALUE
4047 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
4048 {
4049  return exec_recursive(func, obj, 0, arg, 1);
4050 }
4051 
4052 /* tracer */
4053 #define RUBY_EVENT_REMOVED 0x1000000
4054 
4055 enum {
4056  EVENT_RUNNING_NOTHING,
4057  EVENT_RUNNING_TRACE = 1,
4058  EVENT_RUNNING_THREAD = 2,
4059  EVENT_RUNNING_VM = 4,
4060  EVENT_RUNNING_EVENT_MASK = EVENT_RUNNING_VM|EVENT_RUNNING_THREAD
4061 };
4062 
4063 static VALUE thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p);
4064 
4064 
4065 struct event_call_args {
4066  rb_thread_t *th;
4067  VALUE klass;
4068  VALUE self;
4069  VALUE proc;
4070  ID id;
4071  rb_event_flag_t event;
4072 };
4073 
4074 static rb_event_hook_t *
4075 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4076 {
4077  rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
4078  hook->func = func;
4079  hook->flag = events;
4080  hook->data = data;
4081  return hook;
4082 }
4083 
4084 static void
4085 thread_reset_event_flags(rb_thread_t *th)
4086 {
4087  rb_event_hook_t *hook = th->event_hooks;
4088  rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
4089 
4090  while (hook) {
4091  if (!(flag & RUBY_EVENT_REMOVED))
4092  flag |= hook->flag;
4093  hook = hook->next;
4094  }
4095  th->event_flags = flag;
4096 }
4097 
4098 static void
4099 rb_threadptr_add_event_hook(rb_thread_t *th,
4100  rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4101 {
4102  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4103  hook->next = th->event_hooks;
4104  th->event_hooks = hook;
4105  thread_reset_event_flags(th);
4106 }
4107 
4108 static rb_thread_t *
4109 thval2thread_t(VALUE thval)
4110 {
4111  rb_thread_t *th;
4112  GetThreadPtr(thval, th);
4113  return th;
4114 }
4115 
4116 void
4117 rb_thread_add_event_hook(VALUE thval,
4118  rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4119 {
4120  rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data);
4121 }
4122 
4123 static int
4124 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
4125 {
4126  VALUE thval = key;
4127  rb_thread_t *th;
4128  GetThreadPtr(thval, th);
4129 
4130  if (flag) {
4131  th->event_flags |= RUBY_EVENT_VM;
4132  }
4133  else {
4134  th->event_flags &= (~RUBY_EVENT_VM);
4135  }
4136  return ST_CONTINUE;
4137 }
4138 
4139 static void
4140 set_threads_event_flags(int flag)
4141 {
4142  st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
4143 }
4144 
4145 static inline int
4146 exec_event_hooks(const rb_event_hook_t *hook, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
4147 {
4148  int removed = 0;
4149  for (; hook; hook = hook->next) {
4150  if (hook->flag & RUBY_EVENT_REMOVED) {
4151  removed++;
4152  continue;
4153  }
4154  if (flag & hook->flag) {
4155  (*hook->func)(flag, hook->data, self, id, klass);
4156  }
4157  }
4158  return removed;
4159 }
4160 
4161 static int remove_defered_event_hook(rb_event_hook_t **root);
4162 
4163 static VALUE
4164 thread_exec_event_hooks(VALUE args, int running)
4165 {
4166  struct event_call_args *argp = (struct event_call_args *)args;
4167  rb_thread_t *th = argp->th;
4168  rb_event_flag_t flag = argp->event;
4169  VALUE self = argp->self;
4170  ID id = argp->id;
4171  VALUE klass = argp->klass;
4172  const rb_event_flag_t wait_event = th->event_flags;
4173  int removed;
4174 
4175  if (self == rb_mRubyVMFrozenCore) return 0;
4176 
4177  if ((wait_event & flag) && !(running & EVENT_RUNNING_THREAD)) {
4178  th->tracing |= EVENT_RUNNING_THREAD;
4179  removed = exec_event_hooks(th->event_hooks, flag, self, id, klass);
4180  th->tracing &= ~EVENT_RUNNING_THREAD;
4181  if (removed) {
4182  remove_defered_event_hook(&th->event_hooks);
4183  }
4184  }
4185  if (wait_event & RUBY_EVENT_VM) {
4186  if (th->vm->event_hooks == NULL) {
4187  th->event_flags &= (~RUBY_EVENT_VM);
4188  }
4189  else if (!(running & EVENT_RUNNING_VM)) {
4190  th->tracing |= EVENT_RUNNING_VM;
4191  removed = exec_event_hooks(th->vm->event_hooks, flag, self, id, klass);
4192  th->tracing &= ~EVENT_RUNNING_VM;
4193  if (removed) {
4194  remove_defered_event_hook(&th->vm->event_hooks);
4195  }
4196  }
4197  }
4198  return 0;
4199 }
4200 
4201 void
4202 rb_threadptr_exec_event_hooks(rb_thread_t *th, rb_event_flag_t flag, VALUE self, ID id, VALUE klass)
4203 {
4204  const VALUE errinfo = th->errinfo;
4205  struct event_call_args args;
4206  args.th = th;
4207  args.event = flag;
4208  args.self = self;
4209  args.id = id;
4210  args.klass = klass;
4211  args.proc = 0;
4212  thread_exec_event_hooks((VALUE)&args, th->tracing);
4213  th->errinfo = errinfo;
4214 }
4215 
4216 void
4217 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
4218 {
4219  rb_event_hook_t *hook = alloc_event_hook(func, events, data);
4220  rb_vm_t *vm = GET_VM();
4221 
4222  hook->next = vm->event_hooks;
4223  vm->event_hooks = hook;
4224 
4225  set_threads_event_flags(1);
4226 }
4227 
4228 static int
4229 defer_remove_event_hook(rb_event_hook_t *hook, rb_event_hook_func_t func)
4230 {
4231  while (hook) {
4232  if (func == 0 || hook->func == func) {
4233  hook->flag |= RUBY_EVENT_REMOVED;
4234  }
4235  hook = hook->next;
4236  }
4237  return -1;
4238 }
4239 
4240 static int
4241 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
4242 {
4243  rb_event_hook_t *hook = *root, *next;
4244 
4245  while (hook) {
4246  next = hook->next;
4247  if (func == 0 || hook->func == func || (hook->flag & RUBY_EVENT_REMOVED)) {
4248  *root = next;
4249  xfree(hook);
4250  }
4251  else {
4252  root = &hook->next;
4253  }
4254  hook = next;
4255  }
4256  return -1;
4257 }
4258 
4259 static int
4260 remove_defered_event_hook(rb_event_hook_t **root)
4261 {
4262  rb_event_hook_t *hook = *root, *next;
4263 
4264  while (hook) {
4265  next = hook->next;
4266  if (hook->flag & RUBY_EVENT_REMOVED) {
4267  *root = next;
4268  xfree(hook);
4269  }
4270  else {
4271  root = &hook->next;
4272  }
4273  hook = next;
4274  }
4275  return -1;
4276 }
4277 
4278 static int
4279 rb_threadptr_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
4280 {
4281  int ret;
4282  if (th->tracing & EVENT_RUNNING_THREAD) {
4283  ret = defer_remove_event_hook(th->event_hooks, func);
4284  }
4285  else {
4286  ret = remove_event_hook(&th->event_hooks, func);
4287  }
4288  thread_reset_event_flags(th);
4289  return ret;
4290 }
4291 
4292 int
4293 rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
4294 {
4295  return rb_threadptr_remove_event_hook(thval2thread_t(thval), func);
4296 }
4297 
4298 static rb_event_hook_t *
4299 search_live_hook(rb_event_hook_t *hook)
4300 {
4301  while (hook) {
4302  if (!(hook->flag & RUBY_EVENT_REMOVED))
4303  return hook;
4304  hook = hook->next;
4305  }
4306  return NULL;
4307 }
4308 
4309 static int
4310 running_vm_event_hooks_i(st_data_t key, st_data_t val, st_data_t data)
4311 {
4312  rb_thread_t *th = thval2thread_t((VALUE)key);
4313  if (!(th->tracing & EVENT_RUNNING_VM)) return ST_CONTINUE;
4314  *(rb_thread_t **)data = th;
4315  return ST_STOP;
4316 }
4317 
4318 static rb_thread_t *
4319 vm_event_hooks_running_thread(rb_vm_t *vm)
4320 {
4321  rb_thread_t *found = NULL;
4322  st_foreach(vm->living_threads, running_vm_event_hooks_i, (st_data_t)&found);
4323  return found;
4324 }
4325 
4326 int
4327 rb_remove_event_hook(rb_event_hook_func_t func)
4328 {
4329  rb_vm_t *vm = GET_VM();
4330  rb_event_hook_t *hook = search_live_hook(vm->event_hooks);
4331  int ret;
4332 
4333  if (vm_event_hooks_running_thread(vm)) {
4334  ret = defer_remove_event_hook(vm->event_hooks, func);
4335  }
4336  else {
4337  ret = remove_event_hook(&vm->event_hooks, func);
4338  }
4339 
4340  if (hook && !search_live_hook(vm->event_hooks)) {
4341  set_threads_event_flags(0);
4342  }
4343 
4344  return ret;
4345 }
4346 
4347 static int
4348 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
4349 {
4350  rb_thread_t *th;
4351  GetThreadPtr((VALUE)key, th);
4352  rb_threadptr_remove_event_hook(th, 0);
4353  return ST_CONTINUE;
4354 }
4355 
4356 void
4357 rb_clear_trace_func(void)
4358 {
4359  st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
4360  rb_remove_event_hook(0);
4361 }
4362 
4363 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
4364 
4365 /*
4366  * call-seq:
4367  * set_trace_func(proc) -> proc
4368  * set_trace_func(nil) -> nil
4369  *
4370  * Establishes _proc_ as the handler for tracing, or disables
4371  * tracing if the parameter is +nil+. _proc_ takes up
4372  * to six parameters: an event name, a filename, a line number, an
4373  * object id, a binding, and the name of a class. _proc_ is
4374  * invoked whenever an event occurs. Events are: <code>c-call</code>
4375  * (call a C-language routine), <code>c-return</code> (return from a
4376  * C-language routine), <code>call</code> (call a Ruby method),
4377  * <code>class</code> (start a class or module definition),
4378  * <code>end</code> (finish a class or module definition),
4379  * <code>line</code> (execute code on a new line), <code>raise</code>
4380  * (raise an exception), and <code>return</code> (return from a Ruby
4381  * method). Tracing is disabled within the context of _proc_.
4382  *
4383  * class Test
4384  * def test
4385  * a = 1
4386  * b = 2
4387  * end
4388  * end
4389  *
4390  * set_trace_func proc { |event, file, line, id, binding, classname|
4391  * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
4392  * }
4393  * t = Test.new
4394  * t.test
4395  *
4396  * line prog.rb:11 false
4397  * c-call prog.rb:11 new Class
4398  * c-call prog.rb:11 initialize Object
4399  * c-return prog.rb:11 initialize Object
4400  * c-return prog.rb:11 new Class
4401  * line prog.rb:12 false
4402  * call prog.rb:2 test Test
4403  * line prog.rb:3 test Test
4404  * line prog.rb:4 test Test
4405  * return prog.rb:4 test Test
4406  */
4407 
4408 static VALUE
4409 set_trace_func(VALUE obj, VALUE trace)
4410 {
4411  rb_remove_event_hook(call_trace_func);
4412 
4413  if (NIL_P(trace)) {
4414  GET_THREAD()->tracing = EVENT_RUNNING_NOTHING;
4415  return Qnil;
4416  }
4417 
4418  if (!rb_obj_is_proc(trace)) {
4419  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4420  }
4421 
4422  rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
4423  return trace;
4424 }
4425 
4426 static void
4427 thread_add_trace_func(rb_thread_t *th, VALUE trace)
4428 {
4429  if (!rb_obj_is_proc(trace)) {
4430  rb_raise(rb_eTypeError, "trace_func needs to be Proc");
4431  }
4432 
4433  rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
4434 }
4435 
4436 /*
4437  * call-seq:
4438  * thr.add_trace_func(proc) -> proc
4439  *
4440  * Adds _proc_ as a handler for tracing.
4441  * See <code>Thread#set_trace_func</code> and +set_trace_func+.
4442  */
4443 
4444 static VALUE
4445 thread_add_trace_func_m(VALUE obj, VALUE trace)
4446 {
4447  rb_thread_t *th;
4448  GetThreadPtr(obj, th);
4449  thread_add_trace_func(th, trace);
4450  return trace;
4451 }
4452 
4453 /*
4454  * call-seq:
4455  * thr.set_trace_func(proc) -> proc
4456  * thr.set_trace_func(nil) -> nil
4457  *
4458  * Establishes _proc_ on _thr_ as the handler for tracing, or
4459  * disables tracing if the parameter is +nil+.
4460  * See +set_trace_func+.
4461  */
4462 
4463 static VALUE
4464 thread_set_trace_func_m(VALUE obj, VALUE trace)
4465 {
4466  rb_thread_t *th;
4467  GetThreadPtr(obj, th);
4468  rb_threadptr_remove_event_hook(th, call_trace_func);
4469 
4470  if (NIL_P(trace)) {
4471  th->tracing = EVENT_RUNNING_NOTHING;
4472  return Qnil;
4473  }
4474  thread_add_trace_func(th, trace);
4475  return trace;
4476 }
4477 
4478 static const char *
4479 get_event_name(rb_event_flag_t event)
4480 {
4481  switch (event) {
4482  case RUBY_EVENT_LINE:
4483  return "line";
4484  case RUBY_EVENT_CLASS:
4485  return "class";
4486  case RUBY_EVENT_END:
4487  return "end";
4488  case RUBY_EVENT_CALL:
4489  return "call";
4490  case RUBY_EVENT_RETURN:
4491  return "return";
4492  case RUBY_EVENT_C_CALL:
4493  return "c-call";
4494  case RUBY_EVENT_C_RETURN:
4495  return "c-return";
4496  case RUBY_EVENT_RAISE:
4497  return "raise";
4498  default:
4499  return "unknown";
4500  }
4501 }
4502 
4503 static VALUE
4504 call_trace_proc(VALUE args, int tracing)
4505 {
4506  struct event_call_args *p = (struct event_call_args *)args;
4507  const char *srcfile = rb_sourcefile();
4508  VALUE eventname = rb_str_new2(get_event_name(p->event));
4509  VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
4510  VALUE argv[6];
4511  int line = rb_sourceline();
4512  ID id = 0;
4513  VALUE klass = 0;
4514 
4515  if (p->klass != 0) {
4516  id = p->id;
4517  klass = p->klass;
4518  }
4519  else {
4520  rb_thread_method_id_and_class(p->th, &id, &klass);
4521  }
4522  if (id == ID_ALLOCATOR)
4523  return Qnil;
4524  if (klass) {
4525  if (TYPE(klass) == T_ICLASS) {
4526  klass = RBASIC(klass)->klass;
4527  }
4528  else if (FL_TEST(klass, FL_SINGLETON)) {
4529  klass = rb_iv_get(klass, "__attached__");
4530  }
4531  }
4532 
4533  argv[0] = eventname;
4534  argv[1] = filename;
4535  argv[2] = INT2FIX(line);
4536  argv[3] = id ? ID2SYM(id) : Qnil;
4537  argv[4] = (p->self && srcfile) ? rb_binding_new() : Qnil;
4538  argv[5] = klass ? klass : Qnil;
4539 
4540  return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
4541 }
4542 
4543 static void
4544 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4545 {
4546  struct event_call_args args;
4547 
4548  args.th = GET_THREAD();
4549  args.event = event;
4550  args.proc = proc;
4551  args.self = self;
4552  args.id = id;
4553  args.klass = klass;
4554  ruby_suppress_tracing(call_trace_proc, (VALUE)&args, FALSE);
4555 }
4556 
4557 VALUE
4558 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
4559 {
4560  rb_thread_t *th = GET_THREAD();
4561  return thread_suppress_tracing(th, EVENT_RUNNING_TRACE, func, arg, always, 0);
4562 }
4563 
4564 static VALUE
4565 thread_suppress_tracing(rb_thread_t *th, int ev, VALUE (*func)(VALUE, int), VALUE arg, int always, int pop_p)
4566 {
4567  int state, tracing = th->tracing, running = tracing & ev;
4568  volatile int raised;
4569  volatile int outer_state;
4570  VALUE result = Qnil;
4571 
4572  if (running == ev && !always) {
4573  return Qnil;
4574  }
4575  else {
4576  th->tracing |= ev;
4577  }
4578 
4579  raised = rb_threadptr_reset_raised(th);
4580  outer_state = th->state;
4581  th->state = 0;
4582 
4583  PUSH_TAG();
4584  if ((state = EXEC_TAG()) == 0) {
4585  result = (*func)(arg, running);
4586  }
4587 
4588  if (raised) {
4589  rb_threadptr_set_raised(th);
4590  }
4591  POP_TAG();
4592 
4593  th->tracing = tracing;
4594  if (state) {
4595  if (pop_p) {
4596  th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
4597  }
4598  JUMP_TAG(state);
4599  }
4600  th->state = outer_state;
4601 
4602  return result;
4603 }
4604 
4605 /*
4606  * call-seq:
4607  * thr.backtrace -> array
4608  *
4609  * Returns the current back trace of the _thr_.
4610  */
4611 
4612 static VALUE
4613 rb_thread_backtrace_m(VALUE thval)
4614 {
4615  return rb_thread_backtrace(thval);
4616 }
4617 
4618 /*
4619  * Document-class: ThreadError
4620  *
4621  * Raised when an invalid operation is attempted on a thread.
4622  *
4623  * For example, when no other thread has been started:
4624  *
4625  * Thread.stop
4626  *
4627  * <em>raises the exception:</em>
4628  *
4629  * ThreadError: stopping only thread
4630  */
4631 
4632 /*
4633  * +Thread+ encapsulates the behavior of a thread of
4634  * execution, including the main thread of the Ruby script.
4635  *
4636  * In the descriptions of the methods in this class, the parameter _sym_
4637  * refers to a symbol, which is either a quoted string or a
4638  * +Symbol+ (such as <code>:name</code>).
4639  */
4640 
4641 void
4642 Init_Thread(void)
4643 {
4644 #undef rb_intern
4645 #define rb_intern(str) rb_intern_const(str)
4646 
4647  VALUE cThGroup;
4648  rb_thread_t *th = GET_THREAD();
4649 
4649 
4650  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
4651  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
4652  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
4653  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
4654  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
4655  rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
4656  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
4657  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
4658  rb_define_singleton_method(rb_cThread, "pass", thread_pass, 0);
4659  rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
4660  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4661  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
4662 #if THREAD_DEBUG < 0
4663  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4664  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4665 #endif
4666 
4667  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4668  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
4669  rb_define_method(rb_cThread, "join", thread_join_m, -1);
4670  rb_define_method(rb_cThread, "value", thread_value, 0);
4671  rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
4672  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
4673  rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
4674  rb_define_method(rb_cThread, "run", rb_thread_run, 0);
4675  rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
4676  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
4677  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
4678  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
4679  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
4680  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
4681  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
4682  rb_define_method(rb_cThread, "status", rb_thread_status, 0);
4683  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
4684  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
4685  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
4686  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
4687  rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
4688  rb_define_method(rb_cThread, "group", rb_thread_group, 0);
4689  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, 0);
4690 
4691  rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
4692 
4693  closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
4694  OBJ_TAINT(closed_stream_error);
4695  OBJ_FREEZE(closed_stream_error);
4696 
4697  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
4698  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
4699  rb_define_method(cThGroup, "list", thgroup_list, 0);
4700  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
4701  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
4702  rb_define_method(cThGroup, "add", thgroup_add, 1);
4703 
4704  {
4705  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
4706  rb_define_const(cThGroup, "Default", th->thgroup);
4707  }
4708 
4709  rb_cMutex = rb_define_class("Mutex", rb_cObject);
4710  rb_define_alloc_func(rb_cMutex, mutex_alloc);
4711  rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
4712  rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
4713  rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
4714  rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
4715  rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
4716  rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
4717 
4718  recursive_key = rb_intern("__recursive_key__");
4719  rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
4720 
4721  /* trace */
4722  rb_define_global_function("set_trace_func", set_trace_func, 1);
4723  rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
4724  rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
4725 
4726  /* init thread core */
4727  {
4728  /* main thread setting */
4729  {
4730  /* acquire global vm lock */
4731  gvl_init(th->vm);
4732  gvl_acquire(th->vm, th);
4733  native_mutex_initialize(&th->interrupt_lock);
4734  }
4735  }
4736 
4737  rb_thread_create_timer_thread();
4738 
4739  /* suppress warnings on cygwin, mingw and mswin. */
4740  (void)native_mutex_trylock;
4741 }
4742 
4743 int
4744 ruby_native_thread_p(void)
4745 {
4746  rb_thread_t *th = ruby_thread_from_native();
4747 
4748  return th != 0;
4749 }
4750 
4751 static int
4752 check_deadlock_i(st_data_t key, st_data_t val, int *found)
4753 {
4754  VALUE thval = key;
4755  rb_thread_t *th;
4756  GetThreadPtr(thval, th);
4757 
4758  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
4759  *found = 1;
4760  }
4761  else if (th->locking_mutex) {
4762  rb_mutex_t *mutex;
4763  GetMutexPtr(th->locking_mutex, mutex);
4764 
4765  native_mutex_lock(&mutex->lock);
4766  if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
4767  *found = 1;
4768  }
4769  native_mutex_unlock(&mutex->lock);
4770  }
4771 
4772  return (*found) ? ST_STOP : ST_CONTINUE;
4773 }
4774 
4775 #ifdef DEBUG_DEADLOCK_CHECK
4776 static int
4777 debug_i(st_data_t key, st_data_t val, int *found)
4778 {
4779  VALUE thval = key;
4780  rb_thread_t *th;
4781  GetThreadPtr(thval, th);
4782 
4783  printf("th:%p %d %d", th, th->status, th->interrupt_flag);
4784  if (th->locking_mutex) {
4785  rb_mutex_t *mutex;
4786  GetMutexPtr(th->locking_mutex, mutex);
4787 
4788  native_mutex_lock(&mutex->lock);
4789  printf(" %p %d\n", mutex->th, mutex->cond_waiting);
4790  native_mutex_unlock(&mutex->lock);
4791  }
4792  else
4793  puts("");
4794 
4795  return ST_CONTINUE;
4796 }
4797 #endif
4798 
4799 static void
4800 rb_check_deadlock(rb_vm_t *vm)
4801 {
4802  int found = 0;
4803 
4804  if (vm_living_thread_num(vm) > vm->sleeper) return;
4805  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
4806  if (patrol_thread && patrol_thread != GET_THREAD()) return;
4807 
4809 
4810  if (!found) {
4811  VALUE argv[2];
4812  argv[0] = rb_eFatal;
4813  argv[1] = rb_str_new2("deadlock detected");
4814 #ifdef DEBUG_DEADLOCK_CHECK
4815  printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
4816  st_foreach(vm->living_threads, debug_i, (st_data_t)0);
4817 #endif
4818  vm->sleeper--;
4819  rb_threadptr_raise(vm->main_thread, 2, argv);
4820  }
4821 }
4822 
4823 static void
4824 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
4825 {
4826  VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
4827  if (coverage && RBASIC(coverage)->klass == 0) {
4828  long line = rb_sourceline() - 1;
4829  long count;
4830  if (RARRAY_PTR(coverage)[line] == Qnil) {
4831  return;
4832  }
4833  count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
4834  if (POSFIXABLE(count)) {
4835  RARRAY_PTR(coverage)[line] = LONG2FIX(count);
4836  }
4837  }
4838 }
4839 
4840 VALUE
4841 rb_get_coverages(void)
4842 {
4843  return GET_VM()->coverages;
4844 }
4845 
4846 void
4847 rb_set_coverages(VALUE coverages)
4848 {
4849  GET_VM()->coverages = coverages;
4850  rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
4851 }
4852 
4853 void
4854 rb_reset_coverages(void)
4855 {
4856  GET_VM()->coverages = Qfalse;
4857  rb_remove_event_hook(update_coverage);
4858 }
4859 
4860