[apple/libc.git] / pthreads / pthread.c
1 /*
2 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
3 * All Rights Reserved
4 *
5 * Permission to use, copy, modify, and distribute this software and
6 * its documentation for any purpose and without fee is hereby granted,
7 * provided that the above copyright notice appears in all copies and
8 * that both the copyright notice and this permission notice appear in
9 * supporting documentation.
10 *
11 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
12 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
13 * FOR A PARTICULAR PURPOSE.
14 *
15 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
16 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
17 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
18 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
19 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 *
21 */
22 /*
23 * MkLinux
24 */
25
26 /*
27 * POSIX Pthread Library
28 */
29
30 #define __POSIX_LIB__
31 #include <assert.h>
32 #include <stdio.h> /* For printf(). */
33 #include <stdlib.h>
34 #include <errno.h> /* For __mach_errno_addr() prototype. */
35 #include <sys/time.h>
36 #include <sys/resource.h>
37 #include <sys/sysctl.h>
38 #include <sys/syscall.h>
39 #include <machine/vmparam.h>
40 #include <mach/vm_statistics.h>
41 #define __APPLE_API_PRIVATE
42 #include <machine/cpu_capabilities.h>
43
44 #include "pthread_internals.h"
45
46 /* Per-thread kernel support */
47 extern void _pthread_set_self(pthread_t);
48 extern void mig_init(int);
49
50 /* Get CPU capabilities from the kernel */
51 __private_extern__ void _init_cpu_capabilities(void);
52
53 /* Needed to tell the malloc subsystem we're going multithreaded */
54 extern void set_malloc_singlethreaded(int);
55
56 /* Used when we need to call into the kernel with no reply port */
57 extern pthread_lock_t reply_port_lock;
58
59 /* We'll implement this when the main thread is a pthread */
60 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
61 static struct _pthread _thread = {0};
62
63 /* This global should be used (carefully) by anyone needing to know if a
64 ** pthread has been created.
65 */
66 int __is_threaded = 0;
67 static int _pthread_count = 1;
68
69 static pthread_lock_t _pthread_count_lock = LOCK_INITIALIZER;
70
71 /* Same implementation as LOCK, but without the __is_threaded check */
72 int _spin_tries = 0;
73 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
74 {
75 int tries = _spin_tries;
76 do {
77 if (tries-- > 0)
78 continue;
79 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
80 tries = _spin_tries;
81 } while(!_spin_lock_try(lock));
82 }
83
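/* Illustrative sketch (not part of the original source): the pattern a
 * LOCK()-style macro typically follows with the helper above - try the fast
 * path first and fall into the retry/yield loop only on contention.  The
 * names example_lock and example_lock_acquire are hypothetical.
 *
 *     static pthread_lock_t example_lock = LOCK_INITIALIZER;
 *
 *     static void example_lock_acquire(void)
 *     {
 *             if (!_spin_lock_try(&example_lock))
 *                     _spin_lock_retry(&example_lock);
 *     }
 */
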
84 extern mach_port_t thread_recycle_port;
85
86 /* These are used to keep track of a semaphore pool shared by mutexes and condition
87 ** variables.
88 */
89
90 static semaphore_t *sem_pool = NULL;
91 static int sem_pool_count = 0;
92 static int sem_pool_current = 0;
93 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
94
95 static int default_priority;
96 static int max_priority;
97 static int min_priority;
98 static int pthread_concurrency;
99
100 /*
101 * [Internal] stack support
102 */
103 size_t _pthread_stack_size = 0;
104 #define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
105 #define STACK_RESERVED (sizeof (struct _pthread))
106
107 #ifdef STACK_GROWS_UP
108
109 /* The stack grows towards higher addresses:
110 |struct _pthread|user stack---------------->|
111 ^STACK_BASE ^STACK_START
112 ^STACK_SELF
113 ^STACK_LOWEST */
114 #define STACK_BASE(sp) STACK_LOWEST(sp)
115 #define STACK_START(stack_low) (STACK_BASE(stack_low) + STACK_RESERVED)
116 #define STACK_SELF(sp) STACK_BASE(sp)
117
118 #else
119
120 /* The stack grows towards lower addresses:
121 |<----------------user stack|struct _pthread|
122 ^STACK_LOWEST ^STACK_START ^STACK_BASE
123 ^STACK_SELF */
124
125 #define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
126 #define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
127 #define STACK_SELF(sp) STACK_START(sp)
128
129 #endif
130
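/* Illustrative sketch (not part of the original source): a worked example of
 * the macros above for the downward-growing case, assuming a hypothetical
 * 512KB stack so that __pthread_stack_mask would be 0x7FFFF (the real value
 * comes from pthread_internals.h).
 *
 *     sp          = 0xB0040000                        somewhere in the region
 *     STACK_BASE  = (sp | 0x7FFFF) + 1 = 0xB0080000   top of the region
 *     STACK_START = STACK_BASE - STACK_RESERVED
 *     STACK_SELF  = STACK_START
 *
 * so the struct _pthread occupies the STACK_RESERVED bytes just below the
 * base, and the usable stack grows down from STACK_START.
 */
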
131 #if defined(__ppc__)
132 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
133 #elif defined(__i386__)
134 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
135 #else
136 #error Need to define a stack address hint for this architecture
137 #endif
138
139 /* Set the base address to use as the stack pointer, before adjusting due to the ABI */
140
141 static int
142 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
143 {
144 kern_return_t kr;
145 #if 1
146 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
147 if (attrs->stackaddr != NULL) {
148 assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
149 *stack = attrs->stackaddr;
150 return 0;
151 }
152
153 *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
154 kr = vm_map(mach_task_self(), (vm_address_t *)stack,
155 attrs->stacksize + vm_page_size,
156 vm_page_size-1,
157 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
158 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
159 VM_INHERIT_DEFAULT);
160 if (kr != KERN_SUCCESS)
161 kr = vm_allocate(mach_task_self(),
162 (vm_address_t *)stack, attrs->stacksize + vm_page_size,
163 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
164 if (kr != KERN_SUCCESS) {
165 return EAGAIN;
166 }
167 #ifdef STACK_GROWS_UP
168 /* The guard page is the page one higher than the stack */
169 /* The stack base is at the lowest address */
170 kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
171 #else
172 /* The guard page is at the lowest address */
173 /* The stack base is the highest address */
174 kr = vm_protect(mach_task_self(), (vm_address_t)*stack, vm_page_size, FALSE, VM_PROT_NONE);
175 *stack += attrs->stacksize + vm_page_size;
176 #endif
177
178 #else
179 vm_address_t cur_stack = (vm_address_t)0;
180 if (free_stacks == 0)
181 {
182 /* Allocating guard pages is done by doubling
183 * the actual stack size, since STACK_BASE() needs
184 * to have stacks aligned on stack_size. Allocating just
185 * one page takes as much memory as allocating more pages
186 * since it will remain one entry in the vm map.
187 * Besides, allocating more than one page allows tracking the
188 * overflow pattern when the overflow is bigger than one page.
189 */
190 #ifndef NO_GUARD_PAGES
191 # define GUARD_SIZE(a) (2*(a))
192 # define GUARD_MASK(a) (((a)<<1) | 1)
193 #else
194 # define GUARD_SIZE(a) (a)
195 # define GUARD_MASK(a) (a)
196 #endif
197 while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
198 {
199 lowest_stack -= GUARD_SIZE(__pthread_stack_size);
200 /* Ensure stack is there */
201 kr = vm_allocate(mach_task_self(),
202 &lowest_stack,
203 GUARD_SIZE(__pthread_stack_size),
204 FALSE);
205 #ifndef NO_GUARD_PAGES
206 if (kr == KERN_SUCCESS) {
207 # ifdef STACK_GROWS_UP
208 kr = vm_protect(mach_task_self(),
209 lowest_stack+__pthread_stack_size,
210 __pthread_stack_size,
211 FALSE, VM_PROT_NONE);
212 # else /* STACK_GROWS_UP */
213 kr = vm_protect(mach_task_self(),
214 lowest_stack,
215 __pthread_stack_size,
216 FALSE, VM_PROT_NONE);
217 lowest_stack += __pthread_stack_size;
218 # endif /* STACK_GROWS_UP */
219 if (kr == KERN_SUCCESS)
220 break;
221 }
222 #else
223 if (kr == KERN_SUCCESS)
224 break;
225 #endif
226 }
227 if (lowest_stack > 0)
228 free_stacks = (vm_address_t *)lowest_stack;
229 else
230 {
231 /* Too bad. We'll just have to take what comes.
232 Use vm_map instead of vm_allocate so we can
233 specify alignment. */
234 kr = vm_map(mach_task_self(), &lowest_stack,
235 GUARD_SIZE(__pthread_stack_size),
236 GUARD_MASK(__pthread_stack_mask),
237 TRUE /* anywhere */, MEMORY_OBJECT_NULL,
238 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
239 VM_INHERIT_DEFAULT);
240 /* This really shouldn't fail and if it does I don't
241 know what to do. */
242 #ifndef NO_GUARD_PAGES
243 if (kr == KERN_SUCCESS) {
244 # ifdef STACK_GROWS_UP
245 kr = vm_protect(mach_task_self(),
246 lowest_stack+__pthread_stack_size,
247 __pthread_stack_size,
248 FALSE, VM_PROT_NONE);
249 # else /* STACK_GROWS_UP */
250 kr = vm_protect(mach_task_self(),
251 lowest_stack,
252 __pthread_stack_size,
253 FALSE, VM_PROT_NONE);
254 lowest_stack += __pthread_stack_size;
255 # endif /* STACK_GROWS_UP */
256 }
257 #endif
258 free_stacks = (vm_address_t *)lowest_stack;
259 lowest_stack = 0;
260 }
261 *free_stacks = 0; /* No other free stacks */
262 }
263 cur_stack = STACK_START((vm_address_t) free_stacks);
264 free_stacks = (vm_address_t *)*free_stacks;
265 cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
266 #endif
267 return 0;
268 }
269
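/* Illustrative sketch (not part of the original source): for the usual
 * downward-growing case the routine above returns a region laid out like
 * this (the usable size is whatever the caller put in attrs->stacksize):
 *
 *     low address                                                high address
 *     | guard page (VM_PROT_NONE) | usable stack, attrs->stacksize bytes |
 *                                                                        ^
 *                                                     *stack returned here
 *
 * When the caller supplies its own stack through attrs->stackaddr the address
 * is returned untouched and no guard page is added.
 */
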
270 static pthread_attr_t _pthread_attr_default = {0};
271
272 /*
273 * Destroy a thread attribute structure
274 */
275 int
276 pthread_attr_destroy(pthread_attr_t *attr)
277 {
278 if (attr->sig == _PTHREAD_ATTR_SIG)
279 {
280 return (ESUCCESS);
281 } else
282 {
283 return (EINVAL); /* Not an attribute structure! */
284 }
285 }
286
287 /*
288 * Get the 'detach' state from a thread attribute structure.
289 * Note: written as a helper function for info hiding
290 */
291 int
292 pthread_attr_getdetachstate(const pthread_attr_t *attr,
293 int *detachstate)
294 {
295 if (attr->sig == _PTHREAD_ATTR_SIG)
296 {
297 *detachstate = attr->detached;
298 return (ESUCCESS);
299 } else
300 {
301 return (EINVAL); /* Not an attribute structure! */
302 }
303 }
304
305 /*
306 * Get the 'inherit scheduling' info from a thread attribute structure.
307 * Note: written as a helper function for info hiding
308 */
309 int
310 pthread_attr_getinheritsched(const pthread_attr_t *attr,
311 int *inheritsched)
312 {
313 if (attr->sig == _PTHREAD_ATTR_SIG)
314 {
315 *inheritsched = attr->inherit;
316 return (ESUCCESS);
317 } else
318 {
319 return (EINVAL); /* Not an attribute structure! */
320 }
321 }
322
323 /*
324 * Get the scheduling parameters from a thread attribute structure.
325 * Note: written as a helper function for info hiding
326 */
327 int
328 pthread_attr_getschedparam(const pthread_attr_t *attr,
329 struct sched_param *param)
330 {
331 if (attr->sig == _PTHREAD_ATTR_SIG)
332 {
333 *param = attr->param;
334 return (ESUCCESS);
335 } else
336 {
337 return (EINVAL); /* Not an attribute structure! */
338 }
339 }
340
341 /*
342 * Get the scheduling policy from a thread attribute structure.
343 * Note: written as a helper function for info hiding
344 */
345 int
346 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
347 int *policy)
348 {
349 if (attr->sig == _PTHREAD_ATTR_SIG)
350 {
351 *policy = attr->policy;
352 return (ESUCCESS);
353 } else
354 {
355 return (EINVAL); /* Not an attribute structure! */
356 }
357 }
358
359 static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
360 /*
361 * Initialize a thread attribute structure to default values.
362 */
363 int
364 pthread_attr_init(pthread_attr_t *attr)
365 {
366 attr->stacksize = DEFAULT_STACK_SIZE;
367 attr->stackaddr = NULL;
368 attr->sig = _PTHREAD_ATTR_SIG;
369 attr->policy = _PTHREAD_DEFAULT_POLICY;
370 attr->param.sched_priority = default_priority;
371 attr->param.quantum = 10; /* quantum isn't public yet */
372 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
373 attr->detached = PTHREAD_CREATE_JOINABLE;
374 attr->freeStackOnExit = TRUE;
375 return (ESUCCESS);
376 }
377
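/* Illustrative sketch (not part of the original source): a typical round trip
 * through the attribute calls in this file.  Error checking is abbreviated.
 *
 *     #include <pthread.h>
 *
 *     pthread_attr_t attr;
 *     int detach;
 *
 *     pthread_attr_init(&attr);
 *     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *     pthread_attr_getdetachstate(&attr, &detach);     detach is now
 *                                                      PTHREAD_CREATE_DETACHED
 *     pthread_attr_destroy(&attr);
 */
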
378 /*
379 * Set the 'detach' state in a thread attribute structure.
380 * Note: written as a helper function for info hiding
381 */
382 int
383 pthread_attr_setdetachstate(pthread_attr_t *attr,
384 int detachstate)
385 {
386 if (attr->sig == _PTHREAD_ATTR_SIG)
387 {
388 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
389 (detachstate == PTHREAD_CREATE_DETACHED))
390 {
391 attr->detached = detachstate;
392 return (ESUCCESS);
393 } else
394 {
395 return (EINVAL);
396 }
397 } else
398 {
399 return (EINVAL); /* Not an attribute structure! */
400 }
401 }
402
403 /*
404 * Set the 'inherit scheduling' state in a thread attribute structure.
405 * Note: written as a helper function for info hiding
406 */
407 int
408 pthread_attr_setinheritsched(pthread_attr_t *attr,
409 int inheritsched)
410 {
411 if (attr->sig == _PTHREAD_ATTR_SIG)
412 {
413 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
414 (inheritsched == PTHREAD_EXPLICIT_SCHED))
415 {
416 attr->inherit = inheritsched;
417 return (ESUCCESS);
418 } else
419 {
420 return (EINVAL);
421 }
422 } else
423 {
424 return (EINVAL); /* Not an attribute structure! */
425 }
426 }
427
428 /*
429  * Set the scheduling parameters in a thread attribute structure.
430 * Note: written as a helper function for info hiding
431 */
432 int
433 pthread_attr_setschedparam(pthread_attr_t *attr,
434 const struct sched_param *param)
435 {
436 if (attr->sig == _PTHREAD_ATTR_SIG)
437 {
438 /* TODO: Validate sched_param fields */
439 attr->param = *param;
440 return (ESUCCESS);
441 } else
442 {
443 return (EINVAL); /* Not an attribute structure! */
444 }
445 }
446
447 /*
448 * Set the scheduling policy in a thread attribute structure.
449 * Note: written as a helper function for info hiding
450 */
451 int
452 pthread_attr_setschedpolicy(pthread_attr_t *attr,
453 int policy)
454 {
455 if (attr->sig == _PTHREAD_ATTR_SIG)
456 {
457 if ((policy == SCHED_OTHER) ||
458 (policy == SCHED_RR) ||
459 (policy == SCHED_FIFO))
460 {
461 attr->policy = policy;
462 return (ESUCCESS);
463 } else
464 {
465 return (EINVAL);
466 }
467 } else
468 {
469 return (EINVAL); /* Not an attribute structure! */
470 }
471 }
472
473 /*
474 * Set the scope for the thread.
475 * We currently only provide PTHREAD_SCOPE_SYSTEM
476 */
477 int
478 pthread_attr_setscope(pthread_attr_t *attr,
479 int scope)
480 {
481 if (attr->sig == _PTHREAD_ATTR_SIG) {
482 if (scope == PTHREAD_SCOPE_SYSTEM) {
483 /* No attribute yet for the scope */
484 return (ESUCCESS);
485 } else if (scope == PTHREAD_SCOPE_PROCESS) {
486 return (ENOTSUP);
487 }
488 }
489 return (EINVAL); /* Not an attribute structure! */
490 }
491
492 /*
493 * Get the scope for the thread.
494 * We currently only provide PTHREAD_SCOPE_SYSTEM
495 */
496 int
497 pthread_attr_getscope(pthread_attr_t *attr,
498 int *scope)
499 {
500 if (attr->sig == _PTHREAD_ATTR_SIG) {
501 *scope = PTHREAD_SCOPE_SYSTEM;
502 return (ESUCCESS);
503 }
504 return (EINVAL); /* Not an attribute structure! */
505 }
506
507 /* Get the base stack address from a thread attribute structure */
508 int
509 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
510 {
511 if (attr->sig == _PTHREAD_ATTR_SIG) {
512 *stackaddr = attr->stackaddr;
513 return (ESUCCESS);
514 } else {
515 return (EINVAL); /* Not an attribute structure! */
516 }
517 }
518
519 int
520 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
521 {
522 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
523 attr->stackaddr = stackaddr;
524 attr->freeStackOnExit = FALSE;
525 return (ESUCCESS);
526 } else {
527 return (EINVAL); /* Not an attribute structure! */
528 }
529 }
530
531 int
532 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
533 {
534 if (attr->sig == _PTHREAD_ATTR_SIG) {
535 *stacksize = attr->stacksize;
536 return (ESUCCESS);
537 } else {
538 return (EINVAL); /* Not an attribute structure! */
539 }
540 }
541
542 int
543 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
544 {
545 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
546 attr->stacksize = stacksize;
547 return (ESUCCESS);
548 } else {
549 return (EINVAL); /* Not an attribute structure! */
550 }
551 }
552
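/* Illustrative sketch (not part of the original source): the stack size is
 * the attribute most callers change.  The value must be a page multiple and
 * at least PTHREAD_STACK_MIN or the call above returns EINVAL.
 *
 *     pthread_attr_t attr;
 *     size_t size;
 *
 *     pthread_attr_init(&attr);
 *     pthread_attr_setstacksize(&attr, 512 * 1024);
 *     pthread_attr_getstacksize(&attr, &size);         size is now 524288
 */
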
553 int
554 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
555 {
556 if (attr->sig == _PTHREAD_ATTR_SIG) {
557 *stackaddr = attr->stackaddr;
558 *stacksize = attr->stacksize;
559 return (ESUCCESS);
560 } else {
561 return (EINVAL); /* Not an attribute structure! */
562 }
563 }
564
565 int
566 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
567 {
568 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
569 (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
570 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
571 attr->stackaddr = stackaddr;
572 attr->freeStackOnExit = FALSE;
573 attr->stacksize = stacksize;
574 return (ESUCCESS);
575 } else {
576 return (EINVAL); /* Not an attribute structure! */
577 }
578 }
579
580 /*
581 * Create and start execution of a new thread.
582 */
583
584 static void
585 _pthread_body(pthread_t self)
586 {
587 _pthread_set_self(self);
588 pthread_exit((self->fun)(self->arg));
589 }
590
591 int
592 _pthread_create(pthread_t t,
593 const pthread_attr_t *attrs,
594 void *stack,
595 const mach_port_t kernel_thread)
596 {
597 int res;
598 res = ESUCCESS;
599
600 do
601 {
602 memset(t, 0, sizeof(*t));
603 t->stacksize = attrs->stacksize;
604 t->stackaddr = (void *)stack;
605 t->kernel_thread = kernel_thread;
606 t->detached = attrs->detached;
607 t->inherit = attrs->inherit;
608 t->policy = attrs->policy;
609 t->param = attrs->param;
610 t->freeStackOnExit = attrs->freeStackOnExit;
611 t->mutexes = (struct _pthread_mutex *)NULL;
612 t->sig = _PTHREAD_SIG;
613 t->reply_port = MACH_PORT_NULL;
614 t->cthread_self = NULL;
615 LOCK_INIT(t->lock);
616 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
617 t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
618 t->death = SEMAPHORE_NULL;
619
620 if (kernel_thread != MACH_PORT_NULL)
621 pthread_setschedparam(t, t->policy, &t->param);
622 } while (0);
623 return (res);
624 }
625
626 /* Need to deprecate this in the future */
627 int
628 _pthread_is_threaded(void)
629 {
630 return __is_threaded;
631 }
632
633 /* Non-portable public API to learn whether this process has (or has had) at least one
634  * thread apart from the main thread. There is a race if a thread is in the process of
635  * being created at the time of the call. It does not tell whether more than one thread
636  * exists at this point in time.
637  */
638 int
639 pthread_is_threaded_np(void)
640 {
641 return (__is_threaded);
642 }
643
644 mach_port_t
645 pthread_mach_thread_np(pthread_t t)
646 {
647 thread_t kernel_thread;
648
649 /* Wait for the creator to initialize it */
650 while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
651 sched_yield();
652
653 return kernel_thread;
654 }
655
656 size_t
657 pthread_get_stacksize_np(pthread_t t)
658 {
659 return t->stacksize;
660 }
661
662 void *
663 pthread_get_stackaddr_np(pthread_t t)
664 {
665 return t->stackaddr;
666 }
667
668 mach_port_t
669 _pthread_reply_port(pthread_t t)
670 {
671 return t->reply_port;
672 }
673
674
675 /* returns non-zero if the current thread is the main thread */
676 int
677 pthread_main_np(void)
678 {
679 pthread_t self = pthread_self();
680
681 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
682 }
683
684 static int
685 _pthread_create_suspended(pthread_t *thread,
686 const pthread_attr_t *attr,
687 void *(*start_routine)(void *),
688 void *arg,
689 int suspended)
690 {
691 pthread_attr_t *attrs;
692 void *stack;
693 int res;
694 pthread_t t;
695 kern_return_t kern_res;
696 mach_port_t kernel_thread = MACH_PORT_NULL;
697 int needresume;
698
699 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
700         {       /* Set up default parameters */
701 attrs = &_pthread_attr_default;
702 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
703 return EINVAL;
704 }
705 res = ESUCCESS;
706
707         /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check here
708          * whether any change in priority or policy is needed.
709          */
710 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
711 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
712 needresume = 1;
713 suspended = 1;
714 } else
715 needresume = 0;
716
717 do
718 {
719 /* Allocate a stack for the thread */
720 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
721 break;
722 }
723 t = (pthread_t)malloc(sizeof(struct _pthread));
724 *thread = t;
725 if (suspended) {
726 /* Create the Mach thread for this thread */
727 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
728 if (kern_res != KERN_SUCCESS)
729 {
730 printf("Can't create thread: %d\n", kern_res);
731 res = EINVAL; /* Need better error here? */
732 break;
733 }
734 }
735 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
736 {
737 break;
738 }
739 set_malloc_singlethreaded(0);
740 __is_threaded = 1;
741 LOCK(_pthread_count_lock);
742 _pthread_count++;
743 UNLOCK(_pthread_count_lock);
744
745                 /* Send it on its way */
746 t->arg = arg;
747 t->fun = start_routine;
748 /* Now set it up to execute */
749 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
750 } while (0);
751 return (res);
752 }
753
754 int
755 pthread_create(pthread_t *thread,
756 const pthread_attr_t *attr,
757 void *(*start_routine)(void *),
758 void *arg)
759 {
760 return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
761 }
762
763 int
764 pthread_create_suspended_np(pthread_t *thread,
765 const pthread_attr_t *attr,
766 void *(*start_routine)(void *),
767 void *arg)
768 {
769 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
770 }
771
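/* Illustrative sketch (not part of the original source): creating a thread
 * with the default attributes and joining it.  The worker routine and its
 * argument are hypothetical.
 *
 *     #include <pthread.h>
 *
 *     static void *worker(void *arg)
 *     {
 *             return arg;                       whatever the thread computes
 *     }
 *
 *     int run_worker(void)
 *     {
 *             pthread_t tid;
 *             void *result;
 *
 *             if (pthread_create(&tid, NULL, worker, (void *)1) != 0)
 *                     return -1;
 *             pthread_join(tid, &result);       result == (void *)1
 *             return 0;
 *     }
 */
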
772 /*
773  * Make a thread 'detached' - no longer 'joinable' with other threads.
774 */
775 int
776 pthread_detach(pthread_t thread)
777 {
778 if (thread->sig == _PTHREAD_SIG)
779 {
780 LOCK(thread->lock);
781 if (thread->detached & PTHREAD_CREATE_JOINABLE)
782 {
783 if (thread->detached & _PTHREAD_EXITED) {
784 UNLOCK(thread->lock);
785 pthread_join(thread, NULL);
786 return ESUCCESS;
787 } else {
788 semaphore_t death = thread->death;
789
790 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
791 thread->detached |= PTHREAD_CREATE_DETACHED;
792 UNLOCK(thread->lock);
793 if (death)
794 (void) semaphore_signal(death);
795 return (ESUCCESS);
796 }
797 } else {
798 UNLOCK(thread->lock);
799 return (EINVAL);
800 }
801 } else {
802 return (ESRCH); /* Not a valid thread */
803 }
804 }
805
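/* Illustrative sketch (not part of the original source): a fire-and-forget
 * thread whose resources are reclaimed without a join.  worker is the same
 * hypothetical routine as above; the same effect can be had up front with
 * pthread_attr_setdetachstate(..., PTHREAD_CREATE_DETACHED).
 *
 *     pthread_t tid;
 *
 *     if (pthread_create(&tid, NULL, worker, NULL) == 0)
 *             pthread_detach(tid);
 */
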
806
807 /*
808 * pthread_kill call to system call
809 */
810
811
812 int
813 pthread_kill (
814 pthread_t th,
815 int sig)
816 {
817 int error = 0;
818
819 if ((sig < 0) || (sig > NSIG))
820 return(EINVAL);
821
822 if (th && (th->sig == _PTHREAD_SIG)) {
823 error = __pthread_kill(pthread_mach_thread_np(th), sig);
824 if (error == -1)
825 error = errno;
826 return(error);
827 }
828 else
829 return(ESRCH);
830 }
831
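/* Illustrative sketch (not part of the original source): directing a signal
 * at one specific thread.  worker_tid is hypothetical, and a handler for
 * SIGUSR1 is assumed to be installed elsewhere.
 *
 *     #include <signal.h>
 *
 *     pthread_kill(worker_tid, SIGUSR1);
 */
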
832 /* Announce that there are pthread resources ready to be reclaimed in a */
833 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
834 /* thread underneath is terminated right away. */
835 static
836 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
837 mach_msg_empty_rcv_t msg;
838 kern_return_t ret;
839
840 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
841 MACH_MSG_TYPE_MOVE_SEND);
842 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
843 msg.header.msgh_remote_port = thread_recycle_port;
844 msg.header.msgh_local_port = kernel_thread;
845 msg.header.msgh_id = (int)thread;
846 ret = mach_msg_send(&msg.header);
847 assert(ret == MACH_MSG_SUCCESS);
848 }
849
850 /* Reap the resources for available threads */
851 static
852 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
853 mach_port_type_t ptype;
854 kern_return_t ret;
855 task_t self;
856
857 self = mach_task_self();
858 if (kernel_thread != MACH_PORT_DEAD) {
859 ret = mach_port_type(self, kernel_thread, &ptype);
860 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
861 /* not quite dead yet... */
862 return EAGAIN;
863 }
864 ret = mach_port_deallocate(self, kernel_thread);
865 if (ret != KERN_SUCCESS) {
866 fprintf(stderr,
867 "mach_port_deallocate(kernel_thread) failed: %s\n",
868 mach_error_string(ret));
869 }
870 }
871
872 if (th->reply_port != MACH_PORT_NULL) {
873 ret = mach_port_mod_refs(self, th->reply_port,
874 MACH_PORT_RIGHT_RECEIVE, -1);
875 if (ret != KERN_SUCCESS) {
876 fprintf(stderr,
877 "mach_port_mod_refs(reply_port) failed: %s\n",
878 mach_error_string(ret));
879 }
880 }
881
882 if (th->freeStackOnExit) {
883 vm_address_t addr = (vm_address_t)th->stackaddr;
884 vm_size_t size;
885
886 size = (vm_size_t)th->stacksize + vm_page_size;
887
888 #if !defined(STACK_GROWS_UP)
889 addr -= size;
890 #endif
891 ret = vm_deallocate(self, addr, size);
892 if (ret != KERN_SUCCESS) {
893 fprintf(stderr,
894 "vm_deallocate(stack) failed: %s\n",
895 mach_error_string(ret));
896 }
897 }
898
899 if (value_ptr)
900 *value_ptr = th->exit_value;
901
902 if (th != &_thread)
903 free(th);
904
905 return ESUCCESS;
906 }
907
908 static
909 void _pthread_reap_threads(void)
910 {
911 mach_msg_empty_rcv_t msg;
912 kern_return_t ret;
913
914 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
915 sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
916 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
917 while (ret == MACH_MSG_SUCCESS) {
918 mach_port_t kernel_thread = msg.header.msgh_remote_port;
919 pthread_t thread = (pthread_t)msg.header.msgh_id;
920
921 if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
922 {
923 /* not dead yet, put it back for someone else to reap, stop here */
924 _pthread_become_available(thread, kernel_thread);
925 return;
926 }
927 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
928 sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
929 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
930 }
931 }
932
933 /* For compatibility... */
934
935 pthread_t
936 _pthread_self() {
937 return pthread_self();
938 }
939
940 /*
941 * Terminate a thread.
942 */
943 void
944 pthread_exit(void *value_ptr)
945 {
946 struct _pthread_handler_rec *handler;
947 pthread_t self = pthread_self();
948 kern_return_t kern_res;
949 int thread_count;
950
951         /* Keep this thread from receiving any further signals */
952 syscall(331,1);
953
954 while ((handler = self->cleanup_stack) != 0)
955 {
956 (handler->routine)(handler->arg);
957 self->cleanup_stack = handler->next;
958 }
959 _pthread_tsd_cleanup(self);
960
961 _pthread_reap_threads();
962
963 LOCK(self->lock);
964 self->detached |= _PTHREAD_EXITED;
965
966 if (self->detached & PTHREAD_CREATE_JOINABLE) {
967 mach_port_t death = self->death;
968 self->exit_value = value_ptr;
969 UNLOCK(self->lock);
970 /* the joiner will need a kernel thread reference, leave ours for it */
971 if (death) {
972 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
973 if (kern_res != KERN_SUCCESS)
974 fprintf(stderr,
975 "semaphore_signal(death) failed: %s\n",
976 mach_error_string(kern_res));
977 }
978 } else {
979 UNLOCK(self->lock);
980                 /* with no joiner, we let _pthread_become_available() consume our cached ref */
981 _pthread_become_available(self, pthread_mach_thread_np(self));
982 }
983
984 LOCK(_pthread_count_lock);
985 thread_count = --_pthread_count;
986 UNLOCK(_pthread_count_lock);
987 if (thread_count <= 0)
988 exit(0);
989
990 /* Use a new reference to terminate ourselves. Should never return. */
991 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
992 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
993 mach_error_string(kern_res));
994 abort();
995 }
996
997 /*
998 * Wait for a thread to terminate and obtain its exit value.
999 */
1000 int
1001 pthread_join(pthread_t thread,
1002 void **value_ptr)
1003 {
1004 kern_return_t kern_res;
1005 int res = ESUCCESS;
1006
1007 if (thread->sig == _PTHREAD_SIG)
1008 {
1009 semaphore_t death = new_sem_from_pool(); /* in case we need it */
1010
1011 LOCK(thread->lock);
1012 if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
1013 thread->death == SEMAPHORE_NULL)
1014 {
1015 pthread_t self = pthread_self();
1016
1017 assert(thread->joiner == NULL);
1018 if (thread != self && (self == NULL || self->joiner != thread))
1019 {
1020 int already_exited = (thread->detached & _PTHREAD_EXITED);
1021
1022 thread->death = death;
1023 thread->joiner = self;
1024 UNLOCK(thread->lock);
1025
1026 if (!already_exited)
1027 {
1028 /* Wait for it to signal... */
1029 do {
1030 PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
1031 } while (kern_res != KERN_SUCCESS);
1032 }
1033
1034 /* ... and wait for it to really be dead */
1035 while ((res = _pthread_reap_thread(thread,
1036 thread->kernel_thread,
1037 value_ptr)) == EAGAIN)
1038 {
1039 sched_yield();
1040 }
1041 } else {
1042 UNLOCK(thread->lock);
1043 res = EDEADLK;
1044 }
1045 } else {
1046 UNLOCK(thread->lock);
1047 res = EINVAL;
1048 }
1049 restore_sem_to_pool(death);
1050 return res;
1051 }
1052 return ESRCH;
1053 }
1054
1055 /*
1056  * Get the scheduling policy and scheduling parameters for a thread.
1057 */
1058 int
1059 pthread_getschedparam(pthread_t thread,
1060 int *policy,
1061 struct sched_param *param)
1062 {
1063 if (thread->sig == _PTHREAD_SIG)
1064 {
1065 *policy = thread->policy;
1066 *param = thread->param;
1067 return (ESUCCESS);
1068 } else
1069 {
1070 return (ESRCH); /* Not a valid thread structure */
1071 }
1072 }
1073
1074 /*
1075  * Set the scheduling policy and scheduling parameters for a thread.
1076 */
1077 int
1078 pthread_setschedparam(pthread_t thread,
1079 int policy,
1080 const struct sched_param *param)
1081 {
1082 policy_base_data_t bases;
1083 policy_base_t base;
1084 mach_msg_type_number_t count;
1085 kern_return_t ret;
1086
1087 if (thread->sig == _PTHREAD_SIG)
1088 {
1089 switch (policy)
1090 {
1091 case SCHED_OTHER:
1092 bases.ts.base_priority = param->sched_priority;
1093 base = (policy_base_t)&bases.ts;
1094 count = POLICY_TIMESHARE_BASE_COUNT;
1095 break;
1096 case SCHED_FIFO:
1097 bases.fifo.base_priority = param->sched_priority;
1098 base = (policy_base_t)&bases.fifo;
1099 count = POLICY_FIFO_BASE_COUNT;
1100 break;
1101 case SCHED_RR:
1102 bases.rr.base_priority = param->sched_priority;
1103 /* quantum isn't public yet */
1104 bases.rr.quantum = param->quantum;
1105 base = (policy_base_t)&bases.rr;
1106 count = POLICY_RR_BASE_COUNT;
1107 break;
1108 default:
1109 return (EINVAL);
1110 }
1111 thread->policy = policy;
1112 thread->param = *param;
1113 ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
1114 if (ret != KERN_SUCCESS)
1115 {
1116 return (EINVAL);
1117 }
1118 return (ESUCCESS);
1119 } else
1120 {
1121 return (ESRCH); /* Not a valid thread structure */
1122 }
1123 }
1124
1125 /*
1126 * Get the minimum priority for the given policy
1127 */
1128 int
1129 sched_get_priority_min(int policy)
1130 {
1131 return default_priority - 16;
1132 }
1133
1134 /*
1135 * Get the maximum priority for the given policy
1136 */
1137 int
1138 sched_get_priority_max(int policy)
1139 {
1140 return default_priority + 16;
1141 }
1142
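/* Illustrative sketch (not part of the original source): raising the calling
 * thread's priority within the fixed +/-16 band reported by
 * sched_get_priority_min()/sched_get_priority_max() above.
 *
 *     struct sched_param param;
 *     int policy;
 *
 *     pthread_getschedparam(pthread_self(), &policy, &param);
 *     param.sched_priority = sched_get_priority_max(policy);
 *     pthread_setschedparam(pthread_self(), policy, &param);
 */
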
1143 /*
1144 * Determine if two thread identifiers represent the same thread.
1145 */
1146 int
1147 pthread_equal(pthread_t t1,
1148 pthread_t t2)
1149 {
1150 return (t1 == t2);
1151 }
1152
1153 void
1154 cthread_set_self(void *cself)
1155 {
1156 pthread_t self = pthread_self();
1157 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1158 _pthread_set_self(cself);
1159 return;
1160 }
1161 self->cthread_self = cself;
1162 }
1163
1164 void *
1165 ur_cthread_self(void) {
1166 pthread_t self = pthread_self();
1167 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1168 return (void *)self;
1169 }
1170 return self->cthread_self;
1171 }
1172
1173 /*
1174 * Execute a function exactly one time in a thread-safe fashion.
1175 */
1176 int
1177 pthread_once(pthread_once_t *once_control,
1178 void (*init_routine)(void))
1179 {
1180 LOCK(once_control->lock);
1181 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1182 {
1183 (*init_routine)();
1184 once_control->sig = _PTHREAD_ONCE_SIG;
1185 }
1186 UNLOCK(once_control->lock);
1187 return (ESUCCESS); /* Spec defines no possible errors! */
1188 }
1189
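/* Illustrative sketch (not part of the original source): one-time
 * initialization guarded by pthread_once().  The init routine and the state
 * it sets up are hypothetical.
 *
 *     static pthread_once_t init_once = PTHREAD_ONCE_INIT;
 *     static int initialized;
 *
 *     static void do_init(void)
 *     {
 *             initialized = 1;
 *     }
 *
 *     void lazy_setup(void)
 *     {
 *             pthread_once(&init_once, do_init);     runs do_init exactly once
 *     }
 */
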
1190 /*
1191 * Cancel a thread
1192 */
1193 int
1194 pthread_cancel(pthread_t thread)
1195 {
1196 if (thread->sig == _PTHREAD_SIG)
1197 {
1198 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
1199 return (ESUCCESS);
1200 } else
1201 {
1202 return (ESRCH);
1203 }
1204 }
1205
1206 /*
1207 * Insert a cancellation point in a thread.
1208 */
1209 static void
1210 _pthread_testcancel(pthread_t thread)
1211 {
1212 LOCK(thread->lock);
1213 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1214 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1215 {
1216 UNLOCK(thread->lock);
1217 pthread_exit(0);
1218 }
1219 UNLOCK(thread->lock);
1220 }
1221
1222 void
1223 pthread_testcancel(void)
1224 {
1225 pthread_t self = pthread_self();
1226 _pthread_testcancel(self);
1227 }
1228
1229 /*
1230 * Query/update the cancelability 'state' of a thread
1231 */
1232 int
1233 pthread_setcancelstate(int state, int *oldstate)
1234 {
1235 pthread_t self = pthread_self();
1236 int err = ESUCCESS;
1237 LOCK(self->lock);
1238 if (oldstate)
1239 *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
1240 if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
1241 {
1242 self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
1243 } else
1244 {
1245 err = EINVAL;
1246 }
1247 UNLOCK(self->lock);
1248 _pthread_testcancel(self); /* See if we need to 'die' now... */
1249 return (err);
1250 }
1251
1252 /*
1253 * Query/update the cancelability 'type' of a thread
1254 */
1255 int
1256 pthread_setcanceltype(int type, int *oldtype)
1257 {
1258 pthread_t self = pthread_self();
1259 int err = ESUCCESS;
1260 LOCK(self->lock);
1261 if (oldtype)
1262 *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
1263 if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
1264 {
1265 self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
1266 } else
1267 {
1268 err = EINVAL;
1269 }
1270 UNLOCK(self->lock);
1271 _pthread_testcancel(self); /* See if we need to 'die' now... */
1272 return (err);
1273 }
1274
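/* Illustrative sketch (not part of the original source): the deferred
 * cancellation pattern the calls above support.  In this implementation a
 * pending cancel is only acted on at pthread_testcancel() (and at the
 * setcancelstate/setcanceltype calls themselves).  do_some_work() is a
 * hypothetical work item.
 *
 *     static void *cancellable_worker(void *arg)
 *     {
 *             pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 *             pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 *             for (;;) {
 *                     do_some_work(arg);
 *                     pthread_testcancel();    exits here once pthread_cancel()
 *                                              has been called on this thread
 *             }
 *     }
 */
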
1275 int
1276 pthread_getconcurrency(void)
1277 {
1278 return(pthread_concurrency);
1279 }
1280
1281 int
1282 pthread_setconcurrency(int new_level)
1283 {
1284 pthread_concurrency = new_level;
1285 return(ESUCCESS);
1286 }
1287
1288 /*
1289  * Perform package initialization - called automatically when the application starts
1290 */
1291
1292 static int
1293 pthread_init(void)
1294 {
1295 pthread_attr_t *attrs;
1296 pthread_t thread;
1297 kern_return_t kr;
1298 host_basic_info_data_t basic_info;
1299 host_priority_info_data_t priority_info;
1300 host_info_t info;
1301 host_flavor_t flavor;
1302 host_t host;
1303 mach_msg_type_number_t count;
1304 int mib[2];
1305 size_t len;
1306 int numcpus;
1307
1308 count = HOST_PRIORITY_INFO_COUNT;
1309 info = (host_info_t)&priority_info;
1310 flavor = HOST_PRIORITY_INFO;
1311 host = mach_host_self();
1312 kr = host_info(host, flavor, info, &count);
1313 if (kr != KERN_SUCCESS)
1314 printf("host_info failed (%d); probably need privilege.\n", kr);
1315 else {
1316 default_priority = priority_info.user_priority;
1317 min_priority = priority_info.minimum_priority;
1318 max_priority = priority_info.maximum_priority;
1319 }
1320 attrs = &_pthread_attr_default;
1321 pthread_attr_init(attrs);
1322
1323 thread = &_thread;
1324 _pthread_set_self(thread);
1325 _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
1326 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
1327
1328 /* See if we're on a multiprocessor and set _spin_tries if so. */
1329 mib[0] = CTL_HW;
1330 mib[1] = HW_NCPU;
1331 len = sizeof(numcpus);
1332 if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
1333 if (numcpus > 1) {
1334 _spin_tries = MP_SPIN_TRIES;
1335 }
1336 } else {
1337 count = HOST_BASIC_INFO_COUNT;
1338 info = (host_info_t)&basic_info;
1339 flavor = HOST_BASIC_INFO;
1340 kr = host_info(host, flavor, info, &count);
1341 if (kr != KERN_SUCCESS)
1342 printf("host_info failed (%d)\n", kr);
1343 else {
1344 if (basic_info.avail_cpus > 1)
1345 _spin_tries = MP_SPIN_TRIES;
1346 }
1347 }
1348
1349 mach_port_deallocate(mach_task_self(), host);
1350
1351 _init_cpu_capabilities(); /* check for vector unit, cache line size etc */
1352
1353 #if defined(__ppc__)
1354 /* Use fsqrt instruction in sqrt() if available. */
1355 if (_cpu_capabilities & kHasFsqrt) {
1356 extern size_t hw_sqrt_len;
1357 extern double sqrt( double );
1358 extern double hw_sqrt( double );
1359 extern void sys_icache_invalidate(void *, size_t);
1360
1361 memcpy ( (void *)sqrt, (void *)hw_sqrt, hw_sqrt_len );
1362 sys_icache_invalidate((void *)sqrt, hw_sqrt_len);
1363 }
1364 #endif
1365
1366 mig_init(1); /* enable multi-threaded mig interfaces */
1367 return 0;
1368 }
1369
1370 int sched_yield(void)
1371 {
1372 swtch_pri(0);
1373 return 0;
1374 }
1375
1376 /* This is the "magic" that gets the initialization routine called when the application starts */
1377 int (*_cthread_init_routine)(void) = pthread_init;
1378
1379 /* Get a semaphore from the pool, growing it if necessary */
1380
1381 __private_extern__ semaphore_t new_sem_from_pool(void) {
1382 kern_return_t res;
1383 semaphore_t sem;
1384 int i;
1385
1386 LOCK(sem_pool_lock);
1387 if (sem_pool_current == sem_pool_count) {
1388 sem_pool_count += 16;
1389 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
1390 for (i = sem_pool_current; i < sem_pool_count; i++) {
1391 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
1392 }
1393 }
1394 sem = sem_pool[sem_pool_current++];
1395 UNLOCK(sem_pool_lock);
1396 return sem;
1397 }
1398
1399 /* Put a semaphore back into the pool */
1400 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
1401 LOCK(sem_pool_lock);
1402 sem_pool[--sem_pool_current] = sem;
1403 UNLOCK(sem_pool_lock);
1404 }
1405
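/* Illustrative sketch (not part of the original source): the pool above is
 * used in matched take/give-back pairs, as pthread_join() does - take a
 * semaphore, wait on it (or hand it to another thread to signal), then
 * always return it.
 *
 *     semaphore_t death = new_sem_from_pool();
 *     semaphore_wait(death);            typically signalled by the exiting thread
 *     restore_sem_to_pool(death);
 */
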
1406 static void sem_pool_reset(void) {
1407 LOCK(sem_pool_lock);
1408 sem_pool_count = 0;
1409 sem_pool_current = 0;
1410 sem_pool = NULL;
1411 UNLOCK(sem_pool_lock);
1412 }
1413
1414 __private_extern__ void _pthread_fork_child(void) {
1415 /* Just in case somebody had it locked... */
1416 UNLOCK(sem_pool_lock);
1417 sem_pool_reset();
1418 UNLOCK(_pthread_count_lock);
1419 _pthread_count = 1;
1420 }
1421