/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#define __POSIX_LIB__
#include <assert.h>
#include <stdio.h>	/* For printf(). */
#include <stdlib.h>
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>

#include "pthread_internals.h"

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/*
 * [Internal] stack support
 */

size_t _pthread_stack_size = 0;
int _spin_tries = 0;
#if !defined(__ppc__)
int _cpu_has_altivec = 0;
#endif

/* This global should be used (carefully) by anyone needing to know if a pthread has been
** created.
*/
int __is_threaded = 0;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;

extern mach_port_t thread_recycle_port;

#define STACK_LOWEST(sp)	((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED		(sizeof (struct _pthread))

#ifdef STACK_GROWS_UP

/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START
   ^STACK_SELF
   ^STACK_LOWEST */
#define STACK_BASE(sp)		STACK_LOWEST(sp)
#define STACK_START(stack_low)	(STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)		STACK_BASE(sp)

#else

/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
                                               ^STACK_SELF */

#define STACK_BASE(sp)		(((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)	(STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)		STACK_START(sp)

#endif

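/* Illustrative sketch (not in the original source): what the grows-down
 * macros above compute.  The mask and sp values here are hypothetical;
 * the real mask is __pthread_stack_mask. */
#if 0
static void
_stack_layout_example(void)
{
	vm_address_t mask = 0x0001ffff;		/* hypothetical 128K-aligned stacks */
	vm_address_t sp = 0x00234560;		/* some sp inside one such region */
	vm_address_t base = (sp | mask) + 1;	/* STACK_BASE: one past the region top */
	vm_address_t start = base - sizeof (struct _pthread);	/* STACK_START */

	/* The struct _pthread occupies [start, base); user frames grow down
	 * from start, and STACK_SELF(sp) recovers the struct from any sp. */
	(void)start;
}
#endif
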
/* Set the base address to use as the stack pointer, before adjusting due to the ABI */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, vm_address_t *stack)
{
	kern_return_t kr;
#if 1
	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		assert(((vm_offset_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
		*stack = (vm_address_t)attrs->stackaddr;
		return 0;
	}
	/* TRUE (== 1) doubles as the "anywhere" flag, OR'd with the VM tag */
	kr = vm_allocate(mach_task_self(), stack, attrs->stacksize + vm_page_size, VM_MAKE_TAG(VM_MEMORY_STACK)| TRUE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
#ifdef STACK_GROWS_UP
	/* The guard page is the page one higher than the stack */
	/* The stack base is at the lowest address */
	kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
#else
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	kr = vm_protect(mach_task_self(), *stack, vm_page_size, FALSE, VM_PROT_NONE);
	*stack += attrs->stacksize + vm_page_size;
#endif

#else	/* legacy free-list stack allocator, compiled out */
	vm_address_t cur_stack = (vm_address_t)0;
	if (free_stacks == 0)
	{
		/* Allocating guard pages is done by doubling
		 * the actual stack size, since STACK_BASE() needs
		 * to have stacks aligned on stack_size. Allocating just
		 * one page takes as much memory as allocating more pages
		 * since it will remain one entry in the vm map.
		 * Besides, allocating more than one page allows tracking the
		 * overflow pattern when the overflow is bigger than one page.
		 */
#ifndef	NO_GUARD_PAGES
# define	GUARD_SIZE(a)	(2*(a))
# define	GUARD_MASK(a)	(((a)<<1) | 1)
#else
# define	GUARD_SIZE(a)	(a)
# define	GUARD_MASK(a)	(a)
#endif
		while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
		{
			lowest_stack -= GUARD_SIZE(__pthread_stack_size);
			/* Ensure stack is there */
			kr = vm_allocate(mach_task_self(),
					 &lowest_stack,
					 GUARD_SIZE(__pthread_stack_size),
					 FALSE);
#ifndef	NO_GUARD_PAGES
			if (kr == KERN_SUCCESS) {
# ifdef	STACK_GROWS_UP
				kr = vm_protect(mach_task_self(),
						lowest_stack+__pthread_stack_size,
						__pthread_stack_size,
						FALSE, VM_PROT_NONE);
# else	/* STACK_GROWS_UP */
				kr = vm_protect(mach_task_self(),
						lowest_stack,
						__pthread_stack_size,
						FALSE, VM_PROT_NONE);
				lowest_stack += __pthread_stack_size;
# endif	/* STACK_GROWS_UP */
				if (kr == KERN_SUCCESS)
					break;
			}
#else
			if (kr == KERN_SUCCESS)
				break;
#endif
		}
		if (lowest_stack > 0)
			free_stacks = (vm_address_t *)lowest_stack;
		else
		{
			/* Too bad.  We'll just have to take what comes.
			   Use vm_map instead of vm_allocate so we can
			   specify alignment.  */
			kr = vm_map(mach_task_self(), &lowest_stack,
				    GUARD_SIZE(__pthread_stack_size),
				    GUARD_MASK(__pthread_stack_mask),
				    TRUE /* anywhere */, MEMORY_OBJECT_NULL,
				    0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
				    VM_INHERIT_DEFAULT);
			/* This really shouldn't fail and if it does I don't
			   know what to do.  */
#ifndef	NO_GUARD_PAGES
			if (kr == KERN_SUCCESS) {
# ifdef	STACK_GROWS_UP
				kr = vm_protect(mach_task_self(),
						lowest_stack+__pthread_stack_size,
						__pthread_stack_size,
						FALSE, VM_PROT_NONE);
# else	/* STACK_GROWS_UP */
				kr = vm_protect(mach_task_self(),
						lowest_stack,
						__pthread_stack_size,
						FALSE, VM_PROT_NONE);
				lowest_stack += __pthread_stack_size;
# endif	/* STACK_GROWS_UP */
			}
#endif
			free_stacks = (vm_address_t *)lowest_stack;
			lowest_stack = 0;
		}
		*free_stacks = 0; /* No other free stacks */
	}
	cur_stack = STACK_START((vm_address_t) free_stacks);
	free_stacks = (vm_address_t *)*free_stacks;
	cur_stack = _adjust_sp(cur_stack);	/* Machine dependent stack fudging */
#endif
	return 0;
}

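/* Illustrative sketch (not in the original source): the contract of
 * _pthread_allocate_stack() as the creation path below uses it.  All
 * names are real; only the example function itself is hypothetical. */
#if 0
static int
_allocate_stack_example(vm_address_t *stackp)
{
	pthread_attr_t attrs;
	int res;

	pthread_attr_init(&attrs);	/* stacksize defaults to DEFAULT_STACK_SIZE */
	res = _pthread_allocate_stack(&attrs, stackp);
	/* On success with a grows-down stack, *stackp is the high end of the
	 * region: one guard page plus stacksize above the vm_allocate() base. */
	return res;
}
#endif
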
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		attr->sig = 0;		/* invalidate so stale attrs are caught */
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
			    int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*detachstate = attr->detached;
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
			     int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*inheritsched = attr->inherit;
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
			   struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*param = attr->param;
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
			    int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*policy = attr->policy;
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->freeStackOnExit = TRUE;
	return (ESUCCESS);
}

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
			    int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED))
		{
			attr->detached = detachstate;
			return (ESUCCESS);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
			     int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED))
		{
			attr->inherit = inheritsched;
			return (ESUCCESS);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
			   const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		return (ESUCCESS);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
			    int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO))
		{
			attr->policy = policy;
			return (ESUCCESS);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
		      int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (ESUCCESS);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
		      int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (ESUCCESS);
	}
	return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (ESUCCESS);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = FALSE;
		return (ESUCCESS);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (ESUCCESS);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (ESUCCESS);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

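/* Illustrative sketch (not in the original source): configuring an
 * attribute object with the accessors above.  The setters enforce that
 * the size is a whole number of pages and at least PTHREAD_STACK_MIN. */
#if 0
static int
_attr_usage_example(pthread_attr_t *attrs)
{
	int err;

	if ((err = pthread_attr_init(attrs)) != ESUCCESS)
		return (err);
	if ((err = pthread_attr_setdetachstate(attrs, PTHREAD_CREATE_DETACHED)) != ESUCCESS)
		return (err);
	/* Round a hypothetical 100000-byte request up to whole pages first. */
	return (pthread_attr_setstacksize(attrs,
			(100000 + vm_page_size - 1) & ~(vm_page_size - 1)));
}
#endif
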
/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	pthread_exit((self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		vm_address_t stack,
		const mach_port_t kernel_thread)
{
	int res;
	kern_return_t kern_res;
	res = ESUCCESS;
	do
	{
		memset(t, 0, sizeof(*t));
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
		t->kernel_thread = kernel_thread;
		t->detached = attrs->detached;
		t->inherit = attrs->inherit;
		t->policy = attrs->policy;
		t->param = attrs->param;
		t->freeStackOnExit = attrs->freeStackOnExit;
		t->mutexes = (struct _pthread_mutex *)NULL;
		t->sig = _PTHREAD_SIG;
		t->reply_port = MACH_PORT_NULL;
		t->cthread_self = NULL;
		LOCK_INIT(t->lock);
		t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
		t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
		pthread_setschedparam(t, t->policy, &t->param);
		/* Create control semaphores */
		if (t->detached == PTHREAD_CREATE_JOINABLE)
		{
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
							   &t->death,
							   SYNC_POLICY_FIFO,
							   0), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create 'death' semaphore: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
							   &t->joiners,
							   SYNC_POLICY_FIFO,
							   0), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create 'joiners' semaphore: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
			t->num_joiners = 0;
		} else
		{
			t->death = MACH_PORT_NULL;
		}
	} while (0);
	return (res);
}

int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	return t->kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
	return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
	return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}

static int
_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int suspended)
{
	pthread_attr_t _attr, *attrs;
	vm_address_t stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread;
	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{			/* Set up default parameters */
		attrs = &_attr;
		pthread_attr_init(attrs);
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = ESUCCESS;
	do
	{
		/* Allocate a stack for the thread */
		if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
			break;
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		if (t == NULL) {	/* malloc can fail; don't dereference NULL */
			res = ENOMEM;
			break;
		}
		*thread = t;
		/* Create the Mach thread for this thread */
		PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
		if (kern_res != KERN_SUCCESS)
		{
			printf("Can't create thread: %d\n", kern_res);
			res = EINVAL; /* Need better error here? */
			break;
		}
		if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			break;
		}
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		_pthread_setup(t, _pthread_body, stack);
		/* Send it on its way */
		set_malloc_singlethreaded(0);
		__is_threaded = 1;
		if (suspended == 0) {
			PTHREAD_MACH_CALL(thread_resume(kernel_thread), kern_res);
			/* Only check kern_res when we actually resumed; it is
			   uninitialized on the suspended path */
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't resume thread: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
		}
	} while (0);
	return (res);
}

int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}

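/* Illustrative sketch (not in the original source): the intended use of
 * the suspended variant -- create the thread stopped, poke at it through
 * its Mach port, then thread_resume() it, which is exactly the step
 * pthread_create() performs for the caller.  _worker is hypothetical. */
#if 0
static void *_worker(void *arg) { return arg; }

static int
_create_suspended_example(void)
{
	pthread_t t;
	int err;

	err = pthread_create_suspended_np(&t, (pthread_attr_t *)NULL, _worker, NULL);
	if (err == ESUCCESS)
		(void)thread_resume(pthread_mach_thread_np(t));
	return (err);
}
#endif
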
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	kern_return_t kern_res;
	int num_joiners;
	mach_port_t death;
	if (thread->sig == _PTHREAD_SIG)
	{
		LOCK(thread->lock);
		if (thread->detached == PTHREAD_CREATE_JOINABLE)
		{
			thread->detached = PTHREAD_CREATE_DETACHED;
			num_joiners = thread->num_joiners;
			death = thread->death;
			thread->death = MACH_PORT_NULL;
			UNLOCK(thread->lock);
			if (num_joiners > 0)
			{
				/* Wake up a joiner */
				PTHREAD_MACH_CALL(semaphore_signal(thread->joiners), kern_res);
			}
			/* Destroy 'control' semaphores */
			PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
							    thread->joiners), kern_res);
			PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
							    death), kern_res);
			return (ESUCCESS);
		} else if (thread->detached == _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return ESUCCESS;
		} else
		{
			UNLOCK(thread->lock);
			return (EINVAL);
		}
	} else
	{
		return (ESRCH); /* Not a valid thread */
	}
}

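/* Illustrative sketch (not in the original source): fire-and-forget use
 * of pthread_detach().  Once detached, the thread is reclaimed by the
 * recycle machinery below and must never be joined. */
#if 0
static int
_detach_example(void *(*fn)(void *), void *arg)
{
	pthread_t t;
	int err;

	if ((err = pthread_create(&t, (pthread_attr_t *)NULL, fn, arg)) == ESUCCESS)
		err = pthread_detach(t);
	return (err);
}
#endif
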
/* Announce that there is a thread ready to be reclaimed for pthread_create */
/* or terminated by pthread_exit. If the thread is reused, it will have its */
/* thread state set and will continue in the thread body function. If it is */
/* terminated, it will be yanked out from under the mach_msg() call. */

static void _pthread_become_available(pthread_t thread) {
	mach_msg_empty_rcv_t msg = { { 0 } };
	kern_return_t ret;

	if (thread->reply_port == MACH_PORT_NULL) {
		thread->reply_port = mach_reply_port();
	}
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = MACH_PORT_NULL;
	msg.header.msgh_id = (int)thread;
	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	ret = mach_msg(&msg.header, MACH_SEND_MSG | MACH_RCV_MSG,
			msg.header.msgh_size, sizeof msg,
			thread->reply_port, MACH_MSG_TIMEOUT_NONE,
			MACH_PORT_NULL);
	while (1) {
		ret = thread_suspend(thread->kernel_thread);
	}
	/* We should never get here */
}

/* Check to see if any threads are available. Return immediately */

static kern_return_t _pthread_check_for_available_threads(mach_msg_empty_rcv_t *msg) {
	return mach_msg(&msg->header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			sizeof(mach_msg_empty_rcv_t), thread_recycle_port, 0,
			MACH_PORT_NULL);
}

/* Terminate all available threads and deallocate their stacks */
static void _pthread_reap_threads(void) {
	kern_return_t ret;
	mach_msg_empty_rcv_t msg = { { 0 } };
	while((ret = _pthread_check_for_available_threads(&msg)) == KERN_SUCCESS) {
		pthread_t th = (pthread_t)msg.header.msgh_id;
		mach_port_t kernel_thread = th->kernel_thread;
		mach_port_t reply_port = th->reply_port;
		vm_size_t size = (vm_size_t)th->stacksize + vm_page_size;
		vm_address_t addr = (vm_address_t)th->stackaddr;
#if !defined(STACK_GROWS_UP)
		addr -= size;
#endif
		ret = thread_terminate(kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr, "thread_terminate() failed: %s\n",
				mach_error_string(ret));
		}
		ret = mach_port_destroy(mach_task_self(), reply_port);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_destroy(thread_reply) failed: %s\n",
				mach_error_string(ret));
		}
		if (th->freeStackOnExit) {
			ret = vm_deallocate(mach_task_self(), addr, size);
			if (ret != KERN_SUCCESS) {
				fprintf(stderr,
					"vm_deallocate(stack) failed: %s\n",
					mach_error_string(ret));
			}
		}
		free(th);
	}
	assert(ret == MACH_RCV_TIMED_OUT);
}

/* For compatibility... */

pthread_t
_pthread_self() {
	return pthread_self();
}

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	struct _pthread_handler_rec *handler;
	kern_return_t kern_res;
	int num_joiners;
	while ((handler = self->cleanup_stack) != 0)
	{
		(handler->routine)(handler->arg);
		self->cleanup_stack = handler->next;
	}
	_pthread_tsd_cleanup(self);
	LOCK(self->lock);
	if (self->detached == PTHREAD_CREATE_JOINABLE)
	{
		self->detached = _PTHREAD_EXITED;
		self->exit_value = value_ptr;
		num_joiners = self->num_joiners;
		UNLOCK(self->lock);
		if (num_joiners > 0)
		{
			/* POSIX says that multiple pthread_join() calls on */
			/* the same thread are undefined so we just wake up */
			/* the first one to join */
			PTHREAD_MACH_CALL(semaphore_signal(self->joiners), kern_res);
		}
		do {
			PTHREAD_MACH_CALL(semaphore_wait(self->death), kern_res);
		} while (kern_res == KERN_ABORTED);
	} else
		UNLOCK(self->lock);
	/* Destroy thread & reclaim resources */
	if (self->death)
	{
		PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->joiners), kern_res);
		PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->death), kern_res);
	}
	if (self->detached == _PTHREAD_CREATE_PARENT) {
		exit((int)(self->exit_value));
	}

	_pthread_reap_threads();

	_pthread_become_available(self);
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
	     void **value_ptr)
{
	kern_return_t kern_res;
	if (thread->sig == _PTHREAD_SIG)
	{
		LOCK(thread->lock);
		if (thread->detached == PTHREAD_CREATE_JOINABLE)
		{
			thread->num_joiners++;
			UNLOCK(thread->lock);
			do {
				PTHREAD_MACH_CALL(semaphore_wait(thread->joiners), kern_res);
			} while (kern_res == KERN_ABORTED);
			LOCK(thread->lock);
			thread->num_joiners--;
		}
		if (thread->detached == _PTHREAD_EXITED)
		{
			if (thread->num_joiners == 0)
			{	/* Give the result to this thread */
				if (value_ptr)
				{
					*value_ptr = thread->exit_value;
				}
				UNLOCK(thread->lock);
				PTHREAD_MACH_CALL(semaphore_signal(thread->death), kern_res);
				return (ESUCCESS);
			} else
			{	/* This 'joiner' missed the catch! */
				UNLOCK(thread->lock);
				return (ESRCH);
			}
		} else
		{	/* The thread has become anti-social! */
			UNLOCK(thread->lock);
			return (EINVAL);
		}
	} else
	{
		return (ESRCH); /* Not a valid thread */
	}
}

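/* Illustrative sketch (not in the original source): creating a thread and
 * collecting the value it passed to pthread_exit().  Per the comments
 * above, only the single joiner that catches the death semaphore gets the
 * result; extra joiners see ESRCH.  _answer is hypothetical. */
#if 0
static void *_answer(void *arg) { pthread_exit((void *)42); return NULL; }

static int
_join_example(void)
{
	pthread_t t;
	void *value;
	int err;

	if ((err = pthread_create(&t, (pthread_attr_t *)NULL, _answer, NULL)) != ESUCCESS)
		return (err);
	if ((err = pthread_join(t, &value)) == ESUCCESS)
		assert(value == (void *)42);
	return (err);
}
#endif
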
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	if (thread->sig == _PTHREAD_SIG)
	{
		*policy = thread->policy;
		*param = thread->param;
		return (ESUCCESS);
	} else
	{
		return (ESRCH);  /* Not a valid thread structure */
	}
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
		      int policy,
		      const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	if (thread->sig == _PTHREAD_SIG)
	{
		switch (policy)
		{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
		}
		thread->policy = policy;
		thread->param = *param;
		ret = thread_policy(thread->kernel_thread, policy, base, count, TRUE);
		if (ret != KERN_SUCCESS)
		{
			return (EINVAL);
		}
		return (ESUCCESS);
	} else
	{
		return (ESRCH); /* Not a valid thread structure */
	}
}

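/* Illustrative sketch (not in the original source): raising a thread to
 * the highest priority this library reports for round-robin scheduling.
 * The quantum field carries over from the current parameters because it
 * isn't public yet. */
#if 0
static int
_boost_priority_example(pthread_t t)
{
	int policy;
	struct sched_param param;

	pthread_getschedparam(t, &policy, &param);
	param.sched_priority = sched_get_priority_max(SCHED_RR);
	return (pthread_setschedparam(t, SCHED_RR, &param));
}
#endif
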
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}

void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	LOCK(once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		(*init_routine)();
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	UNLOCK(once_control->lock);
	return (ESUCCESS);  /* Spec defines no possible errors! */
}

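/* Illustrative sketch (not in the original source): the canonical
 * pthread_once() pattern.  The control variable must start life as
 * PTHREAD_ONCE_INIT; every caller funnels through the lock above and
 * exactly one of them runs the initializer. */
#if 0
static pthread_once_t _example_once = PTHREAD_ONCE_INIT;
static int _example_ready = 0;

static void _example_init(void) { _example_ready = 1; }

static void
_once_example(void)
{
	pthread_once(&_example_once, _example_init);
	assert(_example_ready);		/* guaranteed after the call returns */
}
#endif
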
/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
	if (thread->sig == _PTHREAD_SIG)
	{
		/* Take the lock: cancel_state is read and written under
		   thread->lock everywhere else in this file */
		LOCK(thread->lock);
		thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
		UNLOCK(thread->lock);
		return (ESUCCESS);
	} else
	{
		return (ESRCH);
	}
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		pthread_exit(0);
	}
	UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();
	_pthread_testcancel(self);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self = pthread_self();
	int err = ESUCCESS;
	LOCK(self->lock);
	*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
	{
		/* Clear the old state bits before or-ing in the new state */
		self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK) | state;
	} else
	{
		err = EINVAL;
	}
	UNLOCK(self->lock);
	_pthread_testcancel(self);  /* See if we need to 'die' now... */
	return (err);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();
	int err = ESUCCESS;
	LOCK(self->lock);
	*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
	{
		/* Clear the old type bits before or-ing in the new type */
		self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK) | type;
	} else
	{
		err = EINVAL;
	}
	UNLOCK(self->lock);
	_pthread_testcancel(self);  /* See if we need to 'die' now... */
	return (err);
}

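/* Illustrative sketch (not in the original source): bracketing a
 * non-cancellable critical section with the calls above.  Both setters
 * poll for a pending cancel on the way out, so the re-enabling call may
 * not return if a cancel arrived in the meantime. */
#if 0
static void
_cancel_guard_example(void (*critical)(void))
{
	int old;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
	critical();				/* cannot be cancelled in here */
	pthread_setcancelstate(old, &old);	/* may pthread_exit() here */
}
#endif
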
/*
 * Perform package initialization - called automatically when application starts
 */

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */

static struct _pthread _thread = {0};

static int
pthread_init(void)
{
	pthread_attr_t _attr, *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_basic_info_data_t basic_info;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	mach_msg_type_number_t count;
	int mib[2];
	size_t len;
	int hasvectorunit, numcpus;

	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	kr = host_info(mach_host_self(), flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_attr;
	pthread_attr_init(attrs);
	_pthread_set_self(&_thread);

	_pthread_create(&_thread, attrs, USRSTACK, mach_thread_self());
	thread = &_thread;
	thread->detached = _PTHREAD_CREATE_PARENT;

	/* See if we're on a multiprocessor and set _spin_tries if so.  */
	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;
	len = sizeof(numcpus);
	if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
		if (numcpus > 1) {
			_spin_tries = MP_SPIN_TRIES;
		}
	} else {
		count = HOST_BASIC_INFO_COUNT;
		info = (host_info_t)&basic_info;
		flavor = HOST_BASIC_INFO;
		kr = host_info(mach_host_self(), flavor, info, &count);
		if (kr != KERN_SUCCESS)
			printf("host_info failed (%d)\n", kr);
		else {
			if (basic_info.avail_cpus > 1)
				_spin_tries = MP_SPIN_TRIES;
			/* This is a crude test */
			if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
				_cpu_has_altivec = 1;
		}
	}
	mib[0] = CTL_HW;
	mib[1] = HW_VECTORUNIT;
	len = sizeof(hasvectorunit);
	if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) {
		_cpu_has_altivec = hasvectorunit;
	}
	mig_init(1);		/* enable multi-threaded mig interfaces */
	return 0;
}

int sched_yield(void)
{
	swtch_pri(0);
	return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}

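/* Illustrative sketch (not in the original source): the borrow/return
 * discipline the mutex and condition variable code is expected to follow
 * with the pool above.  A cooperating thread is assumed to signal the
 * semaphore; the pool hands them back LIFO. */
#if 0
static void
_sem_pool_example(void)
{
	semaphore_t sem = new_sem_from_pool();
	kern_return_t kr;

	PTHREAD_MACH_CALL(semaphore_wait(sem), kr);	/* blocks until signalled */
	restore_sem_to_pool(sem);
}
#endif
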
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(void) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
}