/*
 * Copyright 1991-1997 by Open Software Foundation, Inc.
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#define __POSIX_LIB__
#include <assert.h>
#include <stdio.h>      /* For printf(). */
#include <stdlib.h>
#include <errno.h>      /* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>

#include "pthread_internals.h"

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/*
 * [Internal] stack support
 */

size_t _pthread_stack_size = 0;
int _spin_tries = 1;
int _cpu_has_altivec = 0;

/* This global should be used (carefully) by anyone needing to know if a pthread has been
** created.
*/
int __is_threaded = 0;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;

extern mach_port_t thread_recycle_port;

#define STACK_LOWEST(sp)    ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED      (sizeof (struct _pthread))

#ifdef STACK_GROWS_UP

/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START
   ^STACK_SELF
   ^STACK_LOWEST */
#define STACK_BASE(sp)          STACK_LOWEST(sp)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)          STACK_BASE(sp)

#else

/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
                               ^STACK_SELF */

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)

#endif
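
/*
 * Worked example (editorial sketch, not part of the original source):
 * assume __pthread_stack_mask == 0x7ffff (512KB-aligned stacks) and a
 * stack pointer sp == 0x00234567 somewhere inside a thread's stack.
 * With downward-growing stacks:
 *
 *   STACK_LOWEST(sp) == sp & ~0x7ffff      == 0x00200000
 *   STACK_BASE(sp)   == (sp | 0x7ffff) + 1 == 0x00280000
 *   STACK_START(low) == STACK_BASE(low) - sizeof (struct _pthread)
 *
 * so the struct _pthread occupies the topmost STACK_RESERVED bytes of the
 * aligned region, and STACK_SELF(sp) can recover it from any sp within
 * the stack.
 */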

/* This is the struct used to recycle (or terminate) a thread */
/* We stash the pthread_t into the msgh_id field of the message */

typedef struct {
    mach_msg_header_t header;
    mach_msg_trailer_t trailer;
} recycle_msg_t;

/* Set the base address to use as the stack pointer, before adjusting due to the ABI */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, vm_address_t *stack)
{
    kern_return_t kr;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        assert(((vm_offset_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = (vm_address_t)attrs->stackaddr;
        return 0;
    }
    kr = vm_allocate(mach_task_self(), stack, attrs->stacksize + vm_page_size,
                     VM_MAKE_TAG(VM_MEMORY_STACK) | TRUE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* The guard page is the page one higher than the stack */
    /* The stack base is at the lowest address */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size,
                    FALSE, VM_PROT_NONE);
#else
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    kr = vm_protect(mach_task_self(), *stack, vm_page_size, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif

#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
            lowest_stack = 0;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
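
/*
 * Example of the compiled-in path above (editorial sketch, made-up
 * numbers): for the default downward-growing case, a 512KB stacksize and
 * 4KB pages, vm_allocate() reserves 512KB plus one page, vm_protect()
 * turns the lowest page into an inaccessible guard page, and *stack is
 * advanced to the top of the region:
 *
 *   0x00200000  guard page (VM_PROT_NONE)
 *   0x00201000  usable stack (grows down toward the guard page)
 *   0x00281000  *stack returned to the caller (the stack base)
 *
 * A runaway stack therefore faults on the guard page instead of silently
 * overwriting adjacent memory.
 */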

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->freeStackOnExit = TRUE;
    return (ESUCCESS);
}
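
/*
 * Illustrative usage (editorial sketch, compiled out): the attribute calls
 * in this file combine in the usual POSIX pattern. The 512KB stack size is
 * arbitrary; it must be a page multiple of at least PTHREAD_STACK_MIN to
 * satisfy pthread_attr_setstacksize() below.
 */
#if 0
static int example_make_detached_attr(pthread_attr_t *attr)
{
    int err = pthread_attr_init(attr);  /* defaults: joinable, DFLSSIZ stack */
    if (err != ESUCCESS)
        return err;
    err = pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED);
    if (err != ESUCCESS)
        return err;
    return pthread_attr_setstacksize(attr, 512 * 1024);
}
#endif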

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address from a thread attribute structure */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        ((stacksize % vm_page_size) == 0) &&
        (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

pthread_t _cachedThread = (pthread_t)0;

void _clear_thread_cache(void) {
    _cachedThread = (pthread_t)0;
}

/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _clear_thread_cache();
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                vm_address_t stack,
                const mach_port_t kernel_thread)
{
    int res;
    kern_return_t kern_res;
    res = ESUCCESS;
    do
    {
        memset(t, 0, sizeof(*t));
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
        pthread_setschedparam(t, t->policy, &t->param);
        /* Create control semaphores */
        if (t->detached == PTHREAD_CREATE_JOINABLE)
        {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
                                               &t->death,
                                               SYNC_POLICY_FIFO,
                                               0), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create 'death' semaphore: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
                                               &t->joiners,
                                               SYNC_POLICY_FIFO,
                                               0), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create 'joiners' semaphore: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
            t->num_joiners = 0;
        } else
        {
            t->death = MACH_PORT_NULL;
        }
    } while (0);
    return (res);
}

int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    return t->kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t _attr, *attrs;
    vm_address_t stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread;
    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    { /* Set up default parameters */
        attrs = &_attr;
        pthread_attr_init(attrs);
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;
    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        /* Create the Mach thread for this thread */
        PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't create thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            break;
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        _pthread_setup(t, _pthread_body, stack);
        /* Send it on its way */
        set_malloc_singlethreaded(0);
        __is_threaded = 1;
        if (suspended == 0) {
            PTHREAD_MACH_CALL(thread_resume(kernel_thread), kern_res);
        }
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't resume thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            break;
        }
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
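
/*
 * Illustrative usage (editorial sketch, compiled out): creating a joinable
 * thread and collecting its exit value with pthread_join(), as implemented
 * later in this file. worker() and its argument are made up for the example.
 */
#if 0
static void *worker(void *arg)
{
    return arg; /* echoed back as the thread's exit value */
}

static int example_create_and_join(void)
{
    pthread_t th;
    void *value;
    int err = pthread_create(&th, NULL, worker, (void *)42);
    if (err != ESUCCESS)
        return err;
    return pthread_join(th, &value); /* blocks on the 'joiners' semaphore */
}
#endif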

/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    kern_return_t kern_res;
    int num_joiners;
    mach_port_t death;
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached == PTHREAD_CREATE_JOINABLE)
        {
            thread->detached = PTHREAD_CREATE_DETACHED;
            num_joiners = thread->num_joiners;
            death = thread->death;
            thread->death = MACH_PORT_NULL;
            UNLOCK(thread->lock);
            if (num_joiners > 0)
            { /* Have to tell these guys this thread can't be joined with */
                swtch_pri(0);
                PTHREAD_MACH_CALL(semaphore_signal_all(thread->joiners), kern_res);
            }
            /* Destroy 'control' semaphores */
            PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
                                                thread->joiners), kern_res);
            PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
                                                death), kern_res);
            return (ESUCCESS);
        } else
        {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else
    {
        return (ESRCH); /* Not a valid thread */
    }
}

/* Announce that there is a thread ready to be reclaimed for pthread_create */
/* or terminated by pthread_exit. If the thread is reused, it will have its */
/* thread state set and will continue in the thread body function. If it is */
/* terminated, it will be yanked out from under the mach_msg() call. */

static void _pthread_become_available(pthread_t thread) {
    recycle_msg_t msg = { { 0 } };
    kern_return_t ret;

    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = MACH_PORT_NULL;
    msg.header.msgh_id = (int)thread;
    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    ret = mach_msg(&msg.header, MACH_SEND_MSG, msg.header.msgh_size, 0,
                   MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
                   MACH_PORT_NULL);
    while (1) {
        ret = thread_suspend(thread->kernel_thread);
    }
    /* We should never get here */
}

/* Check to see if any threads are available. Return immediately */

static kern_return_t _pthread_check_for_available_threads(recycle_msg_t *msg) {
    return mach_msg(&msg->header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                    sizeof(recycle_msg_t), thread_recycle_port, 0,
                    MACH_PORT_NULL);
}

/* Terminate all available threads and deallocate their stacks */
static void _pthread_reap_threads(void) {
    kern_return_t ret;
    recycle_msg_t msg = { { 0 } };
    while (_pthread_check_for_available_threads(&msg) == KERN_SUCCESS) {
        pthread_t th = (pthread_t)msg.header.msgh_id;
        mach_port_t kernel_thread = th->kernel_thread;
        mach_port_t reply_port = th->reply_port;
        vm_size_t size = (vm_size_t)th->stacksize + vm_page_size;
        vm_address_t addr = (vm_address_t)th->stackaddr;
#if !defined(STACK_GROWS_UP)
        addr -= size;
#endif
        ret = thread_terminate(kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr, "thread_terminate() failed: %s\n",
                    mach_error_string(ret));
        }
        ret = mach_port_destroy(mach_task_self(), reply_port);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_destroy(thread_reply) failed: %s\n",
                    mach_error_string(ret));
        }
        if (th->freeStackOnExit) {
            ret = vm_deallocate(mach_task_self(), addr, size);
            if (ret != KERN_SUCCESS) {
                fprintf(stderr,
                        "vm_deallocate(stack) failed: %s\n",
                        mach_error_string(ret));
            }
        }
        free(th);
    }
}


static void *
stackAddress(void)
{
    unsigned dummy;
    return (void *)((unsigned)&dummy & ~(PTHREAD_STACK_MIN - 1));
}

extern pthread_t _pthread_self(void);

pthread_t
pthread_self(void)
{
    void *myStack = (void *)0;
    pthread_t cachedThread = _cachedThread;
    if (cachedThread) {
        myStack = stackAddress();
        if ((void *)((unsigned)(cachedThread->stackaddr - 1) & ~(PTHREAD_STACK_MIN - 1)) == myStack) {
            return cachedThread;
        }
    }
    _cachedThread = _pthread_self();
    return _cachedThread;
}
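
/*
 * How the cache check above works (editorial note): thread stacks are
 * aligned on PTHREAD_STACK_MIN boundaries, so masking any in-stack address
 * with ~(PTHREAD_STACK_MIN - 1) yields a per-thread stack identifier.
 * stackAddress() masks the address of a local variable, i.e. somewhere in
 * the caller's stack. The cached thread's stackaddr is its stack base, one
 * byte past the top of a downward-growing stack, hence the '- 1' before
 * masking. If the two values agree, the caller is the cached thread and
 * the round trip through _pthread_self() is skipped.
 */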

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    pthread_t self = pthread_self();
    struct _pthread_handler_rec *handler;
    kern_return_t kern_res;
    int num_joiners;
    _clear_thread_cache();
    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);
    LOCK(self->lock);
    if (self->detached == PTHREAD_CREATE_JOINABLE)
    {
        self->detached = _PTHREAD_EXITED;
        self->exit_value = value_ptr;
        num_joiners = self->num_joiners;
        UNLOCK(self->lock);
        if (num_joiners > 0)
        {
            swtch_pri(0);
            PTHREAD_MACH_CALL(semaphore_signal_all(self->joiners), kern_res);
        }
        PTHREAD_MACH_CALL(semaphore_wait(self->death), kern_res);
    } else
        UNLOCK(self->lock);
    /* Destroy thread & reclaim resources */
    if (self->death)
    {
        PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->joiners), kern_res);
        PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->death), kern_res);
    }
    if (self->detached == _PTHREAD_CREATE_PARENT) {
        exit((int)(self->exit_value));
    }

    _pthread_reap_threads();

    _pthread_become_available(self);
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached == PTHREAD_CREATE_JOINABLE)
        {
            thread->num_joiners++;
            UNLOCK(thread->lock);
            PTHREAD_MACH_CALL(semaphore_wait(thread->joiners), kern_res);
            LOCK(thread->lock);
            thread->num_joiners--;
        }
        if (thread->detached == _PTHREAD_EXITED)
        {
            if (thread->num_joiners == 0)
            { /* Give the result to this thread */
                if (value_ptr)
                {
                    *value_ptr = thread->exit_value;
                }
                UNLOCK(thread->lock);
                swtch_pri(0);
                PTHREAD_MACH_CALL(semaphore_signal(thread->death), kern_res);
                return (ESUCCESS);
            } else
            { /* This 'joiner' missed the catch! */
                UNLOCK(thread->lock);
                return (ESRCH);
            }
        } else
        { /* The thread has become anti-social! */
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else
    {
        return (ESRCH); /* Not a valid thread */
    }
}

/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        ret = thread_policy(thread->kernel_thread, policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    LOCK(once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    UNLOCK(once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
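
/*
 * Illustrative usage (editorial sketch, compiled out): all callers funnel
 * through pthread_once(); only the first one runs init_once(), the rest
 * find sig already set and skip it. Names below are made up.
 */
#if 0
static pthread_once_t example_once = PTHREAD_ONCE_INIT;

static void init_once(void)
{
    /* one-time setup goes here */
}

static void example_entry_point(void)
{
    pthread_once(&example_once, init_once); /* safe from any thread */
}
#endif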

/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        /* Clear the old state bits before installing the new state */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        /* Clear the old type bits before installing the new type */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}
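
/*
 * Editorial note on the two masking fixes above: cancel_state packs the
 * enable state, the cancel type, and the pending flag into one word.
 * Assuming single-bit masks (illustrative values: state mask 0x01, type
 * mask 0x02, pending flag 0x04), the original expression
 * (cancel_state & MASK) | new kept exactly the bits it should replace and
 * discarded the rest of the word; masking with ~MASK instead clears only
 * the field being updated and preserves the others:
 *
 *   old = ENABLE|DEFERRED|PENDING (0x07)
 *   setcancelstate(DISABLE): (0x07 & ~0x01) | 0x00 == 0x06
 *   -- state cleared, type and pending flag preserved
 */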

/*
 * Perform package initialization - called automatically when application starts
 */

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */

static struct _pthread _thread = {0};

static int
pthread_init(void)
{
    pthread_attr_t _attr, *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int hasvectorunit, numcpus;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    kr = host_info(mach_host_self(), flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_attr;
    pthread_attr_init(attrs);
    _clear_thread_cache();
    _pthread_set_self(&_thread);

    _pthread_create(&_thread, attrs, USRSTACK, mach_thread_self());
    thread = (pthread_t)malloc(sizeof(struct _pthread));
    memcpy(thread, &_thread, sizeof(struct _pthread));
    _clear_thread_cache();
    _pthread_set_self(thread);
    thread->detached = _PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(mach_host_self(), flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = SPIN_TRIES;
            /* This is a crude test */
            if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
                _cpu_has_altivec = 1;
        }
    }
    mib[0] = CTL_HW;
    mib[1] = HW_VECTORUNIT;
    len = sizeof(hasvectorunit);
    if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) {
        _cpu_has_altivec = hasvectorunit;
    }
    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
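
/*
 * Editorial note: the pool behaves as a LIFO - new_sem_from_pool() hands
 * out sem_pool[sem_pool_current] and bumps the index, while
 * restore_sem_to_pool() decrements the index and stores the returned
 * semaphore in that slot. Callers are expected to return every semaphore
 * they take; an unbalanced restore would underflow sem_pool_current.
 */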

static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(void) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
}