/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#define __POSIX_LIB__
#include <assert.h>
#include <stdio.h>      /* For printf(). */
#include <stdlib.h>
#include <errno.h>      /* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>

#include "pthread_internals.h"

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
static int _pthread_count = 1;

static pthread_lock_t _pthread_count_lock = LOCK_INITIALIZER;

/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}
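
/* Rough sketch (an assumption inferred from the comment above, not a
 * definition taken from this file) of how the LOCK macro presumably relates
 * to this helper:
 *
 *	#define LOCK(l)						\
 *		if (__is_threaded && !_spin_lock_try(&(l)))	\
 *			_spin_lock_retry(&(l))
 *
 * i.e. a single-threaded process skips the atomic dance entirely, and only a
 * contended, multithreaded acquire falls into the spin/depress loop above.
 */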

/* Apparently, bcopy doesn't declare _cpu_has_altivec anymore */
int _cpu_has_altivec = 0;

extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)    ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED      (sizeof (struct _pthread))

#ifdef STACK_GROWS_UP

/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START
   ^STACK_SELF
   ^STACK_LOWEST */
#define STACK_BASE(sp)          STACK_LOWEST(sp)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)          STACK_BASE(sp)

#else

/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
                               ^STACK_SELF */

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)

#endif
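
/* Worked example (illustrative only) for the grows-down case, assuming a
 * hypothetical __pthread_stack_mask of 0x1FFFFF (2MB-aligned stacks):
 * for sp = 0xBFF00000,
 *	STACK_BASE(sp)  = (0xBFF00000 | 0x1FFFFF) + 1 = 0xC0000000
 *	STACK_START(sp) = 0xC0000000 - sizeof(struct _pthread)
 * so the struct _pthread sits at the very top of the stack region and the
 * user stack grows down from just below it.
 */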

#if defined(__ppc__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), (vm_address_t *)stack,
                attrs->stacksize + vm_page_size,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         (vm_address_t *)stack, attrs->stacksize + vm_page_size,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* The guard page is the page one higher than the stack */
    /* The stack base is at the lowest address */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
#else
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    kr = vm_protect(mach_task_self(), (vm_address_t)*stack, vm_page_size, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif

#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack + __pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack + __pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
            lowest_stack = 0;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t)free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
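
/* Resulting layout from the live (#if 1) path above, in the grows-down case
 * (illustrative; assumes the usual 4KB vm_page_size):
 *
 *	low addresses  [ guard page (VM_PROT_NONE) ][ stacksize bytes ]  high addresses
 *	                                                              ^ returned *stack
 *
 * vm_map()/vm_allocate() is asked for stacksize + one page; the extra page is
 * then vm_protect()ed as the guard and *stack is advanced to the high end,
 * from which the user stack grows down toward the guard.
 */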

static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->freeStackOnExit = TRUE;
    return (ESUCCESS);
}
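
/* Example (illustrative only, not part of this file) of the attribute
 * workflow these functions support; 'worker' is a hypothetical start routine:
 *
 *	pthread_attr_t attr;
 *	pthread_t th;
 *
 *	pthread_attr_init(&attr);
 *	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *	pthread_attr_setstacksize(&attr, 512 * 1024);	// page multiple, >= PTHREAD_STACK_MIN
 *	pthread_create(&th, &attr, worker, NULL);
 *	pthread_attr_destroy(&attr);
 */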

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res;
    res = ESUCCESS;

    do
    {
        memset(t, 0, sizeof(*t));
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            pthread_setschedparam(t, t->policy, &t->param);
    } while (0);
    return (res);
}

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or has had) at
 * least one thread apart from the main thread. There is a race if a thread is
 * in the process of being created at the time of the call. It does not say
 * whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}


/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;

    /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used.
     * Check here whether any change in priority or policy is needed.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (suspended) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;
        LOCK(_pthread_count_lock);
        _pthread_count++;
        UNLOCK(_pthread_count_lock);

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}

/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return ESUCCESS;
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}


/*
 * pthread_kill: deliver the signal via the __pthread_kill system call.
 */

int
pthread_kill(
    pthread_t th,
    int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    else
        return (ESRCH);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = (int)thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}

/* Reap the resources for available threads */
static
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + vm_page_size;

#if !defined(STACK_GROWS_UP)
        addr -= size;
#endif
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;

    if (th != &_thread)
        free(th);

    return ESUCCESS;
}

static
void _pthread_reap_threads(void)
{
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = (pthread_t)msg.header.msgh_id;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}
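
/* Summary of the recycle protocol implemented above (descriptive comment):
 * a detached thread's pthread_exit() calls _pthread_become_available(),
 * which queues a message on thread_recycle_port carrying the pthread_t
 * (msgh_id) and the kernel thread right (msgh_local_port). Every
 * pthread_exit() also drains that port via _pthread_reap_threads(), feeding
 * each message to _pthread_reap_thread(), which frees the ports, the stack,
 * and the pthread_t itself once the Mach thread has become a dead name, or
 * re-queues the message if the thread is still running. pthread_join()
 * instead reaps its target directly with _pthread_reap_thread().
 */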

/* For compatibility... */

pthread_t
_pthread_self() {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    struct _pthread_handler_rec *handler;
    pthread_t self = pthread_self();
    kern_return_t kern_res;
    int thread_count;

    /* Keep this thread from receiving any more signals */
    syscall(331, 1);

    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
    } else {
        UNLOCK(self->lock);
        /* with no joiner, we let become available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    LOCK(_pthread_count_lock);
    thread_count = --_pthread_count;
    UNLOCK(_pthread_count_lock);
    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    abort();
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
                }

                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}

/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
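
/* Example (illustrative) of moving a thread to round-robin scheduling with
 * the calls defined above; 'th' is a hypothetical pthread_t:
 *
 *	struct sched_param sp;
 *	int policy;
 *
 *	pthread_getschedparam(th, &policy, &sp);
 *	sp.sched_priority = sched_get_priority_max(SCHED_RR);
 *	pthread_setschedparam(th, SCHED_RR, &sp);
 */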

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    LOCK(once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    UNLOCK(once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
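
/* Example (illustrative) of the intended pthread_once pattern:
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init_tables(void);	// hypothetical one-time setup
 *	...
 *	pthread_once(&once, init_tables);	// init_tables runs exactly once
 */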

/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}
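
/* Example (illustrative) of deferred cancellation as implemented above;
 * 'do_some_work' is a hypothetical function:
 *
 *	void *worker(void *arg)
 *	{
 *		for (;;) {
 *			do_some_work();
 *			pthread_testcancel();	// exits here once pthread_cancel() has been called
 *		}
 *	}
 *
 * Note that in this implementation a pending cancel only takes effect inside
 * pthread_testcancel(), pthread_setcancelstate() or pthread_setcanceltype();
 * there is no asynchronous delivery.
 */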

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    if (oldstate)
        *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self);  /* See if we need to 'die' now... */
    return (err);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    if (oldtype)
        *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self);  /* See if we need to 'die' now... */
    return (err);
}

int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}

/*
 * Perform package initialization - called automatically when application starts
 */

extern int _cpu_capabilities;

#define kHasAltivec 0x01
#define kCache32    0x04
#define kUseDcba    0x20

static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int numcpus;

    extern int _bcopy_initialize(void);

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    thread = &_thread;
    _pthread_set_self(thread);
    _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = MP_SPIN_TRIES;
            /* This is a crude test */
            if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
                _cpu_has_altivec = 1;
        }
    }
    mach_port_deallocate(mach_task_self(), host);

    len = sizeof(_cpu_capabilities);
    sysctlbyname("hw._cpu_capabilities", &_cpu_capabilities, &len, NULL, 0);

    _bcopy_initialize();

    mig_init(1);    /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
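
/* Usage note: pthread_join() above is the pool's main client - it takes a
 * semaphore with new_sem_from_pool() before locking the target thread ("in
 * case we need it") and hands it back with restore_sem_to_pool() when the
 * join completes, so the common path never calls semaphore_create().
 */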

static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(void) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    UNLOCK(_pthread_count_lock);
    _pthread_count = 1;
}