apple/libc (Libc-320.1.3) / pthreads / pthread.c
1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "pthread_internals.h"
53
54 #include <assert.h>
55 #include <stdio.h> /* For printf(). */
56 #include <stdlib.h>
57 #include <errno.h> /* For __mach_errno_addr() prototype. */
58 #include <sys/time.h>
59 #include <sys/resource.h>
60 #include <sys/sysctl.h>
61 #include <sys/queue.h>
62 #include <sys/syscall.h>
63 #include <machine/vmparam.h>
64 #include <mach/vm_statistics.h>
65 #define __APPLE_API_PRIVATE
66 #include <machine/cpu_capabilities.h>
67
68 __private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);
69
70 /* Per-thread kernel support */
71 extern void _pthread_set_self(pthread_t);
72 extern void mig_init(int);
73
74 /* Get CPU capabilities from the kernel */
75 __private_extern__ void _init_cpu_capabilities(void);
76
77 /* Needed to tell the malloc subsystem we're going multithreaded */
78 extern void set_malloc_singlethreaded(int);
79
80 /* Used when we need to call into the kernel with no reply port */
81 extern pthread_lock_t reply_port_lock;
82
83 /* We'll implement this when the main thread is a pthread */
84 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
85 static struct _pthread _thread = {0};
86
87 /* This global should be used (carefully) by anyone needing to know if a
88 ** pthread has been created.
89 */
90 int __is_threaded = 0;
91 /* _pthread_count is protected by _pthread_list_lock */
92 static int _pthread_count = 1;
93
94 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
95
96 /* Same implementation as LOCK, but without the __is_threaded check */
97 int _spin_tries = 0;
98 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
99 {
100 int tries = _spin_tries;
101 do {
102 if (tries-- > 0)
103 continue;
104 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
105 tries = _spin_tries;
106 } while(!_spin_lock_try(lock));
107 }
108
109 extern mach_port_t thread_recycle_port;
110
111 /* These are used to keep track of a semaphore pool shared by mutexes and condition
112 ** variables.
113 */
114
115 static semaphore_t *sem_pool = NULL;
116 static int sem_pool_count = 0;
117 static int sem_pool_current = 0;
118 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
119
120 static int default_priority;
121 static int max_priority;
122 static int min_priority;
123 static int pthread_concurrency;
124
125 /*
126 * [Internal] stack support
127 */
128 size_t _pthread_stack_size = 0;
129 #define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
130 #define STACK_RESERVED (sizeof (struct _pthread))
131
132
133 /* The stack grows towards lower addresses:
134 |<----------------user stack|struct _pthread|
135 ^STACK_LOWEST ^STACK_START ^STACK_BASE
136 ^STACK_SELF */
137
138 #define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
139 #define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
140 #define STACK_SELF(sp) STACK_START(sp)
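/* Worked example of the layout macros above (editor's illustration, assuming a
 * hypothetical __pthread_stack_mask of 0x7FFFF, i.e. a 512KB-aligned stack):
 *
 *   sp           = 0xB0040000
 *   STACK_LOWEST = sp & ~0x7FFFF      = 0xB0000000   (bottom, where the guard sits)
 *   STACK_BASE   = (sp | 0x7FFFF) + 1 = 0xB0080000   (one byte past the top)
 *   STACK_START  = STACK_BASE - STACK_RESERVED       (top minus struct _pthread)
 *   STACK_SELF   = STACK_START                       (struct _pthread lives in the reserve)
 */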
141
142 #if defined(__ppc__)
143 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
144 #elif defined(__i386__)
145 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
146 #else
147 #error Need to define a stack address hint for this architecture
148 #endif
149
150 /* Set the base address to use as the stack pointer, before adjusting due to the ABI.
151 * The guard pages for stack-overflow protection are also allocated here.
152 * If the stack was already allocated (stackaddr in attr), then no guard pages are
153 * set up for the thread.
154 */
155
156 static int
157 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
158 {
159 kern_return_t kr;
160 size_t guardsize;
161 #if 1
162 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
163 if (attrs->stackaddr != NULL) {
164 /* No guard pages setup in this case */
165 assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
166 *stack = attrs->stackaddr;
167 return 0;
168 }
169
170 guardsize = attrs->guardsize;
171 *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
172 kr = vm_map(mach_task_self(), (vm_address_t *)stack,
173 attrs->stacksize + guardsize,
174 vm_page_size-1,
175 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
176 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
177 VM_INHERIT_DEFAULT);
178 if (kr != KERN_SUCCESS)
179 kr = vm_allocate(mach_task_self(),
180 (vm_address_t *)stack, attrs->stacksize + guardsize,
181 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
182 if (kr != KERN_SUCCESS) {
183 return EAGAIN;
184 }
185 /* The guard page is at the lowest address */
186 /* The stack base is the highest address */
187 if (guardsize)
188 kr = vm_protect(mach_task_self(), (vm_address_t)*stack, guardsize, FALSE, VM_PROT_NONE);
189 *stack += attrs->stacksize + guardsize;
190
191 #else
192 vm_address_t cur_stack = (vm_address_t)0;
193 if (free_stacks == 0)
194 {
195 /* Allocating guard pages is done by doubling
196 * the actual stack size, since STACK_BASE() needs
197 * to have stacks aligned on stack_size. Allocating just
198 * one page takes as much memory as allocating more pages
199 * since it will remain one entry in the vm map.
200 * Besides, allocating more than one page allows tracking the
201 * overflow pattern when the overflow is bigger than one page.
202 */
203 #ifndef NO_GUARD_PAGES
204 # define GUARD_SIZE(a) (2*(a))
205 # define GUARD_MASK(a) (((a)<<1) | 1)
206 #else
207 # define GUARD_SIZE(a) (a)
208 # define GUARD_MASK(a) (a)
209 #endif
210 while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
211 {
212 lowest_stack -= GUARD_SIZE(__pthread_stack_size);
213 /* Ensure stack is there */
214 kr = vm_allocate(mach_task_self(),
215 &lowest_stack,
216 GUARD_SIZE(__pthread_stack_size),
217 FALSE);
218 #ifndef NO_GUARD_PAGES
219 if (kr == KERN_SUCCESS) {
220 kr = vm_protect(mach_task_self(),
221 lowest_stack,
222 __pthread_stack_size,
223 FALSE, VM_PROT_NONE);
224 lowest_stack += __pthread_stack_size;
225 if (kr == KERN_SUCCESS)
226 break;
227 }
228 #else
229 if (kr == KERN_SUCCESS)
230 break;
231 #endif
232 }
233 if (lowest_stack > 0)
234 free_stacks = (vm_address_t *)lowest_stack;
235 else
236 {
237 /* Too bad. We'll just have to take what comes.
238 Use vm_map instead of vm_allocate so we can
239 specify alignment. */
240 kr = vm_map(mach_task_self(), &lowest_stack,
241 GUARD_SIZE(__pthread_stack_size),
242 GUARD_MASK(__pthread_stack_mask),
243 TRUE /* anywhere */, MEMORY_OBJECT_NULL,
244 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
245 VM_INHERIT_DEFAULT);
246 /* This really shouldn't fail and if it does I don't
247 know what to do. */
248 #ifndef NO_GUARD_PAGES
249 if (kr == KERN_SUCCESS) {
250 kr = vm_protect(mach_task_self(),
251 lowest_stack,
252 __pthread_stack_size,
253 FALSE, VM_PROT_NONE);
254 lowest_stack += __pthread_stack_size;
255 }
256 #endif
257 free_stacks = (vm_address_t *)lowest_stack;
258 lowest_stack = 0;
259 }
260 *free_stacks = 0; /* No other free stacks */
261 }
262 cur_stack = STACK_START((vm_address_t) free_stacks);
263 free_stacks = (vm_address_t *)*free_stacks;
264 cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
265 #endif
266 return 0;
267 }
268
269 static pthread_attr_t _pthread_attr_default = {0};
270
271 /*
272 * Destroy a thread attribute structure
273 */
274 int
275 pthread_attr_destroy(pthread_attr_t *attr)
276 {
277 if (attr->sig == _PTHREAD_ATTR_SIG)
278 {
279 return (ESUCCESS);
280 } else
281 {
282 return (EINVAL); /* Not an attribute structure! */
283 }
284 }
285
286 /*
287 * Get the 'detach' state from a thread attribute structure.
288 * Note: written as a helper function for info hiding
289 */
290 int
291 pthread_attr_getdetachstate(const pthread_attr_t *attr,
292 int *detachstate)
293 {
294 if (attr->sig == _PTHREAD_ATTR_SIG)
295 {
296 *detachstate = attr->detached;
297 return (ESUCCESS);
298 } else
299 {
300 return (EINVAL); /* Not an attribute structure! */
301 }
302 }
303
304 /*
305 * Get the 'inherit scheduling' info from a thread attribute structure.
306 * Note: written as a helper function for info hiding
307 */
308 int
309 pthread_attr_getinheritsched(const pthread_attr_t *attr,
310 int *inheritsched)
311 {
312 if (attr->sig == _PTHREAD_ATTR_SIG)
313 {
314 *inheritsched = attr->inherit;
315 return (ESUCCESS);
316 } else
317 {
318 return (EINVAL); /* Not an attribute structure! */
319 }
320 }
321
322 /*
323 * Get the scheduling parameters from a thread attribute structure.
324 * Note: written as a helper function for info hiding
325 */
326 int
327 pthread_attr_getschedparam(const pthread_attr_t *attr,
328 struct sched_param *param)
329 {
330 if (attr->sig == _PTHREAD_ATTR_SIG)
331 {
332 *param = attr->param;
333 return (ESUCCESS);
334 } else
335 {
336 return (EINVAL); /* Not an attribute structure! */
337 }
338 }
339
340 /*
341 * Get the scheduling policy from a thread attribute structure.
342 * Note: written as a helper function for info hiding
343 */
344 int
345 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
346 int *policy)
347 {
348 if (attr->sig == _PTHREAD_ATTR_SIG)
349 {
350 *policy = attr->policy;
351 return (ESUCCESS);
352 } else
353 {
354 return (EINVAL); /* Not an attribute structure! */
355 }
356 }
357
358 /* Retain the existing default stack size of 512K rather than depending on the main thread's default stack size */
359 static const size_t DEFAULT_STACK_SIZE = (512*1024);
360 /*
361 * Initialize a thread attribute structure to default values.
362 */
363 int
364 pthread_attr_init(pthread_attr_t *attr)
365 {
366 attr->stacksize = DEFAULT_STACK_SIZE;
367 attr->stackaddr = NULL;
368 attr->sig = _PTHREAD_ATTR_SIG;
369 attr->param.sched_priority = default_priority;
370 attr->param.quantum = 10; /* quantum isn't public yet */
371 attr->detached = PTHREAD_CREATE_JOINABLE;
372 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
373 attr->policy = _PTHREAD_DEFAULT_POLICY;
374 attr->freeStackOnExit = TRUE;
375 attr->guardsize = vm_page_size;
376 return (ESUCCESS);
377 }
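/* Illustrative sketch (not part of the original source): the usual lifecycle of an
 * attribute object with the defaults set above, shown only to document the API.
 */
#if 0
#include <pthread.h>

static void *worker(void *arg) { return arg; }

int attr_example(void)
{
    pthread_attr_t attr;
    pthread_t t;
    void *result;

    pthread_attr_init(&attr);                    /* defaults as initialized above */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    pthread_create(&t, &attr, worker, NULL);
    pthread_attr_destroy(&attr);                 /* safe once the thread is created */
    pthread_join(t, &result);
    return 0;
}
#endif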
378
379 /*
380 * Set the 'detach' state in a thread attribute structure.
381 * Note: written as a helper function for info hiding
382 */
383 int
384 pthread_attr_setdetachstate(pthread_attr_t *attr,
385 int detachstate)
386 {
387 if (attr->sig == _PTHREAD_ATTR_SIG)
388 {
389 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
390 (detachstate == PTHREAD_CREATE_DETACHED))
391 {
392 attr->detached = detachstate;
393 return (ESUCCESS);
394 } else
395 {
396 return (EINVAL);
397 }
398 } else
399 {
400 return (EINVAL); /* Not an attribute structure! */
401 }
402 }
403
404 /*
405 * Set the 'inherit scheduling' state in a thread attribute structure.
406 * Note: written as a helper function for info hiding
407 */
408 int
409 pthread_attr_setinheritsched(pthread_attr_t *attr,
410 int inheritsched)
411 {
412 if (attr->sig == _PTHREAD_ATTR_SIG)
413 {
414 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
415 (inheritsched == PTHREAD_EXPLICIT_SCHED))
416 {
417 attr->inherit = inheritsched;
418 return (ESUCCESS);
419 } else
420 {
421 return (EINVAL);
422 }
423 } else
424 {
425 return (EINVAL); /* Not an attribute structure! */
426 }
427 }
428
429 /*
430 * Set the scheduling parameters in a thread attribute structure.
431 * Note: written as a helper function for info hiding
432 */
433 int
434 pthread_attr_setschedparam(pthread_attr_t *attr,
435 const struct sched_param *param)
436 {
437 if (attr->sig == _PTHREAD_ATTR_SIG)
438 {
439 /* TODO: Validate sched_param fields */
440 attr->param = *param;
441 return (ESUCCESS);
442 } else
443 {
444 return (EINVAL); /* Not an attribute structure! */
445 }
446 }
447
448 /*
449 * Set the scheduling policy in a thread attribute structure.
450 * Note: written as a helper function for info hiding
451 */
452 int
453 pthread_attr_setschedpolicy(pthread_attr_t *attr,
454 int policy)
455 {
456 if (attr->sig == _PTHREAD_ATTR_SIG)
457 {
458 if ((policy == SCHED_OTHER) ||
459 (policy == SCHED_RR) ||
460 (policy == SCHED_FIFO))
461 {
462 attr->policy = policy;
463 return (ESUCCESS);
464 } else
465 {
466 return (EINVAL);
467 }
468 } else
469 {
470 return (EINVAL); /* Not an attribute structure! */
471 }
472 }
473
474 /*
475 * Set the scope for the thread.
476 * We currently only provide PTHREAD_SCOPE_SYSTEM
477 */
478 int
479 pthread_attr_setscope(pthread_attr_t *attr,
480 int scope)
481 {
482 if (attr->sig == _PTHREAD_ATTR_SIG) {
483 if (scope == PTHREAD_SCOPE_SYSTEM) {
484 /* No attribute yet for the scope */
485 return (ESUCCESS);
486 } else if (scope == PTHREAD_SCOPE_PROCESS) {
487 return (ENOTSUP);
488 }
489 }
490 return (EINVAL); /* Not an attribute structure! */
491 }
492
493 /*
494 * Get the scope for the thread.
495 * We currently only provide PTHREAD_SCOPE_SYSTEM
496 */
497 int
498 pthread_attr_getscope(pthread_attr_t *attr,
499 int *scope)
500 {
501 if (attr->sig == _PTHREAD_ATTR_SIG) {
502 *scope = PTHREAD_SCOPE_SYSTEM;
503 return (ESUCCESS);
504 }
505 return (EINVAL); /* Not an attribute structure! */
506 }
507
508 /* Get the base stack address of the given thread */
509 int
510 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
511 {
512 if (attr->sig == _PTHREAD_ATTR_SIG) {
513 *stackaddr = attr->stackaddr;
514 return (ESUCCESS);
515 } else {
516 return (EINVAL); /* Not an attribute structure! */
517 }
518 }
519
520 int
521 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
522 {
523 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
524 attr->stackaddr = stackaddr;
525 attr->freeStackOnExit = FALSE;
526 return (ESUCCESS);
527 } else {
528 return (EINVAL); /* Not an attribute structure! */
529 }
530 }
531
532 int
533 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
534 {
535 if (attr->sig == _PTHREAD_ATTR_SIG) {
536 *stacksize = attr->stacksize;
537 return (ESUCCESS);
538 } else {
539 return (EINVAL); /* Not an attribute structure! */
540 }
541 }
542
543 int
544 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
545 {
546 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
547 attr->stacksize = stacksize;
548 return (ESUCCESS);
549 } else {
550 return (EINVAL); /* Not an attribute structure! */
551 }
552 }
553
554 int
555 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
556 {
557 if (attr->sig == _PTHREAD_ATTR_SIG) {
558 u_int32_t addr = (u_int32_t)attr->stackaddr;
559
560 addr -= attr->stacksize;
561 *stackaddr = (void *)addr;
562 *stacksize = attr->stacksize;
563 return (ESUCCESS);
564 } else {
565 return (EINVAL); /* Not an attribute structure! */
566 }
567 }
568
569 /* By the Single UNIX Specification, the stackaddr is the base address, i.e. the
570 * lowest addressable byte. This is not the same convention as in pthread_attr_setstackaddr.
571 */
572 int
573 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
574 {
575 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
576 (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
577 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
578 u_int32_t addr = (u_int32_t)stackaddr;
579
580 addr += stacksize;
581 attr->stackaddr = (void *)addr;
582 attr->stacksize = stacksize;
583 attr->freeStackOnExit = FALSE;
584 return (ESUCCESS);
585 } else {
586 return (EINVAL); /* Not an attribute structure! */
587 }
588 }
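/* Illustrative sketch (not part of the original source): the two entry points take
 * opposite ends of the same region, as the comment above describes.
 */
#if 0
void stack_conventions(char *region, size_t size)    /* region is page-aligned */
{
    pthread_attr_t attr;

    pthread_attr_init(&attr);
    /* SUS convention: pass the lowest address plus the size... */
    pthread_attr_setstack(&attr, region, size);
    /* ...which is equivalent to passing the top of the region here: */
    pthread_attr_setstackaddr(&attr, region + size);
    pthread_attr_setstacksize(&attr, size);
}
#endif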
589
590
591 /*
592 * Set the guardsize attribute in the attr.
593 */
594 int
595 pthread_attr_setguardsize(pthread_attr_t *attr,
596 size_t guardsize)
597 {
598 if (attr->sig == _PTHREAD_ATTR_SIG) {
599 /* A guardsize of 0 is valid; it means no guard pages */
600 if ((guardsize % vm_page_size) == 0) {
601 attr->guardsize = guardsize;
602 return (ESUCCESS);
603 } else
604 return(EINVAL);
605 }
606 return (EINVAL); /* Not an attribute structure! */
607 }
608
609 /*
610 * Get the guardsize attribute in the attr.
611 */
612 int
613 pthread_attr_getguardsize(const pthread_attr_t *attr,
614 size_t *guardsize)
615 {
616 if (attr->sig == _PTHREAD_ATTR_SIG) {
617 *guardsize = attr->guardsize;
618 return (ESUCCESS);
619 }
620 return (EINVAL); /* Not an attribute structure! */
621 }
622
623
624 /*
625 * Create and start execution of a new thread.
626 */
627
628 static void
629 _pthread_body(pthread_t self)
630 {
631 _pthread_set_self(self);
632 pthread_exit((self->fun)(self->arg));
633 }
634
635 int
636 _pthread_create(pthread_t t,
637 const pthread_attr_t *attrs,
638 void *stack,
639 const mach_port_t kernel_thread)
640 {
641 int res;
642 res = ESUCCESS;
643
644 do
645 {
646 memset(t, 0, sizeof(*t));
647 t->tsd[0] = t;
648 t->stacksize = attrs->stacksize;
649 t->stackaddr = (void *)stack;
650 t->guardsize = attrs->guardsize;
651 t->kernel_thread = kernel_thread;
652 t->detached = attrs->detached;
653 t->inherit = attrs->inherit;
654 t->policy = attrs->policy;
655 t->param = attrs->param;
656 t->freeStackOnExit = attrs->freeStackOnExit;
657 t->mutexes = (struct _pthread_mutex *)NULL;
658 t->sig = _PTHREAD_SIG;
659 t->reply_port = MACH_PORT_NULL;
660 t->cthread_self = NULL;
661 LOCK_INIT(t->lock);
662 t->plist.le_next = (struct _pthread *)0;
663 t->plist.le_prev = (struct _pthread **)0;
664 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
665 t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
666 t->death = SEMAPHORE_NULL;
667
668 if (kernel_thread != MACH_PORT_NULL)
669 pthread_setschedparam(t, t->policy, &t->param);
670 } while (0);
671 return (res);
672 }
673
674 /* Need to deprecate this in future */
675 int
676 _pthread_is_threaded(void)
677 {
678 return __is_threaded;
679 }
680
681 /* Non-portable public API for finding out whether this process has (or had) at least
682 * one thread apart from the main thread. There could be a race if a thread is in the
683 * process of being created at the time of the call. It does not tell whether more
684 * than one thread exists at this point in time.
685 */
686 int
687 pthread_is_threaded_np(void)
688 {
689 return (__is_threaded);
690 }
691
692 mach_port_t
693 pthread_mach_thread_np(pthread_t t)
694 {
695 thread_t kernel_thread;
696
697 /* Wait for the creator to initialize it */
698 while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
699 sched_yield();
700
701 return kernel_thread;
702 }
703
704 size_t
705 pthread_get_stacksize_np(pthread_t t)
706 {
707 return t->stacksize;
708 }
709
710 void *
711 pthread_get_stackaddr_np(pthread_t t)
712 {
713 return t->stackaddr;
714 }
715
716 mach_port_t
717 _pthread_reply_port(pthread_t t)
718 {
719 return t->reply_port;
720 }
721
722
723 /* returns non-zero if the current thread is the main thread */
724 int
725 pthread_main_np(void)
726 {
727 pthread_t self = pthread_self();
728
729 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
730 }
731
732 static int
733 _pthread_create_suspended(pthread_t *thread,
734 const pthread_attr_t *attr,
735 void *(*start_routine)(void *),
736 void *arg,
737 int suspended)
738 {
739 pthread_attr_t *attrs;
740 void *stack;
741 int res;
742 pthread_t t;
743 kern_return_t kern_res;
744 mach_port_t kernel_thread = MACH_PORT_NULL;
745 int needresume;
746
747 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
748 { /* Set up default parameters */
749 attrs = &_pthread_attr_default;
750 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
751 return EINVAL;
752 }
753 res = ESUCCESS;
754
755 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check
756 * here whether any change in priority or policy is needed.
757 */
758 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
759 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
760 needresume = 1;
761 suspended = 1;
762 } else
763 needresume = 0;
764
765 do
766 {
767 /* Allocate a stack for the thread */
768 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
769 break;
770 }
771 t = (pthread_t)malloc(sizeof(struct _pthread));
772 *thread = t;
773 if (suspended) {
774 /* Create the Mach thread for this thread */
775 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
776 if (kern_res != KERN_SUCCESS)
777 {
778 printf("Can't create thread: %d\n", kern_res);
779 res = EINVAL; /* Need better error here? */
780 break;
781 }
782 }
783 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
784 {
785 break;
786 }
787 set_malloc_singlethreaded(0);
788 __is_threaded = 1;
789
790 /* Send it on its way */
791 t->arg = arg;
792 t->fun = start_routine;
793 /* Now set it up to execute */
794 LOCK(_pthread_list_lock);
795 LIST_INSERT_HEAD(&__pthread_head, t, plist);
796 _pthread_count++;
797 UNLOCK(_pthread_list_lock);
798 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
799 } while (0);
800 return (res);
801 }
802
803 int
804 pthread_create(pthread_t *thread,
805 const pthread_attr_t *attr,
806 void *(*start_routine)(void *),
807 void *arg)
808 {
809 return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
810 }
811
812 int
813 pthread_create_suspended_np(pthread_t *thread,
814 const pthread_attr_t *attr,
815 void *(*start_routine)(void *),
816 void *arg)
817 {
818 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
819 }
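/* Illustrative sketch (not part of the original source): a thread created with the
 * suspended variant does not run until its Mach thread is explicitly resumed.
 */
#if 0
#include <mach/mach.h>

static void *task(void *arg) { return arg; }

void suspended_example(void)
{
    pthread_t t;

    pthread_create_suspended_np(&t, NULL, task, NULL);
    /* ...set up any state the thread depends on... */
    thread_resume(pthread_mach_thread_np(t));   /* let it start running */
    pthread_join(t, NULL);
}
#endif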
820
821 /*
822 * Make a thread 'detached' - no longer 'joinable' by other threads.
823 */
824 int
825 pthread_detach(pthread_t thread)
826 {
827 if (thread->sig == _PTHREAD_SIG)
828 {
829 LOCK(thread->lock);
830 if (thread->detached & PTHREAD_CREATE_JOINABLE)
831 {
832 if (thread->detached & _PTHREAD_EXITED) {
833 UNLOCK(thread->lock);
834 pthread_join(thread, NULL);
835 return ESUCCESS;
836 } else {
837 semaphore_t death = thread->death;
838
839 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
840 thread->detached |= PTHREAD_CREATE_DETACHED;
841 UNLOCK(thread->lock);
842 if (death)
843 (void) semaphore_signal(death);
844 return (ESUCCESS);
845 }
846 } else {
847 UNLOCK(thread->lock);
848 return (EINVAL);
849 }
850 } else {
851 return (ESRCH); /* Not a valid thread */
852 }
853 }
854
855
856 /*
857 * pthread_kill: forward the signal to the __pthread_kill system call
858 */
859
860 extern int __pthread_kill(mach_port_t, int);
861
862 int
863 pthread_kill (
864 pthread_t th,
865 int sig)
866 {
867 int error = 0;
868
869 if ((sig < 0) || (sig > NSIG))
870 return(EINVAL);
871
872 if (th && (th->sig == _PTHREAD_SIG)) {
873 error = __pthread_kill(pthread_mach_thread_np(th), sig);
874 if (error == -1)
875 error = errno;
876 return(error);
877 }
878 else
879 return(ESRCH);
880 }
881
882 /* Announce that there are pthread resources ready to be reclaimed in a */
883 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
884 /* thread underneath is terminated right away. */
885 static
886 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
887 mach_msg_empty_rcv_t msg;
888 kern_return_t ret;
889
890 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
891 MACH_MSG_TYPE_MOVE_SEND);
892 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
893 msg.header.msgh_remote_port = thread_recycle_port;
894 msg.header.msgh_local_port = kernel_thread;
895 msg.header.msgh_id = (int)thread;
896 ret = mach_msg_send(&msg.header);
897 assert(ret == MACH_MSG_SUCCESS);
898 }
899
900 /* Reap the resources for available threads */
901 static
902 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
903 mach_port_type_t ptype;
904 kern_return_t ret;
905 task_t self;
906
907 self = mach_task_self();
908 if (kernel_thread != MACH_PORT_DEAD) {
909 ret = mach_port_type(self, kernel_thread, &ptype);
910 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
911 /* not quite dead yet... */
912 return EAGAIN;
913 }
914 ret = mach_port_deallocate(self, kernel_thread);
915 if (ret != KERN_SUCCESS) {
916 fprintf(stderr,
917 "mach_port_deallocate(kernel_thread) failed: %s\n",
918 mach_error_string(ret));
919 }
920 }
921
922 if (th->reply_port != MACH_PORT_NULL) {
923 ret = mach_port_mod_refs(self, th->reply_port,
924 MACH_PORT_RIGHT_RECEIVE, -1);
925 if (ret != KERN_SUCCESS) {
926 fprintf(stderr,
927 "mach_port_mod_refs(reply_port) failed: %s\n",
928 mach_error_string(ret));
929 }
930 }
931
932 if (th->freeStackOnExit) {
933 vm_address_t addr = (vm_address_t)th->stackaddr;
934 vm_size_t size;
935
936 size = (vm_size_t)th->stacksize + th->guardsize;
937
938 addr -= size;
939 ret = vm_deallocate(self, addr, size);
940 if (ret != KERN_SUCCESS) {
941 fprintf(stderr,
942 "vm_deallocate(stack) failed: %s\n",
943 mach_error_string(ret));
944 }
945 }
946
947 if (value_ptr)
948 *value_ptr = th->exit_value;
949
950 if (th != &_thread)
951 free(th);
952
953 return ESUCCESS;
954 }
955
956 static
957 void _pthread_reap_threads(void)
958 {
959 mach_msg_empty_rcv_t msg;
960 kern_return_t ret;
961
962 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
963 sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
964 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
965 while (ret == MACH_MSG_SUCCESS) {
966 mach_port_t kernel_thread = msg.header.msgh_remote_port;
967 pthread_t thread = (pthread_t)msg.header.msgh_id;
968
969 if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
970 {
971 /* not dead yet, put it back for someone else to reap, stop here */
972 _pthread_become_available(thread, kernel_thread);
973 return;
974 }
975 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
976 sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
977 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
978 }
979 }
980
981 /* For compatibility... */
982
983 pthread_t
984 _pthread_self() {
985 return pthread_self();
986 }
987
988 /*
989 * Terminate a thread.
990 */
991 void
992 pthread_exit(void *value_ptr)
993 {
994 struct _pthread_handler_rec *handler;
995 pthread_t self = pthread_self();
996 kern_return_t kern_res;
997 int thread_count;
998
999 /* Keep this thread from receiving any signals from now on */
1000 syscall(331,1);
1001
1002 while ((handler = self->cleanup_stack) != 0)
1003 {
1004 (handler->routine)(handler->arg);
1005 self->cleanup_stack = handler->next;
1006 }
1007 _pthread_tsd_cleanup(self);
1008
1009 _pthread_reap_threads();
1010
1011 LOCK(self->lock);
1012 self->detached |= _PTHREAD_EXITED;
1013
1014 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1015 mach_port_t death = self->death;
1016 self->exit_value = value_ptr;
1017 UNLOCK(self->lock);
1018 /* the joiner will need a kernel thread reference, leave ours for it */
1019 if (death) {
1020 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1021 if (kern_res != KERN_SUCCESS)
1022 fprintf(stderr,
1023 "semaphore_signal(death) failed: %s\n",
1024 mach_error_string(kern_res));
1025 }
1026 LOCK(_pthread_list_lock);
1027 thread_count = --_pthread_count;
1028 UNLOCK(_pthread_list_lock);
1029 } else {
1030 UNLOCK(self->lock);
1031 LOCK(_pthread_list_lock);
1032 LIST_REMOVE(self, plist);
1033 thread_count = --_pthread_count;
1034 UNLOCK(_pthread_list_lock);
1035 /* with no joiner, we let _pthread_become_available consume our cached ref */
1036 _pthread_become_available(self, pthread_mach_thread_np(self));
1037 }
1038
1039 if (thread_count <= 0)
1040 exit(0);
1041
1042 /* Use a new reference to terminate ourselves. Should never return. */
1043 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1044 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1045 mach_error_string(kern_res));
1046 abort();
1047 }
1048
1049 /*
1050 * Wait for a thread to terminate and obtain its exit value.
1051 */
1052 int
1053 pthread_join(pthread_t thread,
1054 void **value_ptr)
1055 {
1056 kern_return_t kern_res;
1057 int res = ESUCCESS;
1058
1059 if (thread->sig == _PTHREAD_SIG)
1060 {
1061 semaphore_t death = new_sem_from_pool(); /* in case we need it */
1062
1063 LOCK(thread->lock);
1064 if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
1065 thread->death == SEMAPHORE_NULL)
1066 {
1067 pthread_t self = pthread_self();
1068
1069 assert(thread->joiner == NULL);
1070 if (thread != self && (self == NULL || self->joiner != thread))
1071 {
1072 int already_exited = (thread->detached & _PTHREAD_EXITED);
1073
1074 thread->death = death;
1075 thread->joiner = self;
1076 UNLOCK(thread->lock);
1077
1078 if (!already_exited)
1079 {
1080 /* Wait for it to signal... */
1081 do {
1082 PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
1083 } while (kern_res != KERN_SUCCESS);
1084 }
1085
1086 LOCK(_pthread_list_lock);
1087 LIST_REMOVE(thread, plist);
1088 UNLOCK(_pthread_list_lock);
1089 /* ... and wait for it to really be dead */
1090 while ((res = _pthread_reap_thread(thread,
1091 thread->kernel_thread,
1092 value_ptr)) == EAGAIN)
1093 {
1094 sched_yield();
1095 }
1096 } else {
1097 UNLOCK(thread->lock);
1098 res = EDEADLK;
1099 }
1100 } else {
1101 UNLOCK(thread->lock);
1102 res = EINVAL;
1103 }
1104 restore_sem_to_pool(death);
1105 return res;
1106 }
1107 return ESRCH;
1108 }
1109
1110 /*
1111 * Get the scheduling policy and scheduling parameters for a thread.
1112 */
1113 int
1114 pthread_getschedparam(pthread_t thread,
1115 int *policy,
1116 struct sched_param *param)
1117 {
1118 if (thread->sig == _PTHREAD_SIG)
1119 {
1120 *policy = thread->policy;
1121 *param = thread->param;
1122 return (ESUCCESS);
1123 } else
1124 {
1125 return (ESRCH); /* Not a valid thread structure */
1126 }
1127 }
1128
1129 /*
1130 * Set the scheduling policy and scheduling parameters for a thread.
1131 */
1132 int
1133 pthread_setschedparam(pthread_t thread,
1134 int policy,
1135 const struct sched_param *param)
1136 {
1137 policy_base_data_t bases;
1138 policy_base_t base;
1139 mach_msg_type_number_t count;
1140 kern_return_t ret;
1141
1142 if (thread->sig == _PTHREAD_SIG)
1143 {
1144 switch (policy)
1145 {
1146 case SCHED_OTHER:
1147 bases.ts.base_priority = param->sched_priority;
1148 base = (policy_base_t)&bases.ts;
1149 count = POLICY_TIMESHARE_BASE_COUNT;
1150 break;
1151 case SCHED_FIFO:
1152 bases.fifo.base_priority = param->sched_priority;
1153 base = (policy_base_t)&bases.fifo;
1154 count = POLICY_FIFO_BASE_COUNT;
1155 break;
1156 case SCHED_RR:
1157 bases.rr.base_priority = param->sched_priority;
1158 /* quantum isn't public yet */
1159 bases.rr.quantum = param->quantum;
1160 base = (policy_base_t)&bases.rr;
1161 count = POLICY_RR_BASE_COUNT;
1162 break;
1163 default:
1164 return (EINVAL);
1165 }
1166 ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
1167 if (ret != KERN_SUCCESS)
1168 {
1169 return (EINVAL);
1170 }
1171 thread->policy = policy;
1172 thread->param = *param;
1173 return (ESUCCESS);
1174 } else
1175 {
1176 return (ESRCH); /* Not a valid thread structure */
1177 }
1178 }
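/* Illustrative sketch (not part of the original source): raising a thread to
 * fixed-priority FIFO scheduling through the call above.
 */
#if 0
int boost_to_fifo(pthread_t t)
{
    struct sched_param param;
    int policy;

    pthread_getschedparam(t, &policy, &param);
    param.sched_priority = sched_get_priority_max(SCHED_FIFO);
    return pthread_setschedparam(t, SCHED_FIFO, &param);
}
#endif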
1179
1180 /*
1181 * Get the minimum priority for the given policy (the policy argument is currently ignored)
1182 */
1183 int
1184 sched_get_priority_min(int policy)
1185 {
1186 return default_priority - 16;
1187 }
1188
1189 /*
1190 * Get the maximum priority for the given policy
1191 */
1192 int
1193 sched_get_priority_max(int policy)
1194 {
1195 return default_priority + 16;
1196 }
1197
1198 /*
1199 * Determine if two thread identifiers represent the same thread.
1200 */
1201 int
1202 pthread_equal(pthread_t t1,
1203 pthread_t t2)
1204 {
1205 return (t1 == t2);
1206 }
1207
1208 __private_extern__ void
1209 _pthread_set_self(pthread_t p)
1210 {
1211 extern void __pthread_set_self(pthread_t);
1212 if (p == 0) {
1213 bzero(&_thread, sizeof(struct _pthread));
1214 p = &_thread;
1215 }
1216 p->tsd[0] = p;
1217 __pthread_set_self(p);
1218 }
1219
1220 void
1221 cthread_set_self(void *cself)
1222 {
1223 pthread_t self = pthread_self();
1224 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1225 _pthread_set_self(cself);
1226 return;
1227 }
1228 self->cthread_self = cself;
1229 }
1230
1231 void *
1232 ur_cthread_self(void) {
1233 pthread_t self = pthread_self();
1234 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1235 return (void *)self;
1236 }
1237 return self->cthread_self;
1238 }
1239
1240 /*
1241 * Execute a function exactly one time in a thread-safe fashion.
1242 */
1243 int
1244 pthread_once(pthread_once_t *once_control,
1245 void (*init_routine)(void))
1246 {
1247 _spin_lock(&once_control->lock);
1248 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1249 {
1250 (*init_routine)();
1251 once_control->sig = _PTHREAD_ONCE_SIG;
1252 }
1253 _spin_unlock(&once_control->lock);
1254 return (ESUCCESS); /* Spec defines no possible errors! */
1255 }
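/* Illustrative sketch (not part of the original source): the standard one-time
 * initialization pattern served by pthread_once above.
 */
#if 0
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int initialized_state;

static void init_state(void) { initialized_state = 1; }

void use_state(void)
{
    pthread_once(&init_once, init_state);   /* init_state runs exactly once */
    /* ...initialized_state is now safe to read... */
}
#endif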
1256
1257 /*
1258 * Cancel a thread
1259 */
1260 int
1261 pthread_cancel(pthread_t thread)
1262 {
1263 if (thread->sig == _PTHREAD_SIG)
1264 {
1265 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
1266 return (ESUCCESS);
1267 } else
1268 {
1269 return (ESRCH);
1270 }
1271 }
1272
1273 /*
1274 * Insert a cancellation point in a thread.
1275 */
1276 static void
1277 _pthread_testcancel(pthread_t thread)
1278 {
1279 LOCK(thread->lock);
1280 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1281 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1282 {
1283 UNLOCK(thread->lock);
1284 pthread_exit(0);
1285 }
1286 UNLOCK(thread->lock);
1287 }
1288
1289 void
1290 pthread_testcancel(void)
1291 {
1292 pthread_t self = pthread_self();
1293 _pthread_testcancel(self);
1294 }
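/* Illustrative sketch (not part of the original source): because cancellation here
 * is deferred and checked only at explicit points, a long-running loop must poll.
 */
#if 0
static void *cancellable_worker(void *arg)
{
    for (;;) {
        pthread_testcancel();    /* exits via pthread_exit(0) if cancelled */
        /* ...one unit of work... */
    }
    return NULL;                 /* not reached */
}
#endif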
1295
1296 /*
1297 * Query/update the cancelability 'state' of a thread
1298 */
1299 int
1300 pthread_setcancelstate(int state, int *oldstate)
1301 {
1302 pthread_t self = pthread_self();
1303 int err = ESUCCESS;
1304 LOCK(self->lock);
1305 if (oldstate)
1306 *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
1307 if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
1308 {
1309 self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
1310 } else
1311 {
1312 err = EINVAL;
1313 }
1314 UNLOCK(self->lock);
1315 _pthread_testcancel(self); /* See if we need to 'die' now... */
1316 return (err);
1317 }
1318
1319 /*
1320 * Query/update the cancelability 'type' of a thread
1321 */
1322 int
1323 pthread_setcanceltype(int type, int *oldtype)
1324 {
1325 pthread_t self = pthread_self();
1326 int err = ESUCCESS;
1327 LOCK(self->lock);
1328 if (oldtype)
1329 *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
1330 if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
1331 {
1332 self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
1333 } else
1334 {
1335 err = EINVAL;
1336 }
1337 UNLOCK(self->lock);
1338 _pthread_testcancel(self); /* See if we need to 'die' now... */
1339 return (err);
1340 }
1341
1342 int
1343 pthread_getconcurrency(void)
1344 {
1345 return(pthread_concurrency);
1346 }
1347
1348 int
1349 pthread_setconcurrency(int new_level)
1350 {
1351 pthread_concurrency = new_level;
1352 return(ESUCCESS);
1353 }
1354
1355 /*
1356 * Perform package initialization - called automatically when application starts
1357 */
1358
1359 static int
1360 pthread_init(void)
1361 {
1362 pthread_attr_t *attrs;
1363 pthread_t thread;
1364 kern_return_t kr;
1365 host_basic_info_data_t basic_info;
1366 host_priority_info_data_t priority_info;
1367 host_info_t info;
1368 host_flavor_t flavor;
1369 host_t host;
1370 mach_msg_type_number_t count;
1371 int mib[2];
1372 size_t len;
1373 int numcpus;
1374
1375 count = HOST_PRIORITY_INFO_COUNT;
1376 info = (host_info_t)&priority_info;
1377 flavor = HOST_PRIORITY_INFO;
1378 host = mach_host_self();
1379 kr = host_info(host, flavor, info, &count);
1380 if (kr != KERN_SUCCESS)
1381 printf("host_info failed (%d); probably need privilege.\n", kr);
1382 else {
1383 default_priority = priority_info.user_priority;
1384 min_priority = priority_info.minimum_priority;
1385 max_priority = priority_info.maximum_priority;
1386 }
1387 attrs = &_pthread_attr_default;
1388 pthread_attr_init(attrs);
1389
1390 LIST_INIT(&__pthread_head);
1391 LOCK_INIT(_pthread_list_lock);
1392 thread = &_thread;
1393 LIST_INSERT_HEAD(&__pthread_head, thread, plist);
1394 _pthread_set_self(thread);
1395 _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
1396 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
1397
1398 /* See if we're on a multiprocessor and set _spin_tries if so. */
1399 mib[0] = CTL_HW;
1400 mib[1] = HW_NCPU;
1401 len = sizeof(numcpus);
1402 if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
1403 if (numcpus > 1) {
1404 _spin_tries = MP_SPIN_TRIES;
1405 }
1406 } else {
1407 count = HOST_BASIC_INFO_COUNT;
1408 info = (host_info_t)&basic_info;
1409 flavor = HOST_BASIC_INFO;
1410 kr = host_info(host, flavor, info, &count);
1411 if (kr != KERN_SUCCESS)
1412 printf("host_info failed (%d)\n", kr);
1413 else {
1414 if (basic_info.avail_cpus > 1)
1415 _spin_tries = MP_SPIN_TRIES;
1416 }
1417 }
1418
1419 mach_port_deallocate(mach_task_self(), host);
1420
1421 _init_cpu_capabilities();
1422
1423 #if defined(__ppc__)
1424
1425 /* Use fsqrt instruction in sqrt() if available. */
1426 if (_cpu_capabilities & kHasFsqrt) {
1427 extern size_t hw_sqrt_len;
1428 extern double sqrt( double );
1429 extern double hw_sqrt( double );
1430 extern void sys_icache_invalidate(void *, size_t);
1431
1432 memcpy ( (void *)sqrt, (void *)hw_sqrt, hw_sqrt_len );
1433 sys_icache_invalidate((void *)sqrt, hw_sqrt_len);
1434 }
1435 #endif
1436
1437 mig_init(1); /* enable multi-threaded mig interfaces */
1438 return 0;
1439 }
1440
1441 int sched_yield(void)
1442 {
1443 swtch_pri(0);
1444 return 0;
1445 }
1446
1447 /* This is the "magic" that gets the initialization routine called when the application starts */
1448 int (*_cthread_init_routine)(void) = pthread_init;
1449
1450 /* Get a semaphore from the pool, growing it if necessary */
1451
1452 __private_extern__ semaphore_t new_sem_from_pool(void) {
1453 kern_return_t res;
1454 semaphore_t sem;
1455 int i;
1456
1457 LOCK(sem_pool_lock);
1458 if (sem_pool_current == sem_pool_count) {
1459 sem_pool_count += 16;
1460 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
1461 for (i = sem_pool_current; i < sem_pool_count; i++) {
1462 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
1463 }
1464 }
1465 sem = sem_pool[sem_pool_current++];
1466 UNLOCK(sem_pool_lock);
1467 return sem;
1468 }
1469
1470 /* Put a semaphore back into the pool */
1471 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
1472 LOCK(sem_pool_lock);
1473 sem_pool[--sem_pool_current] = sem;
1474 UNLOCK(sem_pool_lock);
1475 }
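/* Illustrative note (not part of the original source): every semaphore taken with
 * new_sem_from_pool() must eventually be handed back via restore_sem_to_pool(),
 * as pthread_join does; the pool is a simple stack guarded by sem_pool_lock.
 */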
1476
1477 static void sem_pool_reset(void) {
1478 LOCK(sem_pool_lock);
1479 sem_pool_count = 0;
1480 sem_pool_current = 0;
1481 sem_pool = NULL;
1482 UNLOCK(sem_pool_lock);
1483 }
1484
1485 __private_extern__ void _pthread_fork_child(pthread_t p) {
1486 /* Just in case somebody had it locked... */
1487 UNLOCK(sem_pool_lock);
1488 sem_pool_reset();
1489 /* No need to hold the pthread_list_lock as no one other than this
1490 * thread is present at this time
1491 */
1492 LIST_INIT(&__pthread_head);
1493 LOCK_INIT(_pthread_list_lock);
1494 LIST_INSERT_HEAD(&__pthread_head, p, plist);
1495 _pthread_count = 1;
1496 }
1497