/* apple/libc.git (Libc-391.4.3) - pthreads/pthread.c */
/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"

#include <assert.h>
#include <stdio.h>          /* For printf(). */
#include <stdlib.h>
#include <errno.h>          /* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>


#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
    mach_msg_header_t header;
    pthread_t thread;
    mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;


__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;

/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}

extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static void _pthread_exit(pthread_t self, void *value_ptr);

/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)        ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED          (sizeof (struct _pthread))


/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
                               ^STACK_SELF */

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)
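
/*
 * Worked example (illustrative only, assuming __pthread_stack_mask == 0x7FFFF,
 * i.e. 512KB size-aligned stacks; the actual mask is defined elsewhere):
 *
 *     sp                = 0xB00C1234
 *     STACK_LOWEST(sp)  = 0xB00C1234 & ~0x7FFFF       = 0xB0080000
 *     STACK_BASE(sp)    = (0xB00C1234 | 0x7FFFF) + 1  = 0xB0100000
 *     STACK_START(sp)   = STACK_BASE(sp) - sizeof (struct _pthread)
 *
 * So in this scheme the struct _pthread occupies the highest STACK_RESERVED
 * bytes of the aligned region, and the usable stack grows down from STACK_START.
 */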

#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guard pages
 * are set up for the thread.
 */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
    vm_address_t stackaddr;
    size_t guardsize;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    guardsize = attrs->guardsize;
    stackaddr = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), &stackaddr,
                attrs->stacksize + guardsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         &stackaddr, attrs->stacksize + guardsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
    *stack = (void *)(stackaddr + attrs->stacksize + guardsize);

#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad.  We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment.  */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
            lowest_stack = 0;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t)free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
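
/*
 * Resulting layout for the default (library-allocated) case, sketched with the
 * default 512KB stack plus one 4KB guard page for illustration:
 *
 *     stackaddr                      stackaddr+guardsize            *stack
 *     |--- guard page (PROT_NONE) ---|--- usable stack ------------|
 *     low addresses                                   high addresses
 *
 * The returned *stack is the highest address; the new thread's stack pointer
 * grows down from there toward the protected guard page.
 */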

static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* Retain the existing stack size of 512K and do not depend on the main thread's default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->freeStackOnExit = TRUE;
    attr->guardsize = vm_page_size;
    return (ESUCCESS);
}
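
/*
 * Typical attribute usage (illustrative sketch; `worker' is a hypothetical
 * start routine, not part of this file):
 *
 *     pthread_attr_t attr;
 *     pthread_t tid;
 *
 *     pthread_attr_init(&attr);
 *     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *     pthread_attr_setstacksize(&attr, 1024 * 1024);  // multiple of vm_page_size, >= PTHREAD_STACK_MIN
 *     pthread_create(&tid, &attr, worker, NULL);
 *     pthread_attr_destroy(&attr);
 */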

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* By the SUS spec, stackaddr here is the base address, i.e. the lowest
 * addressable byte of the stack region.  Note that this is not the same
 * meaning as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((uintptr_t)stackaddr % vm_page_size) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
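
/*
 * Illustrative contrast of the two entry points (sketch; `base' is a
 * hypothetical page-aligned region of `size' bytes):
 *
 *     pthread_attr_setstack(&attr, base, size);
 *         // stores base + size internally; stack occupies [base, base+size)
 *     pthread_attr_setstackaddr(&attr, (char *)base + size);
 *         // expects the high end directly; pair it with
 *         // pthread_attr_setstacksize(&attr, size)
 */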

/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
                          size_t guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            return (ESUCCESS);
        } else
            return (EINVAL);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
                          size_t *guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}


/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    _pthread_exit(self, (self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res;
    res = ESUCCESS;

    do
    {
        memset(t, 0, sizeof(*t));
        t->tsd[0] = t;
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->guardsize = attrs->guardsize;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->plist.le_next = (struct _pthread *)0;
        t->plist.le_prev = (struct _pthread **)0;
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            pthread_setschedparam(t, t->policy, &t->param);
    } while (0);
    return (res);
}

/* Need to deprecate this in the future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or has had) at
 * least one thread apart from the main thread.  There may be a race if a
 * thread is in the process of being created at the time of the call.  It does
 * not tell whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}


/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;

    /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used.
     * Check here whether any change in priority or policy is needed.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (suspended) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        LIST_INSERT_HEAD(&__pthread_head, t, plist);
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
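
/*
 * Example use of the suspended variant (illustrative; `worker' is a
 * hypothetical start routine):
 *
 *     pthread_t t;
 *
 *     pthread_create_suspended_np(&t, NULL, worker, NULL);
 *     ... adjust the thread (e.g. pthread_setschedparam) while it is parked ...
 *     thread_resume(pthread_mach_thread_np(t));
 */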

/*
 * Mark a thread as 'detached' - it is no longer 'joinable' by other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return ESUCCESS;
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}


/*
 * pthread_kill call to system call
 */

extern int __pthread_kill(mach_port_t, int);

int
pthread_kill(
    pthread_t th,
    int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    else
        return (ESRCH);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    pthread_reap_msg_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = 0x44454144; /* 'DEAD' */
    msg.thread = thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}

/* Reap the resources for available threads */
__private_extern__
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + th->guardsize;

        addr -= size;
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;

    if (th != &_thread)
        free(th);

    return ESUCCESS;
}

static
void _pthread_reap_threads(void)
{
    pthread_reap_msg_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof msg, thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = msg.thread;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof msg, thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

/* For compatibility... */

pthread_t
_pthread_self() {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
    struct __darwin_pthread_handler_rec *handler;
    kern_return_t kern_res;
    int thread_count;

    /* Arrange for this thread to stop receiving signals.
     * (On this Darwin release, syscall 331 is believed to be
     * __disable_threadsignal; the raw number is used here.)
     */
    syscall(331, 1);

    while ((handler = self->__cleanup_stack) != 0)
    {
        (handler->__routine)(handler->__arg);
        self->__cleanup_stack = handler->__next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
        LOCK(_pthread_list_lock);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
    } else {
        UNLOCK(self->lock);
        LOCK(_pthread_list_lock);
        LIST_REMOVE(self, plist);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
        /* with no joiner, we let _pthread_become_available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    abort();
}

void
pthread_exit(void *value_ptr)
{
    _pthread_exit(pthread_self(), value_ptr);
}

/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
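
/*
 * Example (illustrative): bump the calling thread to the FIFO policy at its
 * maximum priority.
 *
 *     struct sched_param param;
 *     int policy;
 *
 *     pthread_getschedparam(pthread_self(), &policy, &param);
 *     param.sched_priority = sched_get_priority_max(SCHED_FIFO);
 *     pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
 */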

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

__private_extern__ void
_pthread_set_self(pthread_t p)
{
    extern void __pthread_set_self(pthread_t);
    if (p == 0) {
        bzero(&_thread, sizeof(struct _pthread));
        p = &_thread;
    }
    p->tsd[0] = p;
    __pthread_set_self(p);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    _spin_lock(&once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    _spin_unlock(&once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
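
/*
 * Typical usage (illustrative; `init_tables' is a hypothetical one-time
 * initializer):
 *
 *     static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *     pthread_once(&once, init_tables);  // first caller runs it; others
 *                                        // block on the spin lock until done
 */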

/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        if (isconforming)
            pthread_exit(PTHREAD_CANCELED);
        else
            pthread_exit(0);
    }
    UNLOCK(thread->lock);
}



int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}

/*
 * Perform package initialization - called automatically when application starts
 */

static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int numcpus;
    void *stackaddr;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    thread = &_thread;
    LIST_INSERT_HEAD(&__pthread_head, thread, plist);
    _pthread_set_self(thread);

    mib[0] = CTL_KERN;
    mib[1] = KERN_USRSTACK;
    len = sizeof(stackaddr);
    if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
        stackaddr = (void *)USRSTACK;
    _pthread_create(thread, attrs, stackaddr, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = MP_SPIN_TRIES;
        }
    }

    mach_port_deallocate(mach_task_self(), host);

    _init_cpu_capabilities();

#if defined(_OBJC_PAGE_BASE_ADDRESS)
    {
        vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
        kr = vm_map(mach_task_self(),
                    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
                    VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
                    MACH_PORT_NULL,
                    (vm_address_t)0, FALSE,
                    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
                    VM_INHERIT_DEFAULT);
        /* We ignore the return result here. The ObjC runtime will just have to deal. */
    }
#endif

    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}
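
/*
 * Illustrative pairing, as used by pthread_join below: take a semaphore out
 * of the pool, wait/signal on it, then push it back (the pool behaves as a
 * LIFO stack guarded by sem_pool_lock):
 *
 *     semaphore_t death = new_sem_from_pool();
 *     ... semaphore_wait(death) / semaphore_signal(death) ...
 *     restore_sem_to_pool(death);
 */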

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}

static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(pthread_t p) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    /* No need to hold the pthread_list_lock as no one other than this
     * thread is present at this time
     */
    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    LIST_INSERT_HEAD(&__pthread_head, p, plist);
    _pthread_count = 1;
}

#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr);

#endif /* !BUILDING_VARIANT ] */

#if __DARWIN_UNIX03

static void __posix_join_cleanup(void *arg)
{
    pthread_t thread = (pthread_t)arg;
    int already_exited, res;
    void *dummy;
    semaphore_t death;

    LOCK(thread->lock);
    death = thread->death;
    already_exited = (thread->detached & _PTHREAD_EXITED);

    if (!already_exited) {
        thread->joiner = (struct _pthread *)NULL;
        UNLOCK(thread->lock);
        restore_sem_to_pool(death);
    } else {
        UNLOCK(thread->lock);
        while ((res = _pthread_reap_thread(thread,
                                           thread->kernel_thread,
                                           &dummy)) == EAGAIN)
        {
            sched_yield();
        }
        restore_sem_to_pool(death);
    }
}

#endif /* __DARWIN_UNIX03 */


/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
#if __DARWIN_UNIX03
                    /* Wait for it to signal... */
                    pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
                    do {
                        res = __semwait_signal(death, 0, 0, 0, 0, 0);
                    } while ((res < 0) && (errno == EINTR));
                    pthread_cleanup_pop(0);

#else /* __DARWIN_UNIX03 */
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
#endif /* __DARWIN_UNIX03 */
                }
#if __DARWIN_UNIX03
                else {
                    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) == (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
                        res = PTHREAD_CANCELED;
                }
#endif /* __DARWIN_UNIX03 */

                LOCK(_pthread_list_lock);
                LIST_REMOVE(thread, plist);
                UNLOCK(_pthread_list_lock);
                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}
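
/*
 * Example (illustrative; `worker' is a hypothetical start routine):
 *
 *     void *result;
 *     pthread_t t;
 *
 *     pthread_create(&t, NULL, worker, NULL);
 *     if (pthread_join(t, &result) == 0)
 *         ... result is worker's return value (or PTHREAD_CANCELED) ...
 */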

/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

    if (thread->sig == _PTHREAD_SIG)
    {
#if __DARWIN_UNIX03
        int state;
        LOCK(thread->lock);
        state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        UNLOCK(thread->lock);
        if (state & PTHREAD_CANCEL_ENABLE)
            __pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();

#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
    _pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
    _pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */

}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();

#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

    switch (state) {
    case PTHREAD_CANCEL_ENABLE:
#if __DARWIN_UNIX03
        __pthread_canceled(1);
#endif /* __DARWIN_UNIX03 */
        break;
    case PTHREAD_CANCEL_DISABLE:
#if __DARWIN_UNIX03
        __pthread_canceled(2);
#endif /* __DARWIN_UNIX03 */
        break;
    default:
        return EINVAL;
    }

    self = pthread_self();
    LOCK(self->lock);
    if (oldstate)
        *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
    self->cancel_state |= state;
    UNLOCK(self->lock);
#if !__DARWIN_UNIX03
    _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
#endif /* !__DARWIN_UNIX03 */
    return (0);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();

#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

    if ((type != PTHREAD_CANCEL_DEFERRED) &&
        (type != PTHREAD_CANCEL_ASYNCHRONOUS))
        return EINVAL;
    self = pthread_self();
    LOCK(self->lock);
    if (oldtype)
        *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
    self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
    self->cancel_state |= type;
    UNLOCK(self->lock);
#if !__DARWIN_UNIX03
    _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
#endif /* !__DARWIN_UNIX03 */
    return (0);
}

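
/*
 * Example of deferred cancellation (illustrative; worker, cleanup, and
 * do_work are hypothetical):
 *
 *     void *worker(void *arg)
 *     {
 *         pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
 *         pthread_cleanup_push(cleanup, arg);
 *         for (;;) {
 *             do_work(arg);
 *             pthread_testcancel();  // cancellation point
 *         }
 *         pthread_cleanup_pop(0);
 *     }
 *
 * Another thread calls pthread_cancel(t); the worker then exits at its next
 * cancellation point with PTHREAD_CANCELED, running `cleanup' on the way out.
 */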