apple/libc.git (Libc-498.1.1) - pthreads/pthread.c
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
54
55 #include <assert.h>
56 #include <stdio.h> /* For printf(). */
57 #include <stdlib.h>
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <signal.h>
60 #include <sys/time.h>
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
64 #include <machine/vmparam.h>
65 #include <mach/vm_statistics.h>
66 #define __APPLE_API_PRIVATE
67 #include <machine/cpu_capabilities.h>
68 #include <libkern/OSAtomic.h>
69 #if defined(__ppc__)
70 #include <libkern/OSCrossEndian.h>
71 #endif
72
73
74 #ifndef BUILDING_VARIANT /* [ */
75
76 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
77
78
79
80 /* Per-thread kernel support */
81 extern void _pthread_set_self(pthread_t);
82 extern void mig_init(int);
83 static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
84 static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
85 void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
86 static void _pthread_tsd_reinit(pthread_t t);
87 static int _new_pthread_create_suspended(pthread_t *thread,
88 const pthread_attr_t *attr,
89 void *(*start_routine)(void *),
90 void *arg,
91 int create_susp);
92
93 /* Get CPU capabilities from the kernel */
94 __private_extern__ void _init_cpu_capabilities(void);
95
96 /* Needed to tell the malloc subsystem we're going multithreaded */
97 extern void set_malloc_singlethreaded(int);
98
99 /* Used when we need to call into the kernel with no reply port */
100 extern pthread_lock_t reply_port_lock;
101 int _pthread_find_thread(pthread_t thread);
102
103 /* Mach message used to notify that a thread needs to be reaped */
104
105 typedef struct _pthread_reap_msg_t {
106 mach_msg_header_t header;
107 pthread_t thread;
108 mach_msg_trailer_t trailer;
109 } pthread_reap_msg_t;
110
111 /* We'll implement this when the main thread is a pthread */
112 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
113 static struct _pthread _thread = {0};
114
115 /* This global should be used (carefully) by anyone needing to know if a
116 ** pthread has been created.
117 */
118 int __is_threaded = 0;
119 /* _pthread_count is protected by _pthread_list_lock */
120 static int _pthread_count = 1;
121 int __unix_conforming = 0;
122 __private_extern__ size_t pthreadsize = 0;
123
124 /* under rosetta we will use old style creation of threads */
125 static int __oldstyle = 0;
126
127 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
128
129 /* Same implementation as LOCK, but without the __is_threaded check */
130 int _spin_tries = 0;
131 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
132 {
133 int tries = _spin_tries;
134 do {
135 if (tries-- > 0)
136 continue;
137 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
138 tries = _spin_tries;
139 } while(!_spin_lock_try(lock));
140 }
141
142 extern mach_port_t thread_recycle_port;
143
144 /* These are used to keep track of a semaphore pool shared by mutexes and condition
145 ** variables.
146 */
147
148 static semaphore_t *sem_pool = NULL;
149 static int sem_pool_count = 0;
150 static int sem_pool_current = 0;
151 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
152
153 static int default_priority;
154 static int max_priority;
155 static int min_priority;
156 static int pthread_concurrency;
157
158 static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
159
160 static void _pthread_exit(pthread_t self, void *value_ptr);
161 int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
162 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
163 static pthread_attr_t _pthread_attr_default = {0};
164 static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
165 static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
166 static int kernel_workq_setup = 0;
167 static volatile int32_t kernel_workq_count = 0;
168 static volatile unsigned int user_workq_count = 0;
169 #define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kernel */
170 static int wqreadyprio = 0; /* current highest prio queue ready with items */
171
172 __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
173 __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
174
175 struct _pthread_workqueue_head __pthread_workq0_head;
176 struct _pthread_workqueue_head __pthread_workq1_head;
177 struct _pthread_workqueue_head __pthread_workq2_head;
178 struct _pthread_workqueue_head __pthread_workq3_head;
179 struct _pthread_workqueue_head __pthread_workq4_head;
180 pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};
181
182 static void workqueue_list_lock(void);
183 static void workqueue_list_unlock(void);
184 static int valid_workq(pthread_workqueue_t);
185 static void pick_nextworkqueue_droplock(void);
186 static int post_nextworkitem(pthread_workqueue_t workq);
187 static void _pthread_workq_return(pthread_t self);
188 static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
189 void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
190 extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
191 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, int flags);
192 static pthread_workitem_t alloc_workitem(void);
193 static void free_workitem(pthread_workitem_t);
194 static pthread_workqueue_t alloc_workqueue(void);
195 static void free_workqueue(pthread_workqueue_t);
196 static int _pthread_work_internal_init(void);
197 static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
198
199 /* workq_ops commands */
200 #define WQOPS_QUEUE_ADD 1
201 #define WQOPS_QUEUE_REMOVE 2
202 #define WQOPS_THREAD_RETURN 4
203
204 /*
205 * Flags field passed to bsdthread_create and back in pthread_start
206 31 <---------------------------------> 0
207 _________________________________________
208 | flags(8) | policy(8) | importance(16) |
209 -----------------------------------------
210 */
211 void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
212
213 #define PTHREAD_START_CUSTOM 0x01000000
214 #define PTHREAD_START_SETSCHED 0x02000000
215 #define PTHREAD_START_DETACHED 0x04000000
216 #define PTHREAD_START_POLICY_BITSHIFT 16
217 #define PTHREAD_START_POLICY_MASK 0xff
218 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
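/*
 * Illustrative sketch (not part of the original source): how a flags word for
 * __bsdthread_create() can be packed from the fields in the layout above,
 * mirroring what _new_pthread_create_suspended() does later in this file.
 */
#if 0
static unsigned int
pack_pthread_start_flags(int policy, int importance, int detached)
{
	unsigned int flags = 0;

	if (detached == PTHREAD_CREATE_DETACHED)
		flags |= PTHREAD_START_DETACHED;
	flags |= PTHREAD_START_SETSCHED;
	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
	return (flags);
}
#endif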
219
220 extern pthread_t __bsdthread_create(void (*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
221 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
222
223 #if defined(__ppc__) || defined(__ppc64__)
224 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
225 #elif defined(__i386__) || defined(__x86_64__)
226 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
227 #else
228 #error Need to define a stack address hint for this architecture
229 #endif
230
231 /* Set the base address to use as the stack pointer, before adjusting for the ABI.
232 * The guard pages for stack-overflow protection are also allocated here.
233 * If the stack was already allocated (stackaddr in attr) then no guard pages are
234 * set up for the thread.
235 */
236
237 static int
238 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
239 {
240 kern_return_t kr;
241 vm_address_t stackaddr;
242 size_t guardsize;
243
244 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
245 if (attrs->stackaddr != NULL) {
246 /* No guard pages setup in this case */
247 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
248 *stack = attrs->stackaddr;
249 return 0;
250 }
251
252 guardsize = attrs->guardsize;
253 stackaddr = PTHREAD_STACK_HINT;
254 kr = vm_map(mach_task_self(), &stackaddr,
255 attrs->stacksize + guardsize,
256 vm_page_size-1,
257 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
258 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
259 VM_INHERIT_DEFAULT);
260 if (kr != KERN_SUCCESS)
261 kr = vm_allocate(mach_task_self(),
262 &stackaddr, attrs->stacksize + guardsize,
263 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
264 if (kr != KERN_SUCCESS) {
265 return EAGAIN;
266 }
267 /* The guard page is at the lowest address */
268 /* The stack base is the highest address */
269 if (guardsize)
270 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
271 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
272 return 0;
273 }
274
275 static int
276 _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
277 {
278 kern_return_t kr;
279 pthread_t t;
280 vm_address_t stackaddr;
281 size_t guardsize, allocsize;
282
283 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
284
285 if (attrs->stackaddr != NULL) {
286 /* No guard pages setup in this case */
287 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
288 *stack = attrs->stackaddr;
289 t = (pthread_t)malloc(pthreadsize);
290 _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
291 t->freeStackOnExit = 0;
292 t->freeaddr = 0;
293 t->freesize = 0;
294 *thread = t;
295 return 0;
296 }
297
298 guardsize = attrs->guardsize;
299 allocsize = attrs->stacksize + guardsize + pthreadsize;
300 stackaddr = PTHREAD_STACK_HINT;
301 kr = vm_map(mach_task_self(), &stackaddr,
302 allocsize,
303 vm_page_size-1,
304 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
305 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
306 VM_INHERIT_DEFAULT);
307 if (kr != KERN_SUCCESS)
308 kr = vm_allocate(mach_task_self(),
309 &stackaddr, allocsize,
310 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
311 if (kr != KERN_SUCCESS) {
312 return EAGAIN;
313 }
314 /* The guard page is at the lowest address */
315 /* The stack base is the highest address */
316 if (guardsize)
317 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
318
319
320 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
321
322 t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
323 _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
324 t->kernalloc = 0;
325 t->freesize = allocsize;
326 t->freeaddr = (void *)stackaddr;
327 t->freeStackOnExit = 1;
328 *thread = t;
329
330 return 0;
331 }
332
333 static kern_return_t
334 _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
335 {
336 kern_return_t res = 0;
337 vm_address_t freeaddr;
338 size_t freesize;
339 task_t self = mach_task_self();
340 int thread_count;
341 mach_port_t kport;
342 semaphore_t joinsem = SEMAPHORE_NULL;
343
344 #if WQ_TRACE
345 __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
346 #endif
347 kport = t->kernel_thread;
348 joinsem = t->joiner_notify;
349
350 if (t->freeStackOnExit) {
351 freeaddr = (vm_address_t)t->freeaddr;
352 if (freestruct)
353 freesize = t->stacksize + t->guardsize + pthreadsize;
354 else
355 freesize = t->stacksize + t->guardsize;
356 if (termthread) {
357 mig_dealloc_reply_port(MACH_PORT_NULL);
358 LOCK(_pthread_list_lock);
359 if (freestruct != 0) {
360 TAILQ_REMOVE(&__pthread_head, t, plist);
361 /* if parent has not returned from create yet keep pthread_t */
362 #if WQ_TRACE
363 __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
364 #endif
365 if (t->parentcheck == 0)
366 freesize -= pthreadsize;
367 }
368 t->childexit = 1;
369 thread_count = --_pthread_count;
370 UNLOCK(_pthread_list_lock);
371
372 #if WQ_TRACE
373 __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
374 #endif
375 if (thread_count <=0)
376 exit(0);
377 else
378 __bsdthread_terminate(freeaddr, freesize, kport, joinsem);
379 abort();
380 } else {
381 #if WQ_TRACE
382 __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
383 #endif
384 res = vm_deallocate(mach_task_self(), freeaddr, freesize);
385 }
386 } else {
387 if (termthread) {
388 mig_dealloc_reply_port(MACH_PORT_NULL);
389 LOCK(_pthread_list_lock);
390 if (freestruct != 0) {
391 TAILQ_REMOVE(&__pthread_head, t, plist);
392 #if WQ_TRACE
393 __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
394 #endif
395 }
396 thread_count = --_pthread_count;
397 t->childexit = 1;
398 UNLOCK(_pthread_list_lock);
399
400 if (freestruct) {
401 #if WQ_TRACE
402 __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
403 #endif
404 free(t);
405 }
406
407 freeaddr = 0;
408 freesize = 0;
409 #if WQ_TRACE
410 __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
411 #endif
412
413 if (thread_count <=0)
414 exit(0);
415 else
416 __bsdthread_terminate(NULL, 0, kport, joinsem);
417 abort();
418 } else if (freestruct) {
419 t->sig = _PTHREAD_NO_SIG;
420 #if WQ_TRACE
421 __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
422 #endif
423 free(t);
424 }
425 }
426 return(res);
427 }
428
429
430
431 /*
432 * Destroy a thread attribute structure
433 */
434 int
435 pthread_attr_destroy(pthread_attr_t *attr)
436 {
437 if (attr->sig == _PTHREAD_ATTR_SIG)
438 {
439 attr->sig = 0;
440 return (0);
441 } else
442 {
443 return (EINVAL); /* Not an attribute structure! */
444 }
445 }
446
447 /*
448 * Get the 'detach' state from a thread attribute structure.
449 * Note: written as a helper function for info hiding
450 */
451 int
452 pthread_attr_getdetachstate(const pthread_attr_t *attr,
453 int *detachstate)
454 {
455 if (attr->sig == _PTHREAD_ATTR_SIG)
456 {
457 *detachstate = attr->detached;
458 return (0);
459 } else
460 {
461 return (EINVAL); /* Not an attribute structure! */
462 }
463 }
464
465 /*
466 * Get the 'inherit scheduling' info from a thread attribute structure.
467 * Note: written as a helper function for info hiding
468 */
469 int
470 pthread_attr_getinheritsched(const pthread_attr_t *attr,
471 int *inheritsched)
472 {
473 if (attr->sig == _PTHREAD_ATTR_SIG)
474 {
475 *inheritsched = attr->inherit;
476 return (0);
477 } else
478 {
479 return (EINVAL); /* Not an attribute structure! */
480 }
481 }
482
483 /*
484 * Get the scheduling parameters from a thread attribute structure.
485 * Note: written as a helper function for info hiding
486 */
487 int
488 pthread_attr_getschedparam(const pthread_attr_t *attr,
489 struct sched_param *param)
490 {
491 if (attr->sig == _PTHREAD_ATTR_SIG)
492 {
493 *param = attr->param;
494 return (0);
495 } else
496 {
497 return (EINVAL); /* Not an attribute structure! */
498 }
499 }
500
501 /*
502 * Get the scheduling policy from a thread attribute structure.
503 * Note: written as a helper function for info hiding
504 */
505 int
506 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
507 int *policy)
508 {
509 if (attr->sig == _PTHREAD_ATTR_SIG)
510 {
511 *policy = attr->policy;
512 return (0);
513 } else
514 {
515 return (EINVAL); /* Not an attribute structure! */
516 }
517 }
518
519 /* Retain the existing default stack size of 512K rather than depending on the main thread's default stack size */
520 static const size_t DEFAULT_STACK_SIZE = (512*1024);
521 /*
522 * Initialize a thread attribute structure to default values.
523 */
524 int
525 pthread_attr_init(pthread_attr_t *attr)
526 {
527 attr->stacksize = DEFAULT_STACK_SIZE;
528 attr->stackaddr = NULL;
529 attr->sig = _PTHREAD_ATTR_SIG;
530 attr->param.sched_priority = default_priority;
531 attr->param.quantum = 10; /* quantum isn't public yet */
532 attr->detached = PTHREAD_CREATE_JOINABLE;
533 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
534 attr->policy = _PTHREAD_DEFAULT_POLICY;
535 attr->freeStackOnExit = 1;
536 attr->fastpath = 1;
537 attr->schedset = 0;
538 attr->guardsize = vm_page_size;
539 return (0);
540 }
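/*
 * Usage sketch (caller side, not part of the library): typical life cycle of a
 * pthread_attr_t with the defaults set up by pthread_attr_init() above. The
 * worker() routine and the 1 MB stack size are hypothetical.
 */
#if 0
static void *
worker(void *arg)
{
	return (arg);
}

static int
spawn_with_1mb_stack(pthread_t *tid)
{
	pthread_attr_t attr;
	int err;

	pthread_attr_init(&attr);	/* joinable, 512K stack by default */
	err = pthread_attr_setstacksize(&attr, 1024 * 1024);
	if (err == 0)
		err = pthread_create(tid, &attr, worker, NULL);
	pthread_attr_destroy(&attr);
	return (err);
}
#endif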
541
542 /*
543 * Set the 'detach' state in a thread attribute structure.
544 * Note: written as a helper function for info hiding
545 */
546 int
547 pthread_attr_setdetachstate(pthread_attr_t *attr,
548 int detachstate)
549 {
550 if (attr->sig == _PTHREAD_ATTR_SIG)
551 {
552 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
553 (detachstate == PTHREAD_CREATE_DETACHED))
554 {
555 attr->detached = detachstate;
556 return (0);
557 } else
558 {
559 return (EINVAL);
560 }
561 } else
562 {
563 return (EINVAL); /* Not an attribute structure! */
564 }
565 }
566
567 /*
568 * Set the 'inherit scheduling' state in a thread attribute structure.
569 * Note: written as a helper function for info hiding
570 */
571 int
572 pthread_attr_setinheritsched(pthread_attr_t *attr,
573 int inheritsched)
574 {
575 if (attr->sig == _PTHREAD_ATTR_SIG)
576 {
577 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
578 (inheritsched == PTHREAD_EXPLICIT_SCHED))
579 {
580 attr->inherit = inheritsched;
581 return (0);
582 } else
583 {
584 return (EINVAL);
585 }
586 } else
587 {
588 return (EINVAL); /* Not an attribute structure! */
589 }
590 }
591
592 /*
593 * Set the scheduling parameters in a thread attribute structure.
594 * Note: written as a helper function for info hiding
595 */
596 int
597 pthread_attr_setschedparam(pthread_attr_t *attr,
598 const struct sched_param *param)
599 {
600 if (attr->sig == _PTHREAD_ATTR_SIG)
601 {
602 /* TODO: Validate sched_param fields */
603 attr->param = *param;
604 attr->schedset = 1;
605 return (0);
606 } else
607 {
608 return (EINVAL); /* Not an attribute structure! */
609 }
610 }
611
612 /*
613 * Set the scheduling policy in a thread attribute structure.
614 * Note: written as a helper function for info hiding
615 */
616 int
617 pthread_attr_setschedpolicy(pthread_attr_t *attr,
618 int policy)
619 {
620 if (attr->sig == _PTHREAD_ATTR_SIG)
621 {
622 if ((policy == SCHED_OTHER) ||
623 (policy == SCHED_RR) ||
624 (policy == SCHED_FIFO))
625 {
626 attr->policy = policy;
627 attr->schedset = 1;
628 return (0);
629 } else
630 {
631 return (EINVAL);
632 }
633 } else
634 {
635 return (EINVAL); /* Not an attribute structure! */
636 }
637 }
638
639 /*
640 * Set the scope for the thread.
641 * We currently only provide PTHREAD_SCOPE_SYSTEM
642 */
643 int
644 pthread_attr_setscope(pthread_attr_t *attr,
645 int scope)
646 {
647 if (attr->sig == _PTHREAD_ATTR_SIG) {
648 if (scope == PTHREAD_SCOPE_SYSTEM) {
649 /* No attribute yet for the scope */
650 return (0);
651 } else if (scope == PTHREAD_SCOPE_PROCESS) {
652 return (ENOTSUP);
653 }
654 }
655 return (EINVAL); /* Not an attribute structure! */
656 }
657
658 /*
659 * Get the scope for the thread.
660 * We currently only provide PTHREAD_SCOPE_SYSTEM
661 */
662 int
663 pthread_attr_getscope(const pthread_attr_t *attr,
664 int *scope)
665 {
666 if (attr->sig == _PTHREAD_ATTR_SIG) {
667 *scope = PTHREAD_SCOPE_SYSTEM;
668 return (0);
669 }
670 return (EINVAL); /* Not an attribute structure! */
671 }
672
673 /* Get the base stack address of the given thread */
674 int
675 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
676 {
677 if (attr->sig == _PTHREAD_ATTR_SIG) {
678 *stackaddr = attr->stackaddr;
679 return (0);
680 } else {
681 return (EINVAL); /* Not an attribute structure! */
682 }
683 }
684
685 int
686 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
687 {
688 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
689 attr->stackaddr = stackaddr;
690 attr->freeStackOnExit = 0;
691 attr->fastpath = 0;
692 return (0);
693 } else {
694 return (EINVAL); /* Not an attribute structure! */
695 }
696 }
697
698 int
699 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
700 {
701 if (attr->sig == _PTHREAD_ATTR_SIG) {
702 *stacksize = attr->stacksize;
703 return (0);
704 } else {
705 return (EINVAL); /* Not an attribute structure! */
706 }
707 }
708
709 int
710 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
711 {
712 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
713 attr->stacksize = stacksize;
714 return (0);
715 } else {
716 return (EINVAL); /* Not an attribute structure! */
717 }
718 }
719
720 int
721 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
722 {
723 if (attr->sig == _PTHREAD_ATTR_SIG) {
724 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
725 *stacksize = attr->stacksize;
726 return (0);
727 } else {
728 return (EINVAL); /* Not an attribute structure! */
729 }
730 }
731
732 /* By the SUS spec, the stackaddr is the base address, i.e. the lowest addressable
733 * byte of the stack. This is not the same as in pthread_attr_setstackaddr.
734 */
735 int
736 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
737 {
738 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
739 (((uintptr_t)stackaddr % vm_page_size) == 0) &&
740 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
741 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
742 attr->stacksize = stacksize;
743 attr->freeStackOnExit = 0;
744 attr->fastpath = 0;
745 return (0);
746 } else {
747 return (EINVAL); /* Not an attribute structure! */
748 }
749 }
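/*
 * Usage sketch (not part of the library): pthread_attr_setstack() takes the
 * lowest address of a page-aligned region, unlike pthread_attr_setstackaddr()
 * which takes the top-of-stack address. The mmap()-backed region below is a
 * hypothetical example.
 */
#if 0
#include <sys/mman.h>

static int
attach_mapped_stack(pthread_attr_t *attr, size_t size)
{
	void *lowest = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (lowest == MAP_FAILED)
		return (EAGAIN);
	/* size must be a page multiple and at least PTHREAD_STACK_MIN */
	return (pthread_attr_setstack(attr, lowest, size));
}
#endif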
750
751
752 /*
753 * Set the guardsize attribute in the attr.
754 */
755 int
756 pthread_attr_setguardsize(pthread_attr_t *attr,
757 size_t guardsize)
758 {
759 if (attr->sig == _PTHREAD_ATTR_SIG) {
760 /* Guardsize of 0 is valid, it means no guard */
761 if ((guardsize % vm_page_size) == 0) {
762 attr->guardsize = guardsize;
763 attr->fastpath = 0;
764 return (0);
765 } else
766 return(EINVAL);
767 }
768 return (EINVAL); /* Not an attribute structure! */
769 }
770
771 /*
772 * Get the guardsize attribute in the attr.
773 */
774 int
775 pthread_attr_getguardsize(const pthread_attr_t *attr,
776 size_t *guardsize)
777 {
778 if (attr->sig == _PTHREAD_ATTR_SIG) {
779 *guardsize = attr->guardsize;
780 return (0);
781 }
782 return (EINVAL); /* Not an attribute structure! */
783 }
784
785
786 /*
787 * Create and start execution of a new thread.
788 */
789
790 static void
791 _pthread_body(pthread_t self)
792 {
793 _pthread_set_self(self);
794 _pthread_exit(self, (self->fun)(self->arg));
795 }
796
797 void
798 _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
799 {
800 int ret;
801 #if WQ_DEBUG
802 pthread_t pself;
803 #endif
804 pthread_attr_t *attrs = &_pthread_attr_default;
805 char * stackaddr;
806
807 if ((pflags & PTHREAD_START_CUSTOM) == 0) {
808 stackaddr = self;
809 _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
810 LOCK(_pthread_list_lock);
811 if (pflags & PTHREAD_START_SETSCHED) {
812 self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
813 self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
814 }
815 /* These are not joinable threads */
816 if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
817 self->detached &= ~PTHREAD_CREATE_JOINABLE;
818 self->detached |= PTHREAD_CREATE_DETACHED;
819 }
820 } else
821 LOCK(_pthread_list_lock);
822 self->kernel_thread = kport;
823 self->fun = fun;
824 self->arg = funarg;
825
826 /* Add to the pthread list */
827 if (self->parentcheck == 0) {
828 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
829 #if WQ_TRACE
830 __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
831 #endif
832 _pthread_count++;
833 }
834 self->childrun = 1;
835 UNLOCK(_pthread_list_lock);
836 #if defined(__i386__) || defined(__x86_64__)
837 _pthread_set_self(self);
838 #endif
839
840 #if WQ_DEBUG
841 pself = pthread_self();
842 if (self != pself)
843 abort();
844 #endif
845 #if WQ_TRACE
846 __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
847 #endif
848
849 _pthread_exit(self, (self->fun)(self->arg));
850 }
851
852 int
853 _pthread_create(pthread_t t,
854 const pthread_attr_t *attrs,
855 void *stack,
856 const mach_port_t kernel_thread)
857 {
858 int res;
859 res = 0;
860
861 do
862 {
863 memset(t, 0, sizeof(*t));
864 t->newstyle = 0;
865 t->schedset = 0;
866 t->kernalloc = 0;
867 t->tsd[0] = t;
868 t->max_tsd_key = 0;
869 t->wqthread = 0;
870 t->cur_workq = 0;
871 t->cur_workitem = 0;
872 t->stacksize = attrs->stacksize;
873 t->stackaddr = (void *)stack;
874 t->guardsize = attrs->guardsize;
875 t->kernel_thread = kernel_thread;
876 t->detached = attrs->detached;
877 t->inherit = attrs->inherit;
878 t->policy = attrs->policy;
879 t->param = attrs->param;
880 t->freeStackOnExit = attrs->freeStackOnExit;
881 t->mutexes = (struct _pthread_mutex *)NULL;
882 t->sig = _PTHREAD_SIG;
883 t->reply_port = MACH_PORT_NULL;
884 t->cthread_self = NULL;
885 LOCK_INIT(t->lock);
886 t->plist.tqe_next = (struct _pthread *)0;
887 t->plist.tqe_prev = (struct _pthread **)0;
888 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
889 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
890 t->death = SEMAPHORE_NULL;
891
892 if (kernel_thread != MACH_PORT_NULL)
893 pthread_setschedparam(t, t->policy, &t->param);
894 } while (0);
895 return (res);
896 }
897
898 void
899 _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
900 {
901 mach_vm_offset_t stackaddr = (mach_vm_offset_t)stack;
902
903 if (nozero == 0) {
904 memset(t, 0, sizeof(*t));
905 t->plist.tqe_next = (struct _pthread *)0;
906 t->plist.tqe_prev = (struct _pthread **)0;
907 }
908 t->schedset = attrs->schedset;
909 t->tsd[0] = t;
910 if (kernalloc != 0) {
911 stackaddr = (mach_vm_offset_t)t;
912
913 /* if allocated from kernel set values appropriately */
914 t->stacksize = stacksize;
915 t->stackaddr = stackaddr;
916 t->freeStackOnExit = 1;
917 t->freeaddr = stackaddr - stacksize - vm_page_size;
918 t->freesize = pthreadsize + stacksize + vm_page_size;
919 } else {
920 t->stacksize = attrs->stacksize;
921 t->stackaddr = (void *)stack;
922 }
923 t->guardsize = attrs->guardsize;
924 t->detached = attrs->detached;
925 t->inherit = attrs->inherit;
926 t->policy = attrs->policy;
927 t->param = attrs->param;
928 t->mutexes = (struct _pthread_mutex *)NULL;
929 t->sig = _PTHREAD_SIG;
930 t->reply_port = MACH_PORT_NULL;
931 t->cthread_self = NULL;
932 LOCK_INIT(t->lock);
933 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
934 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
935 t->death = SEMAPHORE_NULL;
936 t->newstyle = 1;
937 t->kernalloc = kernalloc;
938 t->wqthread = 0;
939 t->cur_workq = 0;
940 t->cur_workitem = 0;
941 t->max_tsd_key = 0;
942 }
943
944 static void
945 _pthread_tsd_reinit(pthread_t t)
946 {
947 bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
948 }
949
950
951 /* Need to deprecate this in future */
952 int
953 _pthread_is_threaded(void)
954 {
955 return __is_threaded;
956 }
957
958 /* Non-portable public API to report whether this process has (or had) at least one thread
959 * apart from the main thread. There can be a race if a thread is in the process of being
960 * created at the time of the call. It does not tell whether more than one thread exists
961 * at this point in time.
962 */
963 int
964 pthread_is_threaded_np(void)
965 {
966 return (__is_threaded);
967 }
968
969 mach_port_t
970 pthread_mach_thread_np(pthread_t t)
971 {
972 mach_port_t kport = MACH_PORT_NULL;
973
974 if (_pthread_lookup_thread(t, &kport, 0) != 0)
975 return(MACH_PORT_NULL);
976
977 return(kport);
978 }
979
980 pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread)
981 {
982 struct _pthread * p = NULL;
983
984 /* No need to wait as mach port is already known */
985 LOCK(_pthread_list_lock);
986 TAILQ_FOREACH(p, &__pthread_head, plist) {
987 if (p->kernel_thread == kernel_thread)
988 break;
989 }
990 UNLOCK(_pthread_list_lock);
991 return p;
992 }
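/*
 * Usage sketch (not part of the library): pthread_mach_thread_np() and
 * pthread_from_mach_thread_np() are inverses for live threads, so a
 * pthread_t can be recovered from its kernel thread port.
 */
#if 0
static void
log_kernel_port(pthread_t t)
{
	mach_port_t kport = pthread_mach_thread_np(t);

	if (kport != MACH_PORT_NULL && pthread_from_mach_thread_np(kport) == t)
		printf("pthread %p <-> mach port 0x%x\n", (void *)t, kport);
}
#endif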
993
994 size_t
995 pthread_get_stacksize_np(pthread_t t)
996 {
997 int ret;
998 size_t size = 0;
999
1000 if (t == NULL)
1001 return(ESRCH);
1002
1003 LOCK(_pthread_list_lock);
1004
1005 if ((ret = _pthread_find_thread(t)) != 0) {
1006 UNLOCK(_pthread_list_lock);
1007 return(ret);
1008 }
1009 size = t->stacksize;
1010 UNLOCK(_pthread_list_lock);
1011 return(size);
1012 }
1013
1014 void *
1015 pthread_get_stackaddr_np(pthread_t t)
1016 {
1017 int ret;
1018 void * addr = NULL;
1019
1020 if (t == NULL)
1021 return(ESRCH);
1022
1023 LOCK(_pthread_list_lock);
1024
1025 if ((ret = _pthread_find_thread(t)) != 0) {
1026 UNLOCK(_pthread_list_lock);
1027 return(ret);
1028 }
1029 addr = t->stackaddr;
1030 UNLOCK(_pthread_list_lock);
1031
1032 return(addr);
1033 }
1034
1035 mach_port_t
1036 _pthread_reply_port(pthread_t t)
1037 {
1038 return t->reply_port;
1039 }
1040
1041
1042 /* returns non-zero if the current thread is the main thread */
1043 int
1044 pthread_main_np(void)
1045 {
1046 pthread_t self = pthread_self();
1047
1048 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
1049 }
1050
1051 static int
1052 _new_pthread_create_suspended(pthread_t *thread,
1053 const pthread_attr_t *attr,
1054 void *(*start_routine)(void *),
1055 void *arg,
1056 int create_susp)
1057 {
1058 pthread_attr_t *attrs;
1059 void *stack;
1060 int error;
1061 unsigned int flags;
1062 pthread_t t;
1063 kern_return_t kern_res;
1064 mach_port_t kernel_thread = MACH_PORT_NULL;
1065 int needresume;
1066 task_t self = mach_task_self();
1067 int kernalloc = 0;
1068 int susp = create_susp;
1069
1070 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1071 { /* Set up default parameters */
1072 attrs = &_pthread_attr_default;
1073 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1074 return EINVAL;
1075 }
1076 error = 0;
1077
1078 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1079 (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
1080 needresume = 1;
1081 susp = 1;
1082 } else
1083 needresume = 0;
1084
1085 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check here
1086 * whether any change in priority or policy is needed.
1087 */
1088 if ((__oldstyle == 1) || (create_susp != 0)) {
1089 /* Rosetta or pthread_create_suspended() */
1090 /* running under rosetta */
1091 /* Allocate a stack for the thread */
1092 #if WQ_TRACE
1093 __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
1094 #endif
1095 if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
1096 return(error);
1097 }
1098 t = (pthread_t)malloc(sizeof(struct _pthread));
1099 *thread = t;
1100 if (susp) {
1101 /* Create the Mach thread for this thread */
1102 PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
1103 if (kern_res != KERN_SUCCESS)
1104 {
1105 printf("Can't create thread: %d\n", kern_res);
1106 return(EINVAL);
1107 }
1108 }
1109 if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1110 {
1111 return(error);
1112 }
1113 set_malloc_singlethreaded(0);
1114 __is_threaded = 1;
1115
1116 /* Send it on its way */
1117 t->arg = arg;
1118 t->fun = start_routine;
1119 t->newstyle = 0;
1120 /* Now set it up to execute */
1121 LOCK(_pthread_list_lock);
1122 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1123 #if WQ_TRACE
1124 __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
1125 #endif
1126 _pthread_count++;
1127 UNLOCK(_pthread_list_lock);
1128 _pthread_setup(t, _pthread_body, stack, susp, needresume);
1129 return(0);
1130 } else {
1131
1132 flags = 0;
1133 if (attrs->fastpath == 1)
1134 kernalloc = 1;
1135
1136 if (attrs->detached == PTHREAD_CREATE_DETACHED)
1137 flags |= PTHREAD_START_DETACHED;
1138 if (attrs->schedset != 0) {
1139 flags |= PTHREAD_START_SETSCHED;
1140 flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
1141 flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
1142 }
1143
1144 set_malloc_singlethreaded(0);
1145 __is_threaded = 1;
1146
1147 if (kernalloc == 0) {
1148 /* Allocate a stack for the thread */
1149 flags |= PTHREAD_START_CUSTOM;
1150 if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
1151 return(error);
1152 }
1153 /* Send it on its way */
1154 t->arg = arg;
1155 t->fun = start_routine;
1156 t->newstyle = 1;
1157
1158 #if WQ_TRACE
1159 __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
1160 #endif
1161
1162 if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == -1) {
1163 _pthread_free_pthread_onstack(t, 1, 0);
1164 return (EAGAIN);
1165 }
1166 LOCK(_pthread_list_lock);
1167 t->parentcheck = 1;
1168 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1169 /* detached child exited, mop up */
1170 UNLOCK(_pthread_list_lock);
1171 #if WQ_TRACE
1172 __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
1173 #endif
1174 free(t);
1175 } else if (t->childrun == 0) {
1176 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1177 _pthread_count++;
1178 #if WQ_TRACE
1179 __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
1180 #endif
1181 UNLOCK(_pthread_list_lock);
1182 } else
1183 UNLOCK(_pthread_list_lock);
1184
1185 *thread = t;
1186
1187 #if WQ_TRACE
1188 __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
1189 #endif
1190 return (0);
1191
1192 } else {
1193 /* kernel allocation */
1194 #if WQ_TRACE
1195 __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
1196 #endif
1197 if ((t = __bsdthread_create(start_routine, arg, attrs->stacksize, NULL, flags)) == -1)
1198 return (EAGAIN);
1199 /* Now set it up to execute */
1200 LOCK(_pthread_list_lock);
1201 t->parentcheck = 1;
1202 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1203 /* detached child exited, mop up */
1204 UNLOCK(_pthread_list_lock);
1205 #if WQ_TRACE
1206 __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
1207 #endif
1208 vm_deallocate(self, t, pthreadsize);
1209 } else if (t->childrun == 0) {
1210 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1211 _pthread_count++;
1212 #if WQ_TRACE
1213 __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
1214 #endif
1215 UNLOCK(_pthread_list_lock);
1216 } else
1217 UNLOCK(_pthread_list_lock);
1218
1219 *thread = t;
1220
1221 #if WQ_TRACE
1222 __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
1223 #endif
1224 return(0);
1225 }
1226 }
1227 }
1228
1229 static int
1230 _pthread_create_suspended(pthread_t *thread,
1231 const pthread_attr_t *attr,
1232 void *(*start_routine)(void *),
1233 void *arg,
1234 int suspended)
1235 {
1236 pthread_attr_t *attrs;
1237 void *stack;
1238 int res;
1239 pthread_t t;
1240 kern_return_t kern_res;
1241 mach_port_t kernel_thread = MACH_PORT_NULL;
1242 int needresume;
1243
1244 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1245 { /* Set up default parameters */
1246 attrs = &_pthread_attr_default;
1247 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1248 return EINVAL;
1249 }
1250 res = 0;
1251
1252 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check here
1253 * whether any change in priority or policy is needed.
1254 */
1255 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1256 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
1257 needresume = 1;
1258 suspended = 1;
1259 } else
1260 needresume = 0;
1261
1262 do
1263 {
1264 /* Allocate a stack for the thread */
1265 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
1266 break;
1267 }
1268 t = (pthread_t)malloc(sizeof(struct _pthread));
1269 *thread = t;
1270 if (suspended) {
1271 /* Create the Mach thread for this thread */
1272 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
1273 if (kern_res != KERN_SUCCESS)
1274 {
1275 printf("Can't create thread: %d\n", kern_res);
1276 res = EINVAL; /* Need better error here? */
1277 break;
1278 }
1279 }
1280 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1281 {
1282 break;
1283 }
1284 set_malloc_singlethreaded(0);
1285 __is_threaded = 1;
1286
1287 /* Send it on its way */
1288 t->arg = arg;
1289 t->fun = start_routine;
1290 /* Now set it up to execute */
1291 LOCK(_pthread_list_lock);
1292 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1293 #if WQ_TRACE
1294 __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
1295 #endif
1296 _pthread_count++;
1297 UNLOCK(_pthread_list_lock);
1298 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
1299 } while (0);
1300 return (res);
1301 }
1302
1303 int
1304 pthread_create(pthread_t *thread,
1305 const pthread_attr_t *attr,
1306 void *(*start_routine)(void *),
1307 void *arg)
1308 {
1309 return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
1310 }
1311
1312 int
1313 pthread_create_suspended_np(pthread_t *thread,
1314 const pthread_attr_t *attr,
1315 void *(*start_routine)(void *),
1316 void *arg)
1317 {
1318 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
1319 }
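/*
 * Usage sketch (not part of the library): a thread created with
 * pthread_create_suspended_np() does not run until its underlying Mach thread
 * is resumed; calling thread_resume() on the port returned by
 * pthread_mach_thread_np() is one way to release it.
 */
#if 0
static int
create_and_release(pthread_t *tid, void *(*fn)(void *), void *arg)
{
	int err = pthread_create_suspended_np(tid, NULL, fn, arg);

	if (err != 0)
		return (err);
	/* ...inspect or adjust the thread here while it cannot run... */
	(void)thread_resume(pthread_mach_thread_np(*tid));
	return (0);
}
#endif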
1320
1321 /*
1322 * Mark a thread as detached - no longer 'joinable' by other threads.
1323 */
1324 int
1325 pthread_detach(pthread_t thread)
1326 {
1327 int newstyle = 0;
1328 int ret;
1329
1330 if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
1331 return (ret); /* Not a valid thread */
1332
1333 LOCK(thread->lock);
1334 newstyle = thread->newstyle;
1335 if (thread->detached & PTHREAD_CREATE_JOINABLE)
1336 {
1337 if (thread->detached & _PTHREAD_EXITED) {
1338 UNLOCK(thread->lock);
1339 pthread_join(thread, NULL);
1340 return 0;
1341 } else {
1342 if (newstyle == 0) {
1343 semaphore_t death = thread->death;
1344
1345 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1346 thread->detached |= PTHREAD_CREATE_DETACHED;
1347 UNLOCK(thread->lock);
1348 if (death)
1349 (void) semaphore_signal(death);
1350 } else {
1351 mach_port_t joinport = thread->joiner_notify;
1352
1353 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1354 thread->detached |= PTHREAD_CREATE_DETACHED;
1355
1356 UNLOCK(thread->lock);
1357 if (joinport) {
1358 semaphore_signal(joinport);
1359 }
1360 }
1361 return(0);
1362 }
1363 } else {
1364 UNLOCK(thread->lock);
1365 return (EINVAL);
1366 }
1367 }
1368
1369
1370 /*
1371 * pthread_kill call to system call
1372 */
1373
1374 extern int __pthread_kill(mach_port_t, int);
1375
1376 int
1377 pthread_kill (
1378 pthread_t th,
1379 int sig)
1380 {
1381 int error = 0;
1382 mach_port_t kport = MACH_PORT_NULL;
1383
1384 if ((sig < 0) || (sig > NSIG))
1385 return(EINVAL);
1386
1387 if (_pthread_lookup_thread(th, &kport, 0) != 0)
1388 return (ESRCH); /* Not a valid thread */
1389
1390 error = __pthread_kill(kport, sig);
1391
1392 if (error == -1)
1393 error = errno;
1394 return(error);
1395 }
1396
1397 /* Announce that there are pthread resources ready to be reclaimed in a */
1398 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
1399 /* thread underneath is terminated right away. */
1400 static
1401 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
1402 pthread_reap_msg_t msg;
1403 kern_return_t ret;
1404
1405 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
1406 MACH_MSG_TYPE_MOVE_SEND);
1407 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
1408 msg.header.msgh_remote_port = thread_recycle_port;
1409 msg.header.msgh_local_port = kernel_thread;
1410 msg.header.msgh_id = 0x44454144; /* 'DEAD' */
1411 msg.thread = thread;
1412 ret = mach_msg_send(&msg.header);
1413 assert(ret == MACH_MSG_SUCCESS);
1414 }
1415
1416 /* Reap the resources for available threads */
1417 __private_extern__
1418 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
1419 mach_port_type_t ptype;
1420 kern_return_t ret;
1421 task_t self;
1422
1423 self = mach_task_self();
1424 if (kernel_thread != MACH_PORT_DEAD) {
1425 ret = mach_port_type(self, kernel_thread, &ptype);
1426 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
1427 /* not quite dead yet... */
1428 return EAGAIN;
1429 }
1430 ret = mach_port_deallocate(self, kernel_thread);
1431 if (ret != KERN_SUCCESS) {
1432 fprintf(stderr,
1433 "mach_port_deallocate(kernel_thread) failed: %s\n",
1434 mach_error_string(ret));
1435 }
1436 }
1437
1438 if (th->reply_port != MACH_PORT_NULL) {
1439 ret = mach_port_mod_refs(self, th->reply_port,
1440 MACH_PORT_RIGHT_RECEIVE, -1);
1441 if (ret != KERN_SUCCESS) {
1442 fprintf(stderr,
1443 "mach_port_mod_refs(reply_port) failed: %s\n",
1444 mach_error_string(ret));
1445 }
1446 }
1447
1448 if (th->freeStackOnExit) {
1449 vm_address_t addr = (vm_address_t)th->stackaddr;
1450 vm_size_t size;
1451
1452 size = (vm_size_t)th->stacksize + th->guardsize;
1453
1454 addr -= size;
1455 ret = vm_deallocate(self, addr, size);
1456 if (ret != KERN_SUCCESS) {
1457 fprintf(stderr,
1458 "vm_deallocate(stack) failed: %s\n",
1459 mach_error_string(ret));
1460 }
1461 }
1462
1463
1464 if (value_ptr)
1465 *value_ptr = th->exit_value;
1466 if (conforming) {
1467 if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1468 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1469 *value_ptr = PTHREAD_CANCELED;
1470 th->sig = _PTHREAD_NO_SIG;
1471 }
1472
1473
1474 if (th != &_thread)
1475 free(th);
1476
1477 return 0;
1478 }
1479
1480 static
1481 void _pthread_reap_threads(void)
1482 {
1483 pthread_reap_msg_t msg;
1484 kern_return_t ret;
1485
1486 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1487 sizeof msg, thread_recycle_port,
1488 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1489 while (ret == MACH_MSG_SUCCESS) {
1490 mach_port_t kernel_thread = msg.header.msgh_remote_port;
1491 pthread_t thread = msg.thread;
1492
1493 if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
1494 {
1495 /* not dead yet, put it back for someone else to reap, stop here */
1496 _pthread_become_available(thread, kernel_thread);
1497 return;
1498 }
1499 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1500 sizeof msg, thread_recycle_port,
1501 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1502 }
1503 }
1504
1505 /* For compatibility... */
1506
1507 pthread_t
1508 _pthread_self() {
1509 return pthread_self();
1510 }
1511
1512 /*
1513 * Terminate a thread.
1514 */
1515 int __disable_threadsignal(int);
1516
1517 static void
1518 _pthread_exit(pthread_t self, void *value_ptr)
1519 {
1520 struct __darwin_pthread_handler_rec *handler;
1521 kern_return_t kern_res;
1522 int thread_count;
1523 int newstyle = self->newstyle;
1524
1525 /* Prevent this thread from receiving any further signals */
1526 __disable_threadsignal(1);
1527
1528 #if WQ_TRACE
1529 __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
1530 #endif
1531
1532 /* set cancel state to disable and type to deferred */
1533 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
1534
1535 while ((handler = self->__cleanup_stack) != 0)
1536 {
1537 (handler->__routine)(handler->__arg);
1538 self->__cleanup_stack = handler->__next;
1539 }
1540 _pthread_tsd_cleanup(self);
1541
1542 if (newstyle == 0) {
1543 _pthread_reap_threads();
1544
1545 LOCK(self->lock);
1546 self->detached |= _PTHREAD_EXITED;
1547
1548 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1549 mach_port_t death = self->death;
1550 self->exit_value = value_ptr;
1551 UNLOCK(self->lock);
1552 /* the joiner will need a kernel thread reference, leave ours for it */
1553 if (death) {
1554 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1555 if (kern_res != KERN_SUCCESS)
1556 fprintf(stderr,
1557 "semaphore_signal(death) failed: %s\n",
1558 mach_error_string(kern_res));
1559 }
1560 LOCK(_pthread_list_lock);
1561 thread_count = --_pthread_count;
1562 UNLOCK(_pthread_list_lock);
1563 } else {
1564 UNLOCK(self->lock);
1565 LOCK(_pthread_list_lock);
1566 TAILQ_REMOVE(&__pthread_head, self, plist);
1567 #if WQ_TRACE
1568 __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
1569 #endif
1570 thread_count = --_pthread_count;
1571 UNLOCK(_pthread_list_lock);
1572 /* with no joiner, we let _pthread_become_available() consume our cached ref */
1573 _pthread_become_available(self, self->kernel_thread);
1574 }
1575
1576 if (thread_count <= 0)
1577 exit(0);
1578
1579 /* Use a new reference to terminate ourselves. Should never return. */
1580 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1581 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1582 mach_error_string(kern_res));
1583 } else {
1584 semaphore_t joinsem = SEMAPHORE_NULL;
1585
1586 if ((self->joiner_notify == NULL) && (self->detached & PTHREAD_CREATE_JOINABLE))
1587 joinsem = new_sem_from_pool();
1588 LOCK(self->lock);
1589 self->detached |= _PTHREAD_EXITED;
1590
1591 self->exit_value = value_ptr;
1592 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1593 if (self->joiner_notify == NULL) {
1594 self->joiner_notify = joinsem;
1595 joinsem = SEMAPHORE_NULL;
1596 }
1597 UNLOCK(self->lock);
1598 if (joinsem != SEMAPHORE_NULL)
1599 restore_sem_to_pool(joinsem);
1600 _pthread_free_pthread_onstack(self, 0, 1);
1601 } else {
1602 UNLOCK(self->lock);
1603 /* with no joiner, we let become available consume our cached ref */
1604 if (joinsem != SEMAPHORE_NULL)
1605 restore_sem_to_pool(joinsem);
1606 _pthread_free_pthread_onstack(self, 1, 1);
1607 }
1608 }
1609 abort();
1610 }
1611
1612 void
1613 pthread_exit(void *value_ptr)
1614 {
1615 pthread_t self = pthread_self();
1616 if (self->wqthread != 0)
1617 workqueue_exit(self, self->cur_workq, self->cur_workitem);
1618 else
1619 _pthread_exit(self, value_ptr);
1620 }
1621
1622 /*
1623 * Get the scheduling policy and scheduling parameters for a thread.
1624 */
1625 int
1626 pthread_getschedparam(pthread_t thread,
1627 int *policy,
1628 struct sched_param *param)
1629 {
1630 int ret;
1631
1632 if (thread == NULL)
1633 return(ESRCH);
1634
1635 LOCK(_pthread_list_lock);
1636
1637 if ((ret = _pthread_find_thread(thread)) != 0) {
1638 UNLOCK(_pthread_list_lock);
1639 return(ret);
1640 }
1641 if (policy != 0)
1642 *policy = thread->policy;
1643 if (param != 0)
1644 *param = thread->param;
1645 UNLOCK(_pthread_list_lock);
1646
1647 return(0);
1648 }
1649
1650 /*
1651 * Set the scheduling policy and scheduling parameters for a thread.
1652 */
1653 int
1654 pthread_setschedparam(pthread_t thread,
1655 int policy,
1656 const struct sched_param *param)
1657 {
1658 policy_base_data_t bases;
1659 policy_base_t base;
1660 mach_msg_type_number_t count;
1661 kern_return_t ret;
1662
1663 switch (policy)
1664 {
1665 case SCHED_OTHER:
1666 bases.ts.base_priority = param->sched_priority;
1667 base = (policy_base_t)&bases.ts;
1668 count = POLICY_TIMESHARE_BASE_COUNT;
1669 break;
1670 case SCHED_FIFO:
1671 bases.fifo.base_priority = param->sched_priority;
1672 base = (policy_base_t)&bases.fifo;
1673 count = POLICY_FIFO_BASE_COUNT;
1674 break;
1675 case SCHED_RR:
1676 bases.rr.base_priority = param->sched_priority;
1677 /* quantum isn't public yet */
1678 bases.rr.quantum = param->quantum;
1679 base = (policy_base_t)&bases.rr;
1680 count = POLICY_RR_BASE_COUNT;
1681 break;
1682 default:
1683 return (EINVAL);
1684 }
1685 ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
1686 if (ret != KERN_SUCCESS)
1687 return (EINVAL);
1688 thread->policy = policy;
1689 thread->param = *param;
1690 return (0);
1691 }
1692
1693 /*
1694 * Get the minimum priority for the given policy
1695 */
1696 int
1697 sched_get_priority_min(int policy)
1698 {
1699 return default_priority - 16;
1700 }
1701
1702 /*
1703 * Get the maximum priority for the given policy
1704 */
1705 int
1706 sched_get_priority_max(int policy)
1707 {
1708 return default_priority + 16;
1709 }
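/*
 * Usage sketch (not part of the library): raising the calling thread's
 * priority to the top of the range reported by sched_get_priority_max()
 * for its current policy.
 */
#if 0
static int
raise_self_priority(void)
{
	struct sched_param sp;
	int policy;
	int err = pthread_getschedparam(pthread_self(), &policy, &sp);

	if (err != 0)
		return (err);
	sp.sched_priority = sched_get_priority_max(policy);
	return (pthread_setschedparam(pthread_self(), policy, &sp));
}
#endif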
1710
1711 /*
1712 * Determine if two thread identifiers represent the same thread.
1713 */
1714 int
1715 pthread_equal(pthread_t t1,
1716 pthread_t t2)
1717 {
1718 return (t1 == t2);
1719 }
1720
1721 __private_extern__ void
1722 _pthread_set_self(pthread_t p)
1723 {
1724 extern void __pthread_set_self(pthread_t);
1725 if (p == 0) {
1726 bzero(&_thread, sizeof(struct _pthread));
1727 p = &_thread;
1728 }
1729 p->tsd[0] = p;
1730 __pthread_set_self(p);
1731 }
1732
1733 void
1734 cthread_set_self(void *cself)
1735 {
1736 pthread_t self = pthread_self();
1737 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1738 _pthread_set_self(cself);
1739 return;
1740 }
1741 self->cthread_self = cself;
1742 }
1743
1744 void *
1745 ur_cthread_self(void) {
1746 pthread_t self = pthread_self();
1747 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1748 return (void *)self;
1749 }
1750 return self->cthread_self;
1751 }
1752
1753 /*
1754 * Cancellation handler for pthread_once, since the init routine can contain a
1755 * cancellation point. In that case we need to release the spin lock.
1756 */
1757 void
1758 __pthread_once_cancel_handler(pthread_once_t *once_control)
1759 {
1760 _spin_unlock(&once_control->lock);
1761 }
1762
1763
1764 /*
1765 * Execute a function exactly one time in a thread-safe fashion.
1766 */
1767 int
1768 pthread_once(pthread_once_t *once_control,
1769 void (*init_routine)(void))
1770 {
1771 _spin_lock(&once_control->lock);
1772 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1773 {
1774 pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
1775 (*init_routine)();
1776 pthread_cleanup_pop(0);
1777 once_control->sig = _PTHREAD_ONCE_SIG;
1778 }
1779 _spin_unlock(&once_control->lock);
1780 return (0); /* Spec defines no possible errors! */
1781 }
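/*
 * Usage sketch (not part of the library): one-shot initialization with
 * pthread_once(); init_tables() runs exactly once no matter how many threads
 * race into ensure_init(). The names are hypothetical.
 */
#if 0
static pthread_once_t init_once = PTHREAD_ONCE_INIT;

static void
init_tables(void)
{
	/* one-time setup goes here */
}

static void
ensure_init(void)
{
	(void)pthread_once(&init_once, init_tables);
}
#endif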
1782
1783 /*
1784 * Insert a cancellation point in a thread.
1785 */
1786 __private_extern__ void
1787 _pthread_testcancel(pthread_t thread, int isconforming)
1788 {
1789 LOCK(thread->lock);
1790 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1791 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1792 {
1793 UNLOCK(thread->lock);
1794 if (isconforming)
1795 pthread_exit(PTHREAD_CANCELED);
1796 else
1797 pthread_exit(0);
1798 }
1799 UNLOCK(thread->lock);
1800 }
1801
1802
1803
1804 int
1805 pthread_getconcurrency(void)
1806 {
1807 return(pthread_concurrency);
1808 }
1809
1810 int
1811 pthread_setconcurrency(int new_level)
1812 {
1813 if (new_level < 0)
1814 return EINVAL;
1815 pthread_concurrency = new_level;
1816 return(0);
1817 }
1818
1819 /*
1820 * Perform package initialization - called automatically when application starts
1821 */
1822
1823 __private_extern__ int
1824 pthread_init(void)
1825 {
1826 pthread_attr_t *attrs;
1827 pthread_t thread;
1828 kern_return_t kr;
1829 host_priority_info_data_t priority_info;
1830 host_info_t info;
1831 host_flavor_t flavor;
1832 host_t host;
1833 mach_msg_type_number_t count;
1834 int mib[2];
1835 size_t len;
1836 void *stackaddr;
1837
1838 pthreadsize = round_page(sizeof (struct _pthread));
1839 count = HOST_PRIORITY_INFO_COUNT;
1840 info = (host_info_t)&priority_info;
1841 flavor = HOST_PRIORITY_INFO;
1842 host = mach_host_self();
1843 kr = host_info(host, flavor, info, &count);
1844 if (kr != KERN_SUCCESS)
1845 printf("host_info failed (%d); probably need privilege.\n", kr);
1846 else {
1847 default_priority = priority_info.user_priority;
1848 min_priority = priority_info.minimum_priority;
1849 max_priority = priority_info.maximum_priority;
1850 }
1851 attrs = &_pthread_attr_default;
1852 pthread_attr_init(attrs);
1853
1854 TAILQ_INIT(&__pthread_head);
1855 LOCK_INIT(_pthread_list_lock);
1856 thread = &_thread;
1857 TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
1858 _pthread_set_self(thread);
1859
1860 /* In case of dyld reset the tsd keys from 1 - 10 */
1861 _pthread_keys_init();
1862
1863 mib[0] = CTL_KERN;
1864 mib[1] = KERN_USRSTACK;
1865 len = sizeof (stackaddr);
1866 if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
1867 stackaddr = (void *)USRSTACK;
1868 _pthread_create(thread, attrs, stackaddr, mach_thread_self());
1869 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
1870
1871 _init_cpu_capabilities();
1872 if (_NumCPUs() > 1)
1873 _spin_tries = MP_SPIN_TRIES;
1874
1875 mach_port_deallocate(mach_task_self(), host);
1876
1877 #if defined(__ppc__)
1878 IF_ROSETTA() {
1879 __oldstyle = 1;
1880 }
1881 #endif
1882 #if defined(__arm__)
1883 __oldstyle = 1;
1884 #endif
1885
1886 #if defined(_OBJC_PAGE_BASE_ADDRESS)
1887 {
1888 vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
1889 kr = vm_map(mach_task_self(),
1890 &objcRTPage, vm_page_size * 4, vm_page_size - 1,
1891 VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
1892 MACH_PORT_NULL,
1893 (vm_address_t)0, FALSE,
1894 (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
1895 VM_INHERIT_DEFAULT);
1896 /* We ignore the return result here. The ObjC runtime will just have to deal. */
1897 }
1898 #endif
1899
1900 mig_init(1); /* enable multi-threaded mig interfaces */
1901 if (__oldstyle == 0) {
1902 #if defined(__i386__) || defined(__x86_64__)
1903 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
1904 #else
1905 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
1906 #endif
1907 }
1908 return 0;
1909 }
1910
1911 int sched_yield(void)
1912 {
1913 swtch_pri(0);
1914 return 0;
1915 }
1916
1917 /* This used to be the "magic" that gets the initialization routine called when the application starts */
1918 static int _do_nothing(void) { return 0; }
1919 int (*_cthread_init_routine)(void) = _do_nothing;
1920
1921 /* Get a semaphore from the pool, growing it if necessary */
1922
1923 __private_extern__ semaphore_t new_sem_from_pool(void) {
1924 kern_return_t res;
1925 semaphore_t sem;
1926 int i;
1927
1928 LOCK(sem_pool_lock);
1929 if (sem_pool_current == sem_pool_count) {
1930 sem_pool_count += 16;
1931 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
1932 for (i = sem_pool_current; i < sem_pool_count; i++) {
1933 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
1934 }
1935 }
1936 sem = sem_pool[sem_pool_current++];
1937 UNLOCK(sem_pool_lock);
1938 return sem;
1939 }
1940
1941 /* Put a semaphore back into the pool */
1942 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
1943 LOCK(sem_pool_lock);
1944 sem_pool[--sem_pool_current] = sem;
1945 UNLOCK(sem_pool_lock);
1946 }
1947
1948 static void sem_pool_reset(void) {
1949 LOCK(sem_pool_lock);
1950 sem_pool_count = 0;
1951 sem_pool_current = 0;
1952 sem_pool = NULL;
1953 UNLOCK(sem_pool_lock);
1954 }
1955
1956 __private_extern__ void _pthread_fork_child(pthread_t p) {
1957 /* Just in case somebody had it locked... */
1958 UNLOCK(sem_pool_lock);
1959 sem_pool_reset();
1960 /* No need to hold the pthread_list_lock as no one other than this
1961 * thread is present at this time
1962 */
1963 TAILQ_INIT(&__pthread_head);
1964 LOCK_INIT(_pthread_list_lock);
1965 TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
1966 _pthread_count = 1;
1967 }
1968
1969 /*
1970 * Query/update the cancelability 'state' of a thread
1971 */
1972 int
1973 _pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
1974 {
1975 pthread_t self = pthread_self();
1976
1977
1978 switch (state) {
1979 case PTHREAD_CANCEL_ENABLE:
1980 if (conforming)
1981 __pthread_canceled(1);
1982 break;
1983 case PTHREAD_CANCEL_DISABLE:
1984 if (conforming)
1985 __pthread_canceled(2);
1986 break;
1987 default:
1988 return EINVAL;
1989 }
1990
1991 self = pthread_self();
1992 LOCK(self->lock);
1993 if (oldstate)
1994 *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
1995 self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
1996 self->cancel_state |= state;
1997 UNLOCK(self->lock);
1998 if (!conforming)
1999 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
2000 return (0);
2001 }
2002
2003 /* When a thread exits set the cancellation state to DISABLE and DEFERRED */
2004 static void
2005 _pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
2006 {
2007 LOCK(self->lock);
2008 self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
2009 self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
2010 if ((value_ptr == PTHREAD_CANCELED)) {
2011 // 4597450: begin
2012 self->detached |= _PTHREAD_WASCANCEL;
2013 // 4597450: end
2014 }
2015 UNLOCK(self->lock);
2016 }
2017
2018 int
2019 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
2020 {
2021 kern_return_t res;
2022 int detached = 0, ret;
2023
2024 #if WQ_TRACE
2025 __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
2026 #endif
2027 /* Handle the case where the joiner was waiting on the thread and a
2028 * pthread_detach() happened on that thread. The semaphore will still
2029 * fire, but by the time the joiner runs the target thread could already
2030 * be freed. So make sure the thread is still on the list and is
2031 * joinable before continuing with the join.
2032 */
2033 LOCK(_pthread_list_lock);
2034 if ((ret = _pthread_find_thread(thread)) != 0) {
2035 UNLOCK(_pthread_list_lock);
2036 /* returns ESRCH */
2037 return(ret);
2038 }
2039 if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
2040 /* the thread might be a detached thread */
2041 UNLOCK(_pthread_list_lock);
2042 return(ESRCH);
2043
2044 }
2045 /* It is still a joinable thread and needs to be reaped */
2046 TAILQ_REMOVE(&__pthread_head, thread, plist);
2047 #if WQ_TRACE
2048 __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
2049 #endif
2050 UNLOCK(_pthread_list_lock);
2051
2052 if (value_ptr)
2053 *value_ptr = thread->exit_value;
2054 if (conforming) {
2055 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
2056 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
2057 *value_ptr = PTHREAD_CANCELED;
2058 }
2059 }
2060 if (thread->reply_port != MACH_PORT_NULL) {
2061 res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
2062 if (res != KERN_SUCCESS)
2063 fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
2064 thread->reply_port = MACH_PORT_NULL;
2065 }
2066 if (thread->freeStackOnExit) {
2067 thread->sig = _PTHREAD_NO_SIG;
2068 #if WQ_TRACE
2069 __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
2070 #endif
2071 vm_deallocate(mach_task_self(), (vm_address_t)thread, pthreadsize);
2072 } else {
2073 thread->sig = _PTHREAD_NO_SIG;
2074 #if WQ_TRACE
2075 __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
2076 #endif
2077 free(thread);
2078 }
2079 return(0);
2080 }
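/*
 * For reference, a minimal caller-side sketch of the join path that this
 * cleanup helper backs (pthread_join() itself lives in pthread_cancelable.c);
 * "tid" is a placeholder:
 *
 *	void *value;
 *	if (pthread_join(tid, &value) == 0 && value == PTHREAD_CANCELED) {
 *		// the target was cancelled rather than returning normally
 *	}
 */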
2081
2082 /* ALWAYS called with the list lock held; returns with it held */
2083 int
2084 _pthread_find_thread(pthread_t thread)
2085 {
2086 pthread_t p;
2087
2088 loop:
2089 TAILQ_FOREACH(p, &__pthread_head, plist) {
2090 if (p == thread) {
2091 if (thread->kernel_thread == MACH_PORT_NULL) {
2092 UNLOCK(_pthread_list_lock);
2093 sched_yield();
2094 LOCK(_pthread_list_lock);
2095 goto loop;
2096 }
2097 return(0);
2098 }
2099 }
2100 return(ESRCH);
2101 }
2102
2103 int
2104 _pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
2105 {
2106 mach_port_t kport;
2107 int ret = 0;
2108
2109 if (thread == NULL)
2110 return(ESRCH);
2111
2112 LOCK(_pthread_list_lock);
2113
2114 if ((ret = _pthread_find_thread(thread)) != 0) {
2115 UNLOCK(_pthread_list_lock);
2116 return(ret);
2117 }
2118 if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
2119 UNLOCK(_pthread_list_lock);
2120 return(EINVAL);
2121 }
2122 kport = thread->kernel_thread;
2123 UNLOCK(_pthread_list_lock);
2124 if (portp != NULL)
2125 *portp = kport;
2126 return(0);
2127 }
2128
2129 /* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
2130 int
2131 pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
2132 {
2133 attrp->stacksize = DEFAULT_STACK_SIZE;
2134 attrp->istimeshare = 1;
2135 attrp->importance = 0;
2136 attrp->affinity = 0;
2137 attrp->queueprio = WORK_QUEUE_NORMALIZER;
2138 attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
2139 return(0);
2140 }
2141
2142 int
2143 pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
2144 {
2145 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
2146 {
2147 return (0);
2148 } else
2149 {
2150 return (EINVAL); /* Not an attribute structure! */
2151 }
2152 }
2153
2154 #ifdef NOTYET /* [ */
2155 int
2156 pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep)
2157 {
2158 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2159 *stacksizep = attr->stacksize;
2160 return (0);
2161 } else {
2162 return (EINVAL); /* Not an attribute structure! */
2163 }
2164 }
2165
2166 int
2167 pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize)
2168 {
2169 if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
2170 attr->stacksize = stacksize;
2171 return (0);
2172 } else {
2173 return (EINVAL); /* Not an attribute structure! */
2174 }
2175 }
2176
2177
2178 int
2179 pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesharep)
2180 {
2181 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2182 *istimesharep = attr->istimeshare;
2183 return (0);
2184 } else {
2185 return (EINVAL); /* Not an attribute structure! */
2186 }
2187 }
2188
2189 int
2190 pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare)
2191 {
2192 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2193 if (istimeshare != 0)
2194 attr->istimeshare = istimeshare;
2195 else
2196 attr->istimeshare = 0;
2197 return (0);
2198 } else {
2199 return (EINVAL); /* Not an attribute structure! */
2200 }
2201 }
2202
2203 int
2204 pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep)
2205 {
2206 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2207 *importancep = attr->importance;
2208 return (0);
2209 } else {
2210 return (EINVAL); /* Not an attribute structure! */
2211 }
2212 }
2213
2214 int
2215 pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance)
2216 {
2217 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
2218 attr->importance = importance;
2219 return (0);
2220 } else {
2221 return (EINVAL); /* Not an attribute structure! */
2222 }
2223 }
2224
2225 int
2226 pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp)
2227 {
2228 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2229 *affinityp = attr->affinity;
2230 return (0);
2231 } else {
2232 return (EINVAL); /* Not an attribute structure! */
2233 }
2234 }
2235
2236 int
2237 pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity)
2238 {
2239 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
2240 attr->affinity = affinity;
2241 return (0);
2242 } else {
2243 return (EINVAL); /* Not an attribute structure! */
2244 }
2245 }
2246
2247 #endif /* NOTYET ] */
2248
2249 int
2250 pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
2251 {
2252 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2253 *qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
2254 return (0);
2255 } else {
2256 return (EINVAL); /* Not an attribute structure! */
2257 }
2258 }
2259
2260 int
2261 pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
2262 {
2263 /* only -2 to +2 is valid */
2264 if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
2265 attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
2266 return (0);
2267 } else {
2268 return (EINVAL); /* Not an attribute structure! */
2269 }
2270 }
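/*
 * Illustrative sketch (not part of the original file): typical use of the
 * attribute calls above.  The priority passed in is the external -2..+2
 * value; WORK_QUEUE_NORMALIZER is only applied internally.
 *
 *	pthread_workqueue_attr_t attr;
 *	int qprio;
 *
 *	pthread_workqueue_attr_init_np(&attr);
 *	pthread_workqueue_attr_setqueuepriority_np(&attr, -2);		// lowest
 *	pthread_workqueue_attr_getqueuepriority_np(&attr, &qprio);	// yields -2
 *	pthread_workqueue_attr_destroy_np(&attr);
 */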
2271
2272 /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
2273
2274 static void
2275 workqueue_list_lock()
2276 {
2277 OSSpinLockLock(&__workqueue_list_lock);
2278 }
2279
2280 static void
2281 workqueue_list_unlock()
2282 {
2283 OSSpinLockUnlock(&__workqueue_list_lock);
2284 }
2285
2286 int
2287 pthread_workqueue_init_np()
2288 {
2289 int ret;
2290
2291 workqueue_list_lock();
2292 ret =_pthread_work_internal_init();
2293 workqueue_list_unlock();
2294
2295 return(ret);
2296 }
2297
2298 static int
2299 _pthread_work_internal_init(void)
2300 {
2301 int i, error;
2302 pthread_workqueue_head_t headp;
2303 pthread_workitem_t witemp;
2304 pthread_workqueue_t wq;
2305
2306 if (kernel_workq_setup == 0) {
2307 #if defined(__i386__) || defined(__x86_64__)
2308 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
2309 #else
2310 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
2311 #endif
2312
2313 _pthread_wq_attr_default.stacksize = DEFAULT_STACK_SIZE;
2314 _pthread_wq_attr_default.istimeshare = 1;
2315 _pthread_wq_attr_default.importance = 0;
2316 _pthread_wq_attr_default.affinity = 0;
2317 _pthread_wq_attr_default.queueprio = WORK_QUEUE_NORMALIZER;
2318 _pthread_wq_attr_default.sig = PTHEAD_WRKQUEUE_ATTR_SIG;
2319
2320 for( i = 0; i< WQ_NUM_PRIO_QS; i++) {
2321 headp = __pthread_wq_head_tbl[i];
2322 TAILQ_INIT(&headp->wqhead);
2323 headp->next_workq = 0;
2324 }
2325
2326 /* create work item and workqueue pools */
2327 witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
2328 bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
2329 for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
2330 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
2331 }
2332 wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
2333 bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
2334 for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
2335 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
2336 }
2337
2338 if ((error = __workq_open()) != 0) {
2339 TAILQ_INIT(&__pthread_workitem_pool_head);
2340 TAILQ_INIT(&__pthread_workqueue_pool_head);
2341 free(witemp);
2342 free(wq);
2343 return(ENOMEM);
2344 }
2345 kernel_workq_setup = 1;
2346 }
2347 return(0);
2348 }
2349
2350
2351 /* This routine is called with list lock held */
2352 static pthread_workitem_t
2353 alloc_workitem(void)
2354 {
2355 pthread_workitem_t witem;
2356
2357 if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
2358 workqueue_list_unlock();
2359 witem = malloc(sizeof(struct _pthread_workitem));
2360 workqueue_list_lock();
2361 } else {
2362 witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
2363 TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
2364 }
2365 return(witem);
2366 }
2367
2368 /* This routine is called with list lock held */
2369 static void
2370 free_workitem(pthread_workitem_t witem)
2371 {
2372 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
2373 }
2374
2375 /* This routine is called with list lock held */
2376 static pthread_workqueue_t
2377 alloc_workqueue(void)
2378 {
2379 pthread_workqueue_t wq;
2380
2381 if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
2382 workqueue_list_unlock();
2383 wq = malloc(sizeof(struct _pthread_workqueue));
2384 workqueue_list_lock();
2385 } else {
2386 wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
2387 TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
2388 }
2389 user_workq_count++;
2390 return(wq);
2391 }
2392
2393 /* This routine is called with list lock held */
2394 static void
2395 free_workqueue(pthread_workqueue_t wq)
2396 {
2397 user_workq_count--;
2398 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
2399 }
2400
2401 static void
2402 _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
2403 {
2404 bzero(wq, sizeof(struct _pthread_workqueue));
2405 if (attr != NULL) {
2406 wq->stacksize = attr->stacksize;
2407 wq->istimeshare = attr->istimeshare;
2408 wq->importance = attr->importance;
2409 wq->affinity = attr->affinity;
2410 wq->queueprio = attr->queueprio;
2411 } else {
2412 wq->stacksize = DEFAULT_STACK_SIZE;
2413 wq->istimeshare = 1;
2414 wq->importance = 0;
2415 wq->affinity = 0;
2416 wq->queueprio = WORK_QUEUE_NORMALIZER;
2417 }
2418 LOCK_INIT(wq->lock);
2419 wq->flags = 0;
2420 TAILQ_INIT(&wq->item_listhead);
2421 TAILQ_INIT(&wq->item_kernhead);
2422 wq->wq_list.tqe_next = 0;
2423 wq->wq_list.tqe_prev = 0;
2424 wq->sig = PTHEAD_WRKQUEUE_SIG;
2425 wq->headp = __pthread_wq_head_tbl[wq->queueprio];
2426 }
2427
2428 int
2429 valid_workq(pthread_workqueue_t workq)
2430 {
2431 if (workq->sig == PTHEAD_WRKQUEUE_SIG)
2432 return(1);
2433 else
2434 return(0);
2435 }
2436
2437
2438 /* called with list lock */
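/*
 * Walks the priority buckets in __pthread_wq_head_tbl from highest priority
 * (index 0) downward, posting pending work items to the kernel via
 * post_nextworkitem() until the kernel queue is full or nothing runnable
 * remains, then drops the workqueue list lock before returning.
 */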
2439 static void
2440 pick_nextworkqueue_droplock()
2441 {
2442 int i, curwqprio, val, found;
2443 pthread_workqueue_head_t headp;
2444 pthread_workqueue_t workq;
2445 pthread_workqueue_t nworkq = NULL;
2446
2447 loop:
2448 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2449 found = 0;
2450 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2451 wqreadyprio = i; /* because there is nothing else higher to run */
2452 headp = __pthread_wq_head_tbl[i];
2453
2454 if (TAILQ_EMPTY(&headp->wqhead))
2455 continue;
2456 workq = headp->next_workq;
2457 if (workq == NULL)
2458 workq = TAILQ_FIRST(&headp->wqhead);
2459 curwqprio = workq->queueprio;
2460 nworkq = workq; /* starting pt */
2461 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2462 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2463 if (headp->next_workq == NULL)
2464 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2465 val = post_nextworkitem(workq);
2466
2467 if (val != 0) {
2468 /* things could have changed, so reassess */
2469 /* If the kernel queue is full, skip */
2470 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2471 break;
2472 /* If anything with higher prio arrived, then reevaluate */
2473 if (wqreadyprio < curwqprio)
2474 goto loop; /* we need to re-evaluate */
2475 /* we can post some more work items */
2476 found = 1;
2477 }
2478
2479 /* cannot use workq here as it could be freed */
2480 if (TAILQ_EMPTY(&headp->wqhead))
2481 break;
2482 /* if we found nothing to run and only one workqueue in the list, skip */
2483 if ((val == 0) && (workq == headp->next_workq))
2484 break;
2485 workq = headp->next_workq;
2486 if (workq == NULL)
2487 workq = TAILQ_FIRST(&headp->wqhead);
2488 if (val != 0)
2489 nworkq = workq;
2490 /* if we found nothing to run and back to workq where we started */
2491 if ((val == 0) && (workq == nworkq))
2492 break;
2493 }
2494 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2495 break;
2496 }
2497 /* nothing found to run? */
2498 if (found == 0)
2499 break;
2500 }
2501 workqueue_list_unlock();
2502 }
2503
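/*
 * Called with the workqueue list lock held.  Looks at the first pending item
 * on the workqueue and either applies a barrier, starts teardown for a
 * destroy item, or hands a regular item to the kernel via
 * __workq_ops(WQOPS_QUEUE_ADD).  Returns 1 if it made progress on an item,
 * 0 if there was nothing it could do (queue suspended or empty, or a barrier
 * already in effect).
 */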
2504 static int
2505 post_nextworkitem(pthread_workqueue_t workq)
2506 {
2507 int error;
2508 pthread_workitem_t witem;
2509 pthread_workqueue_head_t headp;
2510 void (*func)(pthread_workqueue_t, void *);
2511
2512 if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
2513 return(0);
2514 }
2515 if (TAILQ_EMPTY(&workq->item_listhead)) {
2516 return(0);
2517 }
2518 witem = TAILQ_FIRST(&workq->item_listhead);
2519 headp = workq->headp;
2520 if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
2521
2522 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2523 return(0);
2524 }
2525 /* A barrier posted when nothing is outstanding also needs to be handled */
2526 /* Nothing to wait for */
2527 if (workq->kq_count != 0) {
2528 witem->flags |= PTH_WQITEM_APPLIED;
2529 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2530 workq->barrier_count = workq->kq_count;
2531 #if WQ_TRACE
2532 __kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
2533 #endif
2534 return(1);
2535 } else {
2536 #if WQ_TRACE
2537 __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
2538 #endif
2539 if (witem->func != NULL) {
2540 workqueue_list_unlock();
2541 func = witem->func;
2542 (*func)(workq, witem->func_arg);
2543 workqueue_list_lock();
2544 }
2545 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2546 witem->flags = 0;
2547 free_workitem(witem);
2548 return(1);
2549 }
2550 } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
2551 #if WQ_TRACE
2552 __kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
2553 #endif
2554 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2555 return(0);
2556 }
2557 witem->flags |= PTH_WQITEM_APPLIED;
2558 workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
2559 workq->barrier_count = workq->kq_count;
2560 workq->term_callback = witem->func;
2561 workq->term_callarg = witem->func_arg;
2562 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2563 if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
2564 if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
2565 #if WQ_TRACE
2566 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
2567 #endif
2568 }
2569 witem->flags = 0;
2570 free_workitem(witem);
2571 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2572 #if WQ_TRACE
2573 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
2574 #endif
2575 headp = __pthread_wq_head_tbl[workq->queueprio];
2576 if (headp->next_workq == workq) {
2577 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2578 if (headp->next_workq == NULL) {
2579 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2580 if (headp->next_workq == workq)
2581 headp->next_workq = NULL;
2582 }
2583 }
2584 workq->sig = 0;
2585 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2586 if (workq->term_callback != NULL) {
2587 workqueue_list_unlock();
2588 (*workq->term_callback)(workq, workq->term_callarg);
2589 workqueue_list_lock();
2590 }
2591 free_workqueue(workq);
2592 return(1);
2593 } else
2594 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2595 #if WQ_TRACE
2596 __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
2597 #endif
2598 return(1);
2599 } else {
2600 #if WQ_TRACE
2601 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
2602 #endif
2603 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2604 TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
2605 if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
2606 workq->kq_count++;
2607 witem->flags |= PTH_WQITEM_KERN_COUNT;
2608 }
2609 OSAtomicIncrement32(&kernel_workq_count);
2610 workqueue_list_unlock();
2611 if (( error =__workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
2612 OSAtomicDecrement32(&kernel_workq_count);
2613 workqueue_list_lock();
2614 #if WQ_TRACE
2615 __kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
2616 #endif
2617 TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
2618 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2619 if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
2620 workq->flags |= PTHREAD_WORKQ_REQUEUED;
2621 } else
2622 workqueue_list_lock();
2623 #if WQ_TRACE
2624 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
2625 #endif
2626 return(1);
2627 }
2628 /* no one should reach this point */
2629 #if 1
2630 printf("error in logic for next workitem\n");
2631 abort();
2632 #endif
2633 return(0);
2634 }
2635
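/*
 * Entry point for a workqueue thread coming up from the kernel.  With
 * reuse == 0 the brand-new thread's pthread structure is initialized and the
 * thread is added to the global list; with reuse == 1 an existing workqueue
 * thread is recycled and only its TSD is reset.  Either way the work item's
 * function is invoked and the thread then falls into workqueue_exit().
 */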
2636 void
2637 _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
2638 {
2639 int ret;
2640 pthread_attr_t *attrs = &_pthread_attr_default;
2641 pthread_workqueue_t workq;
2642 pthread_t pself;
2643
2644
2645 workq = item->workq;
2646 if (reuse == 0) {
2647 /* reuse is 0 when a thread is newly created to run a workitem */
2648 _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
2649 self->wqthread = 1;
2650 self->parentcheck = 1;
2651
2652 /* These are not joinable threads */
2653 self->detached &= ~PTHREAD_CREATE_JOINABLE;
2654 self->detached |= PTHREAD_CREATE_DETACHED;
2655 #if defined(__i386__) || defined(__x86_64__)
2656 _pthread_set_self(self);
2657 #endif
2658 #if WQ_TRACE
2659 __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
2660 #endif
2661 self->kernel_thread = kport;
2662 self->fun = item->func;
2663 self->arg = item->func_arg;
2664 /* Add to the pthread list */
2665 LOCK(_pthread_list_lock);
2666 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
2667 #if WQ_TRACE
2668 __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
2669 #endif
2670 _pthread_count++;
2671 UNLOCK(_pthread_list_lock);
2672 } else {
2673 /* reuse is 1 when a thread is reused to run another work item */
2674 #if WQ_TRACE
2675 __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
2676 #endif
2677 /* reset all tsd from 1 to KEYS_MAX */
2678 _pthread_tsd_reinit(self);
2679
2680 self->fun = item->func;
2681 self->arg = item->func_arg;
2682 }
2683
2684 #if WQ_DEBUG
2685 if (reuse == 0) {
2686 pself = pthread_self();
2687 if (self != pself) {
2688 #if WQ_TRACE
2689 __kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
2690 #endif
2691 printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
2692 _pthread_set_self(self);
2693 pself = pthread_self();
2694 if (self != pself)
2695 printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
2696 pself = self;
2697 }
2698 } else {
2699 pself = pthread_self();
2700 if (self != pself) {
2701 printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
2702 abort();
2703 }
2704 }
2705 #endif /* WQ_DEBUG */
2706
2707 self->cur_workq = workq;
2708 self->cur_workitem = item;
2709 OSAtomicDecrement32(&kernel_workq_count);
2710
2711 ret = (*self->fun)(self->arg);
2712
2713 workqueue_exit(self, workq, item);
2714
2715 }
2716
2717 static void
2718 workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
2719 {
2720 pthread_attr_t *attrs = &_pthread_attr_default;
2721 pthread_workitem_t baritem;
2722 pthread_workqueue_head_t headp;
2723 void (*func)(pthread_workqueue_t, void *);
2724
2725 workqueue_list_lock();
2726
2727 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
2728 workq->kq_count--;
2729 #if WQ_TRACE
2730 __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
2731 #endif
2732 item->flags = 0;
2733 free_workitem(item);
2734
2735 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
2736 workq->barrier_count--;
2737 #if WQ_TRACE
2738 __kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
2739 #endif
2740 if (workq->barrier_count <= 0 ) {
2741 /* Need to remove barrier item from the list */
2742 baritem = TAILQ_FIRST(&workq->item_listhead);
2743 #if WQ_DEBUG
2744 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
2745 printf("Incorect bar item being removed in barrier processing\n");
2746 #endif /* WQ_DEBUG */
2747 /* if the front item is a barrier and call back is registered, run that */
2748 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
2749 workqueue_list_unlock();
2750 func = baritem->func;
2751 (*func)(workq, baritem->func_arg);
2752 workqueue_list_lock();
2753 }
2754 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
2755 baritem->flags = 0;
2756 free_workitem(baritem);
2757 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2758 #if WQ_TRACE
2759 __kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
2760 #endif
2761 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
2762 headp = __pthread_wq_head_tbl[workq->queueprio];
2763 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2764 #if WQ_TRACE
2765 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
2766 #endif
2767 if (headp->next_workq == workq) {
2768 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2769 if (headp->next_workq == NULL) {
2770 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2771 if (headp->next_workq == workq)
2772 headp->next_workq = NULL;
2773 }
2774 }
2775 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2776 workq->sig = 0;
2777 if (workq->term_callback != NULL) {
2778 workqueue_list_unlock();
2779 (*workq->term_callback)(workq, workq->term_callarg);
2780 workqueue_list_lock();
2781 }
2782 free_workqueue(workq);
2783 } else {
2784 /* if there is a higher-priority schedulable item, reset wqreadyprio */
2785 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
2786 wqreadyprio = workq->queueprio;
2787 }
2788 }
2789 }
2790 #if WQ_TRACE
2791 else {
2792 __kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
2793 }
2794
2795 __kdebug_trace(0x900005c, self, item, 0, 0, 0);
2796 #endif
2797 pick_nextworkqueue_droplock();
2798 _pthread_workq_return(self);
2799 }
2800
2801 static void
2802 _pthread_workq_return(pthread_t self)
2803 {
2804 struct __darwin_pthread_handler_rec *handler;
2805 int value = 0;
2806 int * value_ptr=&value;
2807
2808 /* set cancel state to disable and type to deferred */
2809 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
2810
2811 /* Prevent this thread from receiving any signals */
2812 __disable_threadsignal(1);
2813
2814 while ((handler = self->__cleanup_stack) != 0)
2815 {
2816 (handler->__routine)(handler->__arg);
2817 self->__cleanup_stack = handler->__next;
2818 }
2819 _pthread_tsd_cleanup(self);
2820
2821 __workq_ops(WQOPS_THREAD_RETURN, NULL, 0);
2822
2823 /* This is the way to terminate the thread */
2824 _pthread_exit(self, NULL);
2825 }
2826
2827
2828 /* returns 0 if it handles it, otherwise 1 */
2829 static int
2830 handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
2831 {
2832 pthread_workitem_t baritem;
2833 pthread_workqueue_head_t headp;
2834 void (*func)(pthread_workqueue_t, void *);
2835
2836 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
2837 workq->barrier_count--;
2838 if (workq->barrier_count <= 0 ) {
2839 /* Need to remove barrier item from the list */
2840 baritem = TAILQ_FIRST(&workq->item_listhead);
2841 #if WQ_DEBUG
2842 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
2843 printf("Incorect bar item being removed in barrier processing\n");
2844 #endif /* WQ_DEBUG */
2845 /* if the front item is a barrier and call back is registered, run that */
2846 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
2847 && (baritem->func != NULL)) {
2848 workqueue_list_unlock();
2849 func = baritem->func;
2850 (*func)(workq, baritem->func_arg);
2851 workqueue_list_lock();
2852 }
2853 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
2854 baritem->flags = 0;
2855 free_workitem(baritem);
2856 item->flags = 0;
2857 free_workitem(item);
2858 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2859 #if WQ_TRACE
2860 __kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
2861 #endif
2862 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
2863 headp = __pthread_wq_head_tbl[workq->queueprio];
2864 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2865 #if WQ_TRACE
2866 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
2867 #endif
2868 if (headp->next_workq == workq) {
2869 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2870 if (headp->next_workq == NULL) {
2871 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2872 if (headp->next_workq == workq)
2873 headp->next_workq = NULL;
2874 }
2875 }
2876 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2877 workq->sig = 0;
2878 if (workq->term_callback != NULL) {
2879 workqueue_list_unlock();
2880 (*workq->term_callback)(workq, workq->term_callarg);
2881 workqueue_list_lock();
2882 }
2883 free_workqueue(workq);
2884 pick_nextworkqueue_droplock();
2885 return(0);
2886 } else {
2887 /* if there is a higher-priority schedulable item, reset wqreadyprio */
2888 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
2889 wqreadyprio = workq->queueprio;
2890 free_workitem(item);
2891 pick_nextworkqueue_droplock();
2892 return(0);
2893 }
2894 }
2895 }
2896 return(1);
2897 }
2898 /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
2899
2900 int
2901 pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
2902 {
2903 pthread_workqueue_t wq;
2904 pthread_workqueue_head_t headp;
2905
2906 if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
2907 return(EINVAL);
2908 }
2909
2910 if (__is_threaded == 0)
2911 __is_threaded = 1;
2912
2913 workqueue_list_lock();
2914 if (kernel_workq_setup == 0) {
2915 int ret = _pthread_work_internal_init();
2916 if (ret != 0) {
2917 workqueue_list_unlock();
2918 return(ret);
2919 }
2920 }
2921
2922 wq = alloc_workqueue();
2923
2924 _pthread_workq_init(wq, attr);
2925
2926 headp = __pthread_wq_head_tbl[wq->queueprio];
2927 TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
2928 if (headp->next_workq == NULL) {
2929 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2930 }
2931
2932 workqueue_list_unlock();
2933
2934 *workqp = wq;
2935
2936 return(0);
2937 }
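/*
 * Illustrative sketch (not part of the original file): creating a work queue
 * with default attributes.  Passing NULL for the attributes selects
 * DEFAULT_STACK_SIZE and the normal queue priority.
 *
 *	pthread_workqueue_t wq;
 *	int err = pthread_workqueue_create_np(&wq, NULL);
 *	if (err != 0)
 *		;	// EINVAL for a bad attribute, ENOMEM if kernel setup failed
 */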
2938
2939 int
2940 pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
2941 {
2942 pthread_workitem_t witem;
2943 pthread_workqueue_head_t headp;
2944
2945 if (valid_workq(workq) == 0) {
2946 return(EINVAL);
2947 }
2948
2949 workqueue_list_lock();
2950
2951 /*
2952 * Allocate the work item here because alloc_workitem() can drop the lock;
2953 * that way the workqueue state needs to be evaluated only once.
2954 */
2955 witem = alloc_workitem();
2956 witem->item_entry.tqe_next = 0;
2957 witem->item_entry.tqe_prev = 0;
2958 witem->func = callback_func;
2959 witem->func_arg = callback_arg;
2960 witem->flags = PTH_WQITEM_DESTROY;
2961
2962 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
2963 workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
2964 /* If nothing queued or running, destroy now */
2965 if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
2966 workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
2967 headp = __pthread_wq_head_tbl[workq->queueprio];
2968 workq->term_callback = callback_func;
2969 workq->term_callarg = callback_arg;
2970 if (headp->next_workq == workq) {
2971 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2972 if (headp->next_workq == NULL) {
2973 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2974 if (headp->next_workq == workq)
2975 headp->next_workq = NULL;
2976 }
2977 }
2978 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2979 workq->sig = 0;
2980 free_workitem(witem);
2981 if (workq->term_callback != NULL) {
2982 workqueue_list_unlock();
2983 (*workq->term_callback)(workq, workq->term_callarg);
2984 workqueue_list_lock();
2985 }
2986 #if WQ_TRACE
2987 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
2988 #endif
2989 free_workqueue(workq);
2990 workqueue_list_unlock();
2991 return(0);
2992 }
2993 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
2994 } else {
2995 free_workitem(witem);
2996 workqueue_list_unlock();
2997 return(EINPROGRESS);
2998 }
2999 workqueue_list_unlock();
3000 return(0);
3001 }
3002
3003
3004 int
3005 pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
3006 {
3007 pthread_workitem_t witem;
3008
3009 if (valid_workq(workq) == 0) {
3010 return(EINVAL);
3011 }
3012
3013 workqueue_list_lock();
3014
3015 /*
3016 * Allocate the work item here because alloc_workitem() can drop the lock;
3017 * that way the workqueue state needs to be evaluated only once.
3018 */
3019 witem = alloc_workitem();
3020 witem->func = workitem_func;
3021 witem->func_arg = workitem_arg;
3022 witem->flags = 0;
3023 witem->workq = workq;
3024 witem->item_entry.tqe_next = 0;
3025 witem->item_entry.tqe_prev = 0;
3026
3027 /* alloc workitem can drop the lock, check the state */
3028 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3029 free_workitem(witem);
3030 workqueue_list_unlock();
3031 *itemhandlep = 0;
3032 return(ESRCH);
3033 }
3034
3035 if (itemhandlep != NULL)
3036 *itemhandlep = (pthread_workitem_handle_t *)witem;
3037 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3038 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3039 wqreadyprio = workq->queueprio;
3040
3041 pick_nextworkqueue_droplock();
3042
3043 return(0);
3044 }
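/*
 * Illustrative sketch (not part of the original file): queueing an item on a
 * work queue created as above.  "do_work" and "some_arg" are placeholder
 * names; the returned handle may be passed to
 * pthread_workqueue_removeitem_np() while the item is still pending.
 *
 *	static void do_work(void *arg) { ... }
 *
 *	pthread_workitem_handle_t handle;
 *	pthread_workqueue_additem_np(wq, do_work, some_arg, &handle);
 */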
3045
3046 int
3047 pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
3048 {
3049 pthread_workitem_t item, baritem;
3050 pthread_workqueue_head_t headp;
3051 int error;
3052
3053 if (valid_workq(workq) == 0) {
3054 return(EINVAL);
3055 }
3056
3057 workqueue_list_lock();
3058 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3059 workqueue_list_unlock();
3060 return(ESRCH);
3061 }
3062
3063 TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
3064 if (item == (pthread_workitem_t)itemhandle) {
3065 TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
3066 if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
3067 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
3068 workq->barrier_count = 0;
3069 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
3070 wqreadyprio = workq->queueprio;
3071 }
3072 } else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
3073 workq->kq_count--;
3074 item->flags |= PTH_WQITEM_REMOVED;
3075 if (handle_removeitem(workq, item) == 0)
3076 return(0);
3077 }
3078 item->flags |= PTH_WQITEM_NOTINLIST;
3079 free_workitem(item);
3080 workqueue_list_unlock();
3081 return(0);
3082 }
3083 }
3084
3085 TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
3086 if (item == (pthread_workitem_t)itemhandle) {
3087 workqueue_list_unlock();
3088 if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
3089 workqueue_list_lock();
3090 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
3091 OSAtomicDecrement32(&kernel_workq_count);
3092 workq->kq_count--;
3093 item->flags |= PTH_WQITEM_REMOVED;
3094 if (handle_removeitem(workq, item) != 0) {
3095 free_workitem(item);
3096 pick_nextworkqueue_droplock();
3097 }
3098 return(0);
3099 } else {
3100 workqueue_list_unlock();
3101 return(EBUSY);
3102 }
3103 }
3104 }
3105 workqueue_list_unlock();
3106 return(EINVAL);
3107 }
3108
3109
3110 int
3111 pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
3112 {
3113 pthread_workitem_t witem;
3114
3115 if (valid_workq(workq) == 0) {
3116 return(EINVAL);
3117 }
3118
3119 workqueue_list_lock();
3120
3121 /*
3122 * Allocate the work item here because alloc_workitem() can drop the lock;
3123 * that way the workqueue state needs to be evaluated only once.
3124 */
3125 witem = alloc_workitem();
3126 witem->item_entry.tqe_next = 0;
3127 witem->item_entry.tqe_prev = 0;
3128 witem->func = callback_func;
3129 witem->func_arg = callback_arg;
3130 witem->flags = PTH_WQITEM_BARRIER;
3131
3132 /* alloc workitem can drop the lock, check the state */
3133 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3134 free_workitem(witem);
3135 workqueue_list_unlock();
3136 return(ESRCH);
3137 }
3138
3139 if (itemhandlep != NULL)
3140 *itemhandlep = (pthread_workitem_handle_t *)witem;
3141
3142 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3143 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3144 wqreadyprio = workq->queueprio;
3145
3146 pick_nextworkqueue_droplock();
3147
3148 return(0);
3149 }
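/*
 * Illustrative sketch (not part of the original file): a barrier forces
 * everything queued ahead of it to drain before later items run; the
 * optional callback fires when the barrier itself is processed.
 * "barrier_done" is a placeholder name.
 *
 *	static void barrier_done(pthread_workqueue_t q, void *arg) { ... }
 *
 *	pthread_workqueue_addbarrier_np(wq, barrier_done, NULL, 0, NULL);
 */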
3150
3151 int
3152 pthread_workqueue_suspend_np(pthread_workqueue_t workq)
3153 {
3154 if (valid_workq(workq) == 0) {
3155 return(EINVAL);
3156 }
3157 workqueue_list_lock();
3158 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3159 workqueue_list_unlock();
3160 return(ESRCH);
3161 }
3162
3163 workq->flags |= PTHREAD_WORKQ_SUSPEND;
3164 workq->suspend_count++;
3165 workqueue_list_unlock();
3166 return(0);
3167 }
3168
3169 int
3170 pthread_workqueue_resume_np(pthread_workqueue_t workq)
3171 {
3172 if (valid_workq(workq) == 0) {
3173 return(EINVAL);
3174 }
3175 workqueue_list_lock();
3176 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3177 workqueue_list_unlock();
3178 return(ESRCH);
3179 }
3180
3181 workq->suspend_count--;
3182 if (workq->suspend_count <= 0) {
3183 workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
3184 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3185 wqreadyprio = workq->queueprio;
3186
3187 pick_nextworkqueue_droplock();
3188 } else
3189 workqueue_list_unlock();
3190
3191
3192 return(0);
3193 }
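/*
 * Illustrative sketch (not part of the original file): suspend/resume calls
 * nest, so every pthread_workqueue_suspend_np() must be balanced by a
 * pthread_workqueue_resume_np() before the queue dispatches items again.
 *
 *	pthread_workqueue_suspend_np(wq);	// stop handing items to the kernel
 *	...
 *	pthread_workqueue_resume_np(wq);	// last resume restarts dispatch
 */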
3194
3195 #else /* !BUILDING_VARIANT ] [ */
3196 extern int __unix_conforming;
3197 extern int _pthread_count;
3198 extern pthread_lock_t _pthread_list_lock;
3199 extern void _pthread_testcancel(pthread_t thread, int isconforming);
3200 extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);
3201
3202 #endif /* !BUILDING_VARIANT ] */
3203
3204 #if __DARWIN_UNIX03
3205
3206 __private_extern__ void
3207 __posix_join_cleanup(void *arg)
3208 {
3209 pthread_t thread = (pthread_t)arg;
3210 int already_exited, res;
3211 void * dummy;
3212 semaphore_t death;
3213 mach_port_t joinport;
3214 int newstyle = 0;
3215
3216 LOCK(thread->lock);
3217 already_exited = (thread->detached & _PTHREAD_EXITED);
3218
3219 newstyle = thread->newstyle;
3220
3221 #if WQ_TRACE
3222 __kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
3223 #endif
3224 if (newstyle == 0) {
3225 death = thread->death;
3226 if (!already_exited){
3227 thread->joiner = (struct _pthread *)NULL;
3228 UNLOCK(thread->lock);
3229 restore_sem_to_pool(death);
3230 } else {
3231 UNLOCK(thread->lock);
3232 while ((res = _pthread_reap_thread(thread,
3233 thread->kernel_thread,
3234 &dummy, 1)) == EAGAIN)
3235 {
3236 sched_yield();
3237 }
3238 restore_sem_to_pool(death);
3239
3240 }
3241
3242 } else {
3243 /* leave another thread to join */
3244 thread->joiner = (struct _pthread *)NULL;
3245 UNLOCK(thread->lock);
3246 }
3247 }
3248
3249 #endif /* __DARWIN_UNIX03 */
3250
3251
3252 /*
3253 * Wait for a thread to terminate and obtain its exit value.
3254 */
3255 /*
3256 int
3257 pthread_join(pthread_t thread,
3258 void **value_ptr)
3259
3260 moved to pthread_cancelable.c */
3261
3262 /*
3263 * Cancel a thread
3264 */
3265 int
3266 pthread_cancel(pthread_t thread)
3267 {
3268 #if __DARWIN_UNIX03
3269 if (__unix_conforming == 0)
3270 __unix_conforming = 1;
3271 #endif /* __DARWIN_UNIX03 */
3272
3273 if (_pthread_lookup_thread(thread, NULL, 0) != 0)
3274 return(ESRCH);
3275
3276 #if __DARWIN_UNIX03
3277 int state;
3278
3279 LOCK(thread->lock);
3280 state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3281 UNLOCK(thread->lock);
3282 if (state & PTHREAD_CANCEL_ENABLE)
3283 __pthread_markcancel(thread->kernel_thread);
3284 #else /* __DARWIN_UNIX03 */
3285 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3286 #endif /* __DARWIN_UNIX03 */
3287 return (0);
3288 }
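/*
 * Illustrative sketch (not part of the original file): requesting
 * cancellation of another thread ("tid" is a placeholder).  Whether the
 * target actually terminates depends on its cancel state and type and, in
 * the conforming case, on it reaching a cancellation point.
 *
 *	if (pthread_cancel(tid) == ESRCH)
 *		;	// tid does not name a known thread
 */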
3289
3290 void
3291 pthread_testcancel(void)
3292 {
3293 pthread_t self = pthread_self();
3294
3295 #if __DARWIN_UNIX03
3296 if (__unix_conforming == 0)
3297 __unix_conforming = 1;
3298 _pthread_testcancel(self, 1);
3299 #else /* __DARWIN_UNIX03 */
3300 _pthread_testcancel(self, 0);
3301 #endif /* __DARWIN_UNIX03 */
3302
3303 }
3304
3305
3306 /*
3307 * Query/update the cancelability 'state' of a thread
3308 */
3309 int
3310 pthread_setcancelstate(int state, int *oldstate)
3311 {
3312 #if __DARWIN_UNIX03
3313 if (__unix_conforming == 0) {
3314 __unix_conforming = 1;
3315 }
3316 return (_pthread_setcancelstate_internal(state, oldstate, 1));
3317 #else /* __DARWIN_UNIX03 */
3318 return (_pthread_setcancelstate_internal(state, oldstate, 0));
3319 #endif /* __DARWIN_UNIX03 */
3320
3321 }
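/*
 * Illustrative sketch (not part of the original file): shielding a critical
 * region from cancellation and then restoring the caller's previous state.
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	...				// must not be torn down mid-way by a cancel
 *	pthread_setcancelstate(old, NULL);
 */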
3322
3323
3324
3325 /*
3326 * Query/update the cancelability 'type' of a thread
3327 */
3328 int
3329 pthread_setcanceltype(int type, int *oldtype)
3330 {
3331 pthread_t self = pthread_self();
3332
3333 #if __DARWIN_UNIX03
3334 if (__unix_conforming == 0)
3335 __unix_conforming = 1;
3336 #endif /* __DARWIN_UNIX03 */
3337
3338 if ((type != PTHREAD_CANCEL_DEFERRED) &&
3339 (type != PTHREAD_CANCEL_ASYNCHRONOUS))
3340 return EINVAL;
3341 self = pthread_self();
3342 LOCK(self->lock);
3343 if (oldtype)
3344 *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
3345 self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
3346 self->cancel_state |= type;
3347 UNLOCK(self->lock);
3348 #if !__DARWIN_UNIX03
3349 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
3350 #endif /* __DARWIN_UNIX03 */
3351 return (0);
3352 }
3353
3354 int
3355 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
3356 {
3357 #if __DARWIN_UNIX03
3358 int err = 0;
3359
3360 if (__pthread_sigmask(how, set, oset) == -1) {
3361 err = errno;
3362 }
3363 return(err);
3364 #else /* __DARWIN_UNIX03 */
3365 return(__pthread_sigmask(how, set, oset));
3366 #endif /* __DARWIN_UNIX03 */
3367 }
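/*
 * Illustrative sketch (not part of the original file): blocking SIGUSR1 in
 * the calling thread.  In the __DARWIN_UNIX03 path above the error number is
 * returned directly rather than being left in errno.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	int err = pthread_sigmask(SIG_BLOCK, &set, NULL);
 */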
3368
3369 /*
3370 int
3371 sigwait(const sigset_t * set, int * sig)
3372
3373 moved to pthread_cancelable.c */