apple/libc (Libc-594.9.1) pthreads/pthread.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
54
55 #include <assert.h>
56 #include <stdio.h> /* For printf(). */
57 #include <stdlib.h>
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <signal.h>
60 #include <sys/time.h>
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
64 #include <machine/vmparam.h>
65 #include <mach/vm_statistics.h>
66 #define __APPLE_API_PRIVATE
67 #include <machine/cpu_capabilities.h>
68 #include <libkern/OSAtomic.h>
69 #if defined(__ppc__)
70 #include <libkern/OSCrossEndian.h>
71 #endif
72
73
74 extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
75 extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
76
77 #ifndef BUILDING_VARIANT /* [ */
78
79 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
80
81
82
83 int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
84
85 /* Per-thread kernel support */
86 extern void _pthread_set_self(pthread_t);
87 extern void mig_init(int);
88 static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
89 static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
90 static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
91 static void _pthread_tsd_reinit(pthread_t t);
92 static int _new_pthread_create_suspended(pthread_t *thread,
93 const pthread_attr_t *attr,
94 void *(*start_routine)(void *),
95 void *arg,
96 int create_susp);
97
98 /* Get CPU capabilities from the kernel */
99 __private_extern__ void _init_cpu_capabilities(void);
100
101 /* Needed to tell the malloc subsystem we're going multithreaded */
102 extern void set_malloc_singlethreaded(int);
103
104 /* Used when we need to call into the kernel with no reply port */
105 extern pthread_lock_t reply_port_lock;
106 int _pthread_find_thread(pthread_t thread);
107
108 /* Mach message used to notify that a thread needs to be reaped */
109
110 typedef struct _pthread_reap_msg_t {
111 mach_msg_header_t header;
112 pthread_t thread;
113 mach_msg_trailer_t trailer;
114 } pthread_reap_msg_t;
115
116 /* We'll implement this when the main thread is a pthread */
117 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
118 static struct _pthread _thread = {0};
119
120 /* This global should be used (carefully) by anyone needing to know if a
121 ** pthread has been created.
122 */
123 int __is_threaded = 0;
124 /* _pthread_count is protected by _pthread_list_lock */
125 static int _pthread_count = 1;
126 int __unix_conforming = 0;
127 __private_extern__ size_t pthreadsize = 0;
128
129 /* under rosetta we will use old style creation of threads */
130 static int __oldstyle = 0;
131
132 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
133
134 /* Same implementation as LOCK, but without the __is_threaded check */
135 int _spin_tries = 0;
136 extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
137 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
138 {
139 int tries = _spin_tries;
140 do {
141 if (tries-- > 0)
142 continue;
143 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
144 tries = _spin_tries;
145 } while(!_spin_lock_try(lock));
146 }
147
148 extern mach_port_t thread_recycle_port;
149
150 /* These are used to keep track of a semaphore pool shared by mutexes and condition
151 ** variables.
152 */
153
154 static semaphore_t *sem_pool = NULL;
155 static int sem_pool_count = 0;
156 static int sem_pool_current = 0;
157 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
158
159 static int default_priority;
160 static int max_priority;
161 static int min_priority;
162 static int pthread_concurrency;
163
164 static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
165
166 static void _pthread_exit(pthread_t self, void *value_ptr);
167 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
168 static pthread_attr_t _pthread_attr_default = {0};
169 static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
170 static int kernel_workq_setup = 0;
171 static volatile int32_t kernel_workq_count = 0;
172 static volatile unsigned int user_workq_count = 0;
173 #define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kernel */
174 static int wqreadyprio = 0; /* current highest prio queue ready with items */
175
176 static int __pthread_workqueue_affinity = 1; /* 0 means no affinity */
177 __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
178 __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
179
180 struct _pthread_workqueue_head __pthread_workq0_head;
181 struct _pthread_workqueue_head __pthread_workq1_head;
182 struct _pthread_workqueue_head __pthread_workq2_head;
183 pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head};
184
185 static void workqueue_list_lock(void);
186 static void workqueue_list_unlock(void);
187 static int valid_workq(pthread_workqueue_t);
188 static void pick_nextworkqueue_droplock(void);
189 static int post_nextworkitem(pthread_workqueue_t workq);
190 static void _pthread_workq_return(pthread_t self);
191 static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
192 extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
193 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
194 static pthread_workitem_t alloc_workitem(void);
195 static void free_workitem(pthread_workitem_t);
196 static pthread_workqueue_t alloc_workqueue(void);
197 static void free_workqueue(pthread_workqueue_t);
198 static int _pthread_work_internal_init(void);
199 static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
200
201 void pthread_workqueue_atfork_prepare(void);
202 void pthread_workqueue_atfork_parent(void);
203 void pthread_workqueue_atfork_child(void);
204
205 extern void dispatch_atfork_prepare(void);
206 extern void dispatch_atfork_parent(void);
207 extern void dispatch_atfork_child(void);
208
209 /* workq_kernreturn commands */
210 #define WQOPS_QUEUE_ADD 1
211 #define WQOPS_QUEUE_REMOVE 2
212 #define WQOPS_THREAD_RETURN 4
213 #define WQOPS_THREAD_SETCONC 8
214
215 /*
216 * Flags field passed to bsdthread_create and back in pthread_start
217 31 <---------------------------------> 0
218 _________________________________________
219 | flags(8) | policy(8) | importance(16) |
220 -----------------------------------------
221 */
222 __private_extern__
223 void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
224
225 __private_extern__
226 void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
227
228 #define PTHREAD_START_CUSTOM 0x01000000
229 #define PTHREAD_START_SETSCHED 0x02000000
230 #define PTHREAD_START_DETACHED 0x04000000
231 #define PTHREAD_START_POLICY_BITSHIFT 16
232 #define PTHREAD_START_POLICY_MASK 0xff
233 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
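
/*
 * Packing sketch (illustrative, not part of the original source): how a flags
 * word matching the layout diagram above could be built and decoded with the
 * masks defined here. The example_* helper names are hypothetical.
 */
#if 0 /* example only */
static unsigned int
example_pack_start_flags(int detached, int policy, int importance)
{
	unsigned int flags = 0;

	if (detached == PTHREAD_CREATE_DETACHED)
		flags |= PTHREAD_START_DETACHED;
	flags |= PTHREAD_START_SETSCHED;
	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
	return flags;
}

static void
example_unpack_start_flags(unsigned int pflags, int *policy, int *importance)
{
	/* Decode the policy(8) and importance(16) fields shown above. */
	*policy = (pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK;
	*importance = pflags & PTHREAD_START_IMPORTANCE_MASK;
}
#endif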
234
235 static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
236 extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
237 extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int),__uint64_t);
238 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
239 extern __uint64_t __thread_selfid( void );
240 extern int __pthread_canceled(int);
241 extern void _pthread_keys_init(void);
242 extern int __pthread_kill(mach_port_t, int);
243 extern int __pthread_markcancel(int);
244 extern int __workq_open(void);
245
246 #define WORKQUEUE_OVERCOMMIT 0x10000
247
248 extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
249
250 #if defined(__ppc__) || defined(__ppc64__)
251 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
252 #elif defined(__i386__) || defined(__x86_64__)
253 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
254 #elif defined(__arm__)
255 static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
256 #else
257 #error Need to define a stack address hint for this architecture
258 #endif
259
260 /* Set the base address to use as the stack pointer, before adjusting for the ABI.
261 * The guard pages for stack-overflow protection are also allocated here.
262 * If the stack was already allocated (stackaddr in attr), then no guard pages are
263 * set up for the thread.
264 */
265
266 static int
267 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
268 {
269 kern_return_t kr;
270 vm_address_t stackaddr;
271 size_t guardsize;
272
273 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
274 if (attrs->stackaddr != NULL) {
275 /* No guard pages setup in this case */
276 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
277 *stack = attrs->stackaddr;
278 return 0;
279 }
280
281 guardsize = attrs->guardsize;
282 stackaddr = PTHREAD_STACK_HINT;
283 kr = vm_map(mach_task_self(), &stackaddr,
284 attrs->stacksize + guardsize,
285 vm_page_size-1,
286 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
287 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
288 VM_INHERIT_DEFAULT);
289 if (kr != KERN_SUCCESS)
290 kr = vm_allocate(mach_task_self(),
291 &stackaddr, attrs->stacksize + guardsize,
292 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
293 if (kr != KERN_SUCCESS) {
294 return EAGAIN;
295 }
296 /* The guard page is at the lowest address */
297 /* The stack base is the highest address */
298 if (guardsize)
299 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
300 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
301 return 0;
302 }
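
/*
 * Usage sketch (illustrative, not part of the original source): a caller-
 * supplied stack set via pthread_attr_setstackaddr() is used as-is, so no
 * guard page is created for it, per the comment above. Assumes valloc()
 * from <stdlib.h> for a page-aligned region; the helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_attr_with_caller_stack(pthread_attr_t *attr, size_t size)
{
	/* size should be a multiple of vm_page_size and >= PTHREAD_STACK_MIN */
	void *stack = valloc(size);	/* page-aligned, as required above */

	if (stack == NULL)
		return ENOMEM;
	pthread_attr_init(attr);
	pthread_attr_setstacksize(attr, size);
	/* The highest address is passed; see pthread_attr_setstackaddr() below. */
	return pthread_attr_setstackaddr(attr, (char *)stack + size);
}
#endif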
303
304 static int
305 _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
306 {
307 kern_return_t kr;
308 pthread_t t;
309 vm_address_t stackaddr;
310 size_t guardsize, allocsize;
311
312 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
313
314 if (attrs->stackaddr != NULL) {
315 /* No guard pages setup in this case */
316 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
317 *stack = attrs->stackaddr;
318 t = (pthread_t)malloc(pthreadsize);
319 _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
320 t->freeStackOnExit = 0;
321 t->freeaddr = 0;
322 t->freesize = 0;
323 *thread = t;
324 return 0;
325 }
326
327 guardsize = attrs->guardsize;
328 allocsize = attrs->stacksize + guardsize + pthreadsize;
329 stackaddr = PTHREAD_STACK_HINT;
330 kr = vm_map(mach_task_self(), &stackaddr,
331 allocsize,
332 vm_page_size-1,
333 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
334 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
335 VM_INHERIT_DEFAULT);
336 if (kr != KERN_SUCCESS)
337 kr = vm_allocate(mach_task_self(),
338 &stackaddr, allocsize,
339 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
340 if (kr != KERN_SUCCESS) {
341 return EAGAIN;
342 }
343 /* The guard page is at the lowest address */
344 /* The stack base is the highest address */
345 if (guardsize)
346 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
347
348
349 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
350
351 t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
352 _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
353 t->kernalloc = 0;
354 t->freesize = allocsize;
355 t->freeaddr = (void *)stackaddr;
356 t->freeStackOnExit = 1;
357 *thread = t;
358
359 return 0;
360 }
361
362 static kern_return_t
363 _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
364 {
365 kern_return_t res = 0;
366 vm_address_t freeaddr;
367 size_t freesize;
368 task_t self = mach_task_self();
369 int thread_count;
370 mach_port_t kport;
371 semaphore_t joinsem = SEMAPHORE_NULL;
372
373 #if PTH_TRACE
374 __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
375 #endif
376 kport = t->kernel_thread;
377 joinsem = t->joiner_notify;
378
379 if (t->freeStackOnExit) {
380 freeaddr = (vm_address_t)t->freeaddr;
381 if (freestruct)
382 freesize = t->stacksize + t->guardsize + pthreadsize;
383 else
384 freesize = t->stacksize + t->guardsize;
385 if (termthread) {
386 mig_dealloc_reply_port(MACH_PORT_NULL);
387 LOCK(_pthread_list_lock);
388 if (freestruct != 0) {
389 TAILQ_REMOVE(&__pthread_head, t, plist);
390 /* if parent has not returned from create yet keep pthread_t */
391 #if PTH_LISTTRACE
392 __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
393 #endif
394 if (t->parentcheck == 0)
395 freesize -= pthreadsize;
396 }
397 t->childexit = 1;
398 thread_count = --_pthread_count;
399 UNLOCK(_pthread_list_lock);
400
401 #if PTH_TRACE
402 __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
403 #endif
404 if (thread_count <=0)
405 exit(0);
406 else
407 __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
408 LIBC_ABORT("thread %p didn't terminate", t);
409 } else {
410 #if PTH_TRACE
411 __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
412 #endif
413 res = vm_deallocate(mach_task_self(), freeaddr, freesize);
414 }
415 } else {
416 if (termthread) {
417 mig_dealloc_reply_port(MACH_PORT_NULL);
418 LOCK(_pthread_list_lock);
419 if (freestruct != 0) {
420 TAILQ_REMOVE(&__pthread_head, t, plist);
421 #if PTH_LISTTRACE
422 __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
423 #endif
424 }
425 thread_count = --_pthread_count;
426 t->childexit = 1;
427 UNLOCK(_pthread_list_lock);
428
429 if (freestruct) {
430 #if PTH_TRACE
431 __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
432 #endif
433 free(t);
434 }
435
436 freeaddr = 0;
437 freesize = 0;
438 #if PTH_TRACE
439 __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
440 #endif
441
442 if (thread_count <=0)
443 exit(0);
444 else
445 __bsdthread_terminate(NULL, 0, kport, joinsem);
446 LIBC_ABORT("thread %p didn't terminate", t);
447 } else if (freestruct) {
448 t->sig = _PTHREAD_NO_SIG;
449 #if PTH_TRACE
450 __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
451 #endif
452 free(t);
453 }
454 }
455 return(res);
456 }
457
458
459
460 /*
461 * Destroy a thread attribute structure
462 */
463 int
464 pthread_attr_destroy(pthread_attr_t *attr)
465 {
466 if (attr->sig == _PTHREAD_ATTR_SIG)
467 {
468 attr->sig = 0;
469 return (0);
470 } else
471 {
472 return (EINVAL); /* Not an attribute structure! */
473 }
474 }
475
476 /*
477 * Get the 'detach' state from a thread attribute structure.
478 * Note: written as a helper function for info hiding
479 */
480 int
481 pthread_attr_getdetachstate(const pthread_attr_t *attr,
482 int *detachstate)
483 {
484 if (attr->sig == _PTHREAD_ATTR_SIG)
485 {
486 *detachstate = attr->detached;
487 return (0);
488 } else
489 {
490 return (EINVAL); /* Not an attribute structure! */
491 }
492 }
493
494 /*
495 * Get the 'inherit scheduling' info from a thread attribute structure.
496 * Note: written as a helper function for info hiding
497 */
498 int
499 pthread_attr_getinheritsched(const pthread_attr_t *attr,
500 int *inheritsched)
501 {
502 if (attr->sig == _PTHREAD_ATTR_SIG)
503 {
504 *inheritsched = attr->inherit;
505 return (0);
506 } else
507 {
508 return (EINVAL); /* Not an attribute structure! */
509 }
510 }
511
512 /*
513 * Get the scheduling parameters from a thread attribute structure.
514 * Note: written as a helper function for info hiding
515 */
516 int
517 pthread_attr_getschedparam(const pthread_attr_t *attr,
518 struct sched_param *param)
519 {
520 if (attr->sig == _PTHREAD_ATTR_SIG)
521 {
522 *param = attr->param;
523 return (0);
524 } else
525 {
526 return (EINVAL); /* Not an attribute structure! */
527 }
528 }
529
530 /*
531 * Get the scheduling policy from a thread attribute structure.
532 * Note: written as a helper function for info hiding
533 */
534 int
535 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
536 int *policy)
537 {
538 if (attr->sig == _PTHREAD_ATTR_SIG)
539 {
540 *policy = attr->policy;
541 return (0);
542 } else
543 {
544 return (EINVAL); /* Not an attribute structure! */
545 }
546 }
547
548 /* Retain the existing stack size of 512K and do not depend on the main thread's default stack size */
549 static const size_t DEFAULT_STACK_SIZE = (512*1024);
550 /*
551 * Initialize a thread attribute structure to default values.
552 */
553 int
554 pthread_attr_init(pthread_attr_t *attr)
555 {
556 attr->stacksize = DEFAULT_STACK_SIZE;
557 attr->stackaddr = NULL;
558 attr->sig = _PTHREAD_ATTR_SIG;
559 attr->param.sched_priority = default_priority;
560 attr->param.quantum = 10; /* quantum isn't public yet */
561 attr->detached = PTHREAD_CREATE_JOINABLE;
562 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
563 attr->policy = _PTHREAD_DEFAULT_POLICY;
564 attr->freeStackOnExit = 1;
565 attr->fastpath = 1;
566 attr->schedset = 0;
567 attr->guardsize = vm_page_size;
568 return (0);
569 }
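
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller builds on the defaults above, overriding only what it needs. The
 * worker() routine and the 1 MB stack size are hypothetical.
 */
#if 0 /* example only */
static void *worker(void *arg) { return arg; }

static int
example_create_detached(pthread_t *out)
{
	pthread_attr_t attr;
	int err;

	pthread_attr_init(&attr);				/* defaults from above */
	pthread_attr_setstacksize(&attr, 1024 * 1024);		/* page multiple, >= PTHREAD_STACK_MIN */
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	err = pthread_create(out, &attr, worker, NULL);
	pthread_attr_destroy(&attr);
	return err;
}
#endif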
570
571 /*
572 * Set the 'detach' state in a thread attribute structure.
573 * Note: written as a helper function for info hiding
574 */
575 int
576 pthread_attr_setdetachstate(pthread_attr_t *attr,
577 int detachstate)
578 {
579 if (attr->sig == _PTHREAD_ATTR_SIG)
580 {
581 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
582 (detachstate == PTHREAD_CREATE_DETACHED))
583 {
584 attr->detached = detachstate;
585 return (0);
586 } else
587 {
588 return (EINVAL);
589 }
590 } else
591 {
592 return (EINVAL); /* Not an attribute structure! */
593 }
594 }
595
596 /*
597 * Set the 'inherit scheduling' state in a thread attribute structure.
598 * Note: written as a helper function for info hiding
599 */
600 int
601 pthread_attr_setinheritsched(pthread_attr_t *attr,
602 int inheritsched)
603 {
604 if (attr->sig == _PTHREAD_ATTR_SIG)
605 {
606 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
607 (inheritsched == PTHREAD_EXPLICIT_SCHED))
608 {
609 attr->inherit = inheritsched;
610 return (0);
611 } else
612 {
613 return (EINVAL);
614 }
615 } else
616 {
617 return (EINVAL); /* Not an attribute structure! */
618 }
619 }
620
621 /*
622 * Set the scheduling parameters in a thread attribute structure.
623 * Note: written as a helper function for info hiding
624 */
625 int
626 pthread_attr_setschedparam(pthread_attr_t *attr,
627 const struct sched_param *param)
628 {
629 if (attr->sig == _PTHREAD_ATTR_SIG)
630 {
631 /* TODO: Validate sched_param fields */
632 attr->param = *param;
633 attr->schedset = 1;
634 return (0);
635 } else
636 {
637 return (EINVAL); /* Not an attribute structure! */
638 }
639 }
640
641 /*
642 * Set the scheduling policy in a thread attribute structure.
643 * Note: written as a helper function for info hiding
644 */
645 int
646 pthread_attr_setschedpolicy(pthread_attr_t *attr,
647 int policy)
648 {
649 if (attr->sig == _PTHREAD_ATTR_SIG)
650 {
651 if ((policy == SCHED_OTHER) ||
652 (policy == SCHED_RR) ||
653 (policy == SCHED_FIFO))
654 {
655 attr->policy = policy;
656 attr->schedset = 1;
657 return (0);
658 } else
659 {
660 return (EINVAL);
661 }
662 } else
663 {
664 return (EINVAL); /* Not an attribute structure! */
665 }
666 }
667
668 /*
669 * Set the scope for the thread.
670 * We currently only provide PTHREAD_SCOPE_SYSTEM
671 */
672 int
673 pthread_attr_setscope(pthread_attr_t *attr,
674 int scope)
675 {
676 if (attr->sig == _PTHREAD_ATTR_SIG) {
677 if (scope == PTHREAD_SCOPE_SYSTEM) {
678 /* No attribute yet for the scope */
679 return (0);
680 } else if (scope == PTHREAD_SCOPE_PROCESS) {
681 return (ENOTSUP);
682 }
683 }
684 return (EINVAL); /* Not an attribute structure! */
685 }
686
687 /*
688 * Get the scope for the thread.
689 * We currently only provide PTHREAD_SCOPE_SYSTEM
690 */
691 int
692 pthread_attr_getscope(const pthread_attr_t *attr,
693 int *scope)
694 {
695 if (attr->sig == _PTHREAD_ATTR_SIG) {
696 *scope = PTHREAD_SCOPE_SYSTEM;
697 return (0);
698 }
699 return (EINVAL); /* Not an attribute structure! */
700 }
701
702 /* Get the base stack address of the given thread */
703 int
704 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
705 {
706 if (attr->sig == _PTHREAD_ATTR_SIG) {
707 *stackaddr = attr->stackaddr;
708 return (0);
709 } else {
710 return (EINVAL); /* Not an attribute structure! */
711 }
712 }
713
714 int
715 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
716 {
717 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
718 attr->stackaddr = stackaddr;
719 attr->freeStackOnExit = 0;
720 attr->fastpath = 0;
721 return (0);
722 } else {
723 return (EINVAL); /* Not an attribute structure! */
724 }
725 }
726
727 int
728 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
729 {
730 if (attr->sig == _PTHREAD_ATTR_SIG) {
731 *stacksize = attr->stacksize;
732 return (0);
733 } else {
734 return (EINVAL); /* Not an attribute structure! */
735 }
736 }
737
738 int
739 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
740 {
741 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
742 attr->stacksize = stacksize;
743 return (0);
744 } else {
745 return (EINVAL); /* Not an attribute structure! */
746 }
747 }
748
749 int
750 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
751 {
752 if (attr->sig == _PTHREAD_ATTR_SIG) {
753 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
754 *stacksize = attr->stacksize;
755 return (0);
756 } else {
757 return (EINVAL); /* Not an attribute structure! */
758 }
759 }
760
761 /* By SUSV spec, the stackaddr is the base address, the lowest addressable
762 * byte address. This is not the same as in pthread_attr_setstackaddr.
763 */
764 int
765 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
766 {
767 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
768 (((uintptr_t)stackaddr % vm_page_size) == 0) &&
769 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
770 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
771 attr->stacksize = stacksize;
772 attr->freeStackOnExit = 0;
773 attr->fastpath = 0;
774 return (0);
775 } else {
776 return (EINVAL); /* Not an attribute structure! */
777 }
778 }
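
/*
 * Usage sketch (illustrative, not part of the original source): the two entry
 * points take opposite ends of the same region. pthread_attr_setstack() wants
 * the lowest address (SUSv convention, per the comment above), while
 * pthread_attr_setstackaddr() wants the highest. Assumes valloc() for a
 * page-aligned region; only one attr would actually be used to create a thread.
 */
#if 0 /* example only */
static int
example_set_stack_both_ways(pthread_attr_t *a1, pthread_attr_t *a2, size_t size)
{
	char *lo = valloc(size);		/* lowest addressable byte of the region */

	if (lo == NULL)
		return ENOMEM;
	pthread_attr_init(a1);
	pthread_attr_init(a2);
	/* SUSv style: pass the low end plus the size. */
	pthread_attr_setstack(a1, lo, size);
	/* Legacy style: pass the high end; the size is set separately. */
	pthread_attr_setstacksize(a2, size);
	pthread_attr_setstackaddr(a2, lo + size);
	return 0;
}
#endif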
779
780
781 /*
782 * Set the guardsize attribute in the attr.
783 */
784 int
785 pthread_attr_setguardsize(pthread_attr_t *attr,
786 size_t guardsize)
787 {
788 if (attr->sig == _PTHREAD_ATTR_SIG) {
789 /* Guardsize of 0 is valid; it means no guard */
790 if ((guardsize % vm_page_size) == 0) {
791 attr->guardsize = guardsize;
792 attr->fastpath = 0;
793 return (0);
794 } else
795 return(EINVAL);
796 }
797 return (EINVAL); /* Not an attribute structure! */
798 }
799
800 /*
801 * Get the guardsize attribute in the attr.
802 */
803 int
804 pthread_attr_getguardsize(const pthread_attr_t *attr,
805 size_t *guardsize)
806 {
807 if (attr->sig == _PTHREAD_ATTR_SIG) {
808 *guardsize = attr->guardsize;
809 return (0);
810 }
811 return (EINVAL); /* Not an attribute structure! */
812 }
813
814
815 /*
816 * Create and start execution of a new thread.
817 */
818
819 static void
820 _pthread_body(pthread_t self)
821 {
822 _pthread_set_self(self);
823 _pthread_exit(self, (self->fun)(self->arg));
824 }
825
826 void
827 _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
828 {
829 int ret;
830 #if WQ_DEBUG
831 pthread_t pself;
832 #endif
833 pthread_attr_t *attrs = &_pthread_attr_default;
834 char * stackaddr;
835
836 if ((pflags & PTHREAD_START_CUSTOM) == 0) {
837 stackaddr = (char *)self;
838 _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
839 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
840 _pthread_set_self(self);
841 #endif
842 LOCK(_pthread_list_lock);
843 if (pflags & PTHREAD_START_SETSCHED) {
844 self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
845 self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
846 }
847 /* These are not joinable threads */
848 if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
849 self->detached &= ~PTHREAD_CREATE_JOINABLE;
850 self->detached |= PTHREAD_CREATE_DETACHED;
851 }
852 } else {
853 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
854 _pthread_set_self(self);
855 #endif
856 LOCK(_pthread_list_lock);
857 }
858 self->kernel_thread = kport;
859 self->fun = fun;
860 self->arg = funarg;
861
862 /* Add to the pthread list */
863 if (self->parentcheck == 0) {
864 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
865 #if PTH_LISTTRACE
866 __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
867 #endif
868 _pthread_count++;
869 }
870 self->childrun = 1;
871 UNLOCK(_pthread_list_lock);
872
873 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
874 if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
875 printf("Failed to set thread_id in pthread_start\n");
876 #endif
877
878 #if WQ_DEBUG
879 pself = pthread_self();
880 if (self != pself)
881 LIBC_ABORT("self %p != pself %p", self, pself);
882 #endif
883 #if PTH_TRACE
884 __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
885 #endif
886
887 _pthread_exit(self, (self->fun)(self->arg));
888 }
889
890 int
891 _pthread_create(pthread_t t,
892 const pthread_attr_t *attrs,
893 void *stack,
894 const mach_port_t kernel_thread)
895 {
896 int res;
897 res = 0;
898
899 do
900 {
901 memset(t, 0, sizeof(*t));
902 t->newstyle = 0;
903 t->schedset = 0;
904 t->kernalloc = 0;
905 t->tsd[0] = t;
906 t->max_tsd_key = 0;
907 t->wqthread = 0;
908 t->cur_workq = 0;
909 t->cur_workitem = 0;
910 t->stacksize = attrs->stacksize;
911 t->stackaddr = (void *)stack;
912 t->guardsize = attrs->guardsize;
913 t->kernel_thread = kernel_thread;
914 t->detached = attrs->detached;
915 t->inherit = attrs->inherit;
916 t->policy = attrs->policy;
917 t->param = attrs->param;
918 t->freeStackOnExit = attrs->freeStackOnExit;
919 t->mutexes = (struct _pthread_mutex *)NULL;
920 t->sig = _PTHREAD_SIG;
921 t->reply_port = MACH_PORT_NULL;
922 t->cthread_self = NULL;
923 LOCK_INIT(t->lock);
924 t->plist.tqe_next = (struct _pthread *)0;
925 t->plist.tqe_prev = (struct _pthread **)0;
926 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
927 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
928 t->death = SEMAPHORE_NULL;
929
930 if (kernel_thread != MACH_PORT_NULL)
931 (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
932 } while (0);
933 return (res);
934 }
935
936 void
937 _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
938 {
939 mach_vm_offset_t stackaddr = (mach_vm_offset_t)(long)stack;
940
941 if (nozero == 0) {
942 memset(t, 0, sizeof(*t));
943 t->plist.tqe_next = (struct _pthread *)0;
944 t->plist.tqe_prev = (struct _pthread **)0;
945 }
946 t->schedset = attrs->schedset;
947 t->tsd[0] = t;
948 if (kernalloc != 0) {
949 stackaddr = (mach_vm_offset_t)(long)t;
950
951 /* if allocated from kernel set values appropriately */
952 t->stacksize = stacksize;
953 t->stackaddr = (void *)(long)stackaddr;
954 t->freeStackOnExit = 1;
955 t->freeaddr = (void *)(long)(stackaddr - stacksize - vm_page_size);
956 t->freesize = pthreadsize + stacksize + vm_page_size;
957 } else {
958 t->stacksize = attrs->stacksize;
959 t->stackaddr = (void *)stack;
960 }
961 t->guardsize = attrs->guardsize;
962 t->detached = attrs->detached;
963 t->inherit = attrs->inherit;
964 t->policy = attrs->policy;
965 t->param = attrs->param;
966 t->mutexes = (struct _pthread_mutex *)NULL;
967 t->sig = _PTHREAD_SIG;
968 t->reply_port = MACH_PORT_NULL;
969 t->cthread_self = NULL;
970 LOCK_INIT(t->lock);
971 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
972 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
973 t->death = SEMAPHORE_NULL;
974 t->newstyle = 1;
975 t->kernalloc = kernalloc;
976 t->wqthread = 0;
977 t->cur_workq = 0;
978 t->cur_workitem = 0;
979 t->max_tsd_key = 0;
980 }
981
982 static void
983 _pthread_tsd_reinit(pthread_t t)
984 {
985 bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
986 }
987
988
989 /* Need to deprecate this in the future */
990 int
991 _pthread_is_threaded(void)
992 {
993 return __is_threaded;
994 }
995
996 /* Non-portable public API to know whether this process has (or had) at least one thread
997 * apart from the main thread. There could be a race if a thread is in the process of
998 * being created at the time of the call. It does not tell whether more than one thread
999 * exists at this point in time.
1000 */
1001 int
1002 pthread_is_threaded_np(void)
1003 {
1004 return (__is_threaded);
1005 }
1006
1007 mach_port_t
1008 pthread_mach_thread_np(pthread_t t)
1009 {
1010 mach_port_t kport = MACH_PORT_NULL;
1011
1012 if (t == NULL)
1013 goto out;
1014
1015 /*
1016 * If the call is on self, return the kernel port. We cannot
1017 * add this bypass for main thread as it might have exited,
1018 * and we should not return stale port info.
1019 */
1020 if (t == pthread_self())
1021 {
1022 kport = t->kernel_thread;
1023 goto out;
1024 }
1025
1026 if (_pthread_lookup_thread(t, &kport, 0) != 0)
1027 return((mach_port_t)0);
1028
1029 out:
1030 return(kport);
1031 }
1032
1033 pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread)
1034 {
1035 struct _pthread * p = NULL;
1036
1037 /* No need to wait as mach port is already known */
1038 LOCK(_pthread_list_lock);
1039 TAILQ_FOREACH(p, &__pthread_head, plist) {
1040 if (p->kernel_thread == kernel_thread)
1041 break;
1042 }
1043 UNLOCK(_pthread_list_lock);
1044 return p;
1045 }
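
/*
 * Usage sketch (illustrative, not part of the original source): round-trip
 * between a pthread_t and its Mach thread port using the two calls above.
 */
#if 0 /* example only */
static void
example_port_round_trip(pthread_t t)
{
	mach_port_t kport = pthread_mach_thread_np(t);
	pthread_t back = pthread_from_mach_thread_np(kport);

	if (back != t)
		printf("no live pthread found for port 0x%x\n", (unsigned int)kport);
}
#endif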
1046
1047 size_t
1048 pthread_get_stacksize_np(pthread_t t)
1049 {
1050 int ret,nestingDepth=0;
1051 size_t size = 0;
1052 vm_address_t address=0;
1053 vm_size_t region_size=0;
1054 struct vm_region_submap_info_64 info;
1055 mach_msg_type_number_t count;
1056
1057 if (t == NULL)
1058 return(ESRCH);
1059
1060 if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
1061 {
1062 size=t->stacksize;
1063 return size;
1064 }
1065
1066
1067 LOCK(_pthread_list_lock);
1068
1069 if ((ret = _pthread_find_thread(t)) != 0) {
1070 UNLOCK(_pthread_list_lock);
1071 return(ret);
1072 }
1073
1074 size=t->stacksize;
1075 UNLOCK(_pthread_list_lock);
1076
1077 return(size);
1078 }
1079
1080 void *
1081 pthread_get_stackaddr_np(pthread_t t)
1082 {
1083 int ret;
1084 void * addr = NULL;
1085
1086 if (t == NULL)
1087 return((void *)(long)ESRCH);
1088
1089 if(t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
1090 return t->stackaddr;
1091
1092 LOCK(_pthread_list_lock);
1093
1094 if ((ret = _pthread_find_thread(t)) != 0) {
1095 UNLOCK(_pthread_list_lock);
1096 return((void *)(long)ret);
1097 }
1098 addr = t->stackaddr;
1099 UNLOCK(_pthread_list_lock);
1100
1101 return(addr);
1102 }
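
/*
 * Usage sketch (illustrative, not part of the original source): the reported
 * stack address is the high end of the stack, so the usable range of the
 * current thread runs from (addr - size) up to addr.
 */
#if 0 /* example only */
static void
example_print_stack_range(void)
{
	pthread_t self = pthread_self();
	void *top = pthread_get_stackaddr_np(self);
	size_t size = pthread_get_stacksize_np(self);

	printf("stack: [%p, %p)\n", (void *)((char *)top - size), top);
}
#endif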
1103
1104 mach_port_t
1105 _pthread_reply_port(pthread_t t)
1106 {
1107 return t->reply_port;
1108 }
1109
1110
1111 /* returns non-zero if the current thread is the main thread */
1112 int
1113 pthread_main_np(void)
1114 {
1115 pthread_t self = pthread_self();
1116
1117 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
1118 }
1119
1120
1121 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
1122 /* If we are passed a pthread_t that is NULL, we return the
1123 current thread's thread_id, so callers don't have to call
1124 pthread_self themselves when all they want is their own
1125 thread_id.
1126 */
1127 int
1128 pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
1129 {
1130 int rval=0;
1131 pthread_t self = pthread_self();
1132
1133 if (thread_id == NULL) {
1134 return(EINVAL);
1135 } else if (thread == NULL || thread == self) {
1136 *thread_id = self->thread_id;
1137 return rval;
1138 }
1139
1140 LOCK(_pthread_list_lock);
1141 if ((rval = _pthread_find_thread(thread)) != 0) {
1142 UNLOCK(_pthread_list_lock);
1143 return(rval);
1144 }
1145 *thread_id = thread->thread_id;
1146 UNLOCK(_pthread_list_lock);
1147 return rval;
1148 }
1149 #endif
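
/*
 * Usage sketch (illustrative, not part of the original source): passing NULL
 * asks for the calling thread's id, as described above.
 */
#if 0 /* example only */
static void
example_log_thread_id(void)
{
	__uint64_t tid = 0;

	if (pthread_threadid_np(NULL, &tid) == 0)
		printf("thread id %llu\n", (unsigned long long)tid);
}
#endif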
1150
1151 int
1152 pthread_getname_np(pthread_t thread, char *threadname, size_t len)
1153 {
1154 int rval;
1155 rval = 0;
1156
1157 if (thread == NULL)
1158 return(ESRCH);
1159
1160 LOCK(_pthread_list_lock);
1161 if ((rval = _pthread_find_thread(thread)) != 0) {
1162 UNLOCK(_pthread_list_lock);
1163 return(rval);
1164 }
1165 strlcpy(threadname, thread->pthread_name, len);
1166 UNLOCK(_pthread_list_lock);
1167 return rval;
1168 }
1169
1170 int
1171 pthread_setname_np(const char *threadname)
1172 {
1173 int rval;
1174 size_t len;
1175
1176 rval = 0;
1177 len = strlen(threadname);
1178 rval = sysctlbyname("kern.threadname", NULL, 0, threadname, len);
1179 if(rval == 0)
1180 {
1181 strlcpy((pthread_self())->pthread_name, threadname, len+1);
1182 }
1183 return rval;
1184
1185 }
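
/*
 * Usage sketch (illustrative, not part of the original source): in this
 * version pthread_setname_np() names the calling thread only; the name can
 * then be read back for any registered thread with pthread_getname_np().
 */
#if 0 /* example only */
static void
example_name_current_thread(void)
{
	char buf[64];

	if (pthread_setname_np("worker-0") == 0 &&
	    pthread_getname_np(pthread_self(), buf, sizeof(buf)) == 0)
		printf("thread name: %s\n", buf);
}
#endif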
1186
1187 static int
1188 _new_pthread_create_suspended(pthread_t *thread,
1189 const pthread_attr_t *attr,
1190 void *(*start_routine)(void *),
1191 void *arg,
1192 int create_susp)
1193 {
1194 pthread_attr_t *attrs;
1195 void *stack;
1196 int error;
1197 unsigned int flags;
1198 pthread_t t,t2;
1199 kern_return_t kern_res;
1200 mach_port_t kernel_thread = MACH_PORT_NULL;
1201 int needresume;
1202 task_t self = mach_task_self();
1203 int kernalloc = 0;
1204 int susp = create_susp;
1205
1206 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1207 { /* Set up default parameters */
1208 attrs = &_pthread_attr_default;
1209 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1210 return EINVAL;
1211 }
1212 error = 0;
1213
1214 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1215 (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
1216 needresume = 1;
1217 susp = 1;
1218 } else
1219 needresume = 0;
1220
1221 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check
1222 * whether any change in priority or policy is needed here.
1223 */
1224 if ((__oldstyle == 1) || (create_susp != 0)) {
1225 /* Rosetta or pthread_create_suspended() */
1226 /* running under rosetta */
1227 /* Allocate a stack for the thread */
1228 #if PTH_TRACE
1229 __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
1230 #endif
1231 if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
1232 return(error);
1233 }
1234 t = (pthread_t)malloc(sizeof(struct _pthread));
1235 *thread = t;
1236 if (susp) {
1237 /* Create the Mach thread for this thread */
1238 PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
1239 if (kern_res != KERN_SUCCESS)
1240 {
1241 printf("Can't create thread: %d\n", kern_res);
1242 return(EINVAL);
1243 }
1244 }
1245 if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1246 {
1247 return(error);
1248 }
1249 set_malloc_singlethreaded(0);
1250 __is_threaded = 1;
1251
1252 /* Send it on its way */
1253 t->arg = arg;
1254 t->fun = start_routine;
1255 t->newstyle = 0;
1256 /* Now set it up to execute */
1257 LOCK(_pthread_list_lock);
1258 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1259 #if PTH_LISTTRACE
1260 __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
1261 #endif
1262 _pthread_count++;
1263 UNLOCK(_pthread_list_lock);
1264 _pthread_setup(t, _pthread_body, stack, susp, needresume);
1265 return(0);
1266 } else {
1267
1268 flags = 0;
1269 if (attrs->fastpath == 1)
1270 kernalloc = 1;
1271
1272 if (attrs->detached == PTHREAD_CREATE_DETACHED)
1273 flags |= PTHREAD_START_DETACHED;
1274 if (attrs->schedset != 0) {
1275 flags |= PTHREAD_START_SETSCHED;
1276 flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
1277 flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
1278 }
1279
1280 set_malloc_singlethreaded(0);
1281 __is_threaded = 1;
1282
1283 if (kernalloc == 0) {
1284 /* Allocate a stack for the thread */
1285 flags |= PTHREAD_START_CUSTOM;
1286 if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
1287 return(error);
1288 }
1289 /* Send it on its way */
1290 t->arg = arg;
1291 t->fun = start_routine;
1292 t->newstyle = 1;
1293
1294 #if PTH_TRACE
1295 __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
1296 #endif
1297
1298 if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
1299 _pthread_free_pthread_onstack(t, 1, 0);
1300 return (EAGAIN);
1301 }
1302 else t=t2;
1303 LOCK(_pthread_list_lock);
1304 t->parentcheck = 1;
1305 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1306 /* detached child exited, mop up */
1307 UNLOCK(_pthread_list_lock);
1308 #if PTH_TRACE
1309 __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
1310 #endif
1311 if(t->freeStackOnExit)
1312 vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
1313 else
1314 free(t);
1315 } else if (t->childrun == 0) {
1316 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1317 _pthread_count++;
1318 #if PTH_LISTTRACE
1319 __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
1320 #endif
1321 UNLOCK(_pthread_list_lock);
1322 } else
1323 UNLOCK(_pthread_list_lock);
1324
1325 *thread = t;
1326
1327 #if PTH_TRACE
1328 __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
1329 #endif
1330 return (0);
1331
1332 } else {
1333 /* kernel allocation */
1334 #if PTH_TRACE
1335 __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
1336 #endif
1337 if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
1338 return (EAGAIN);
1339 /* Now set it up to execute */
1340 LOCK(_pthread_list_lock);
1341 t->parentcheck = 1;
1342 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1343 /* detached child exited, mop up */
1344 UNLOCK(_pthread_list_lock);
1345 #if PTH_TRACE
1346 __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
1347 #endif
1348 vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
1349 } else if (t->childrun == 0) {
1350 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1351 _pthread_count++;
1352 #if PTH_LISTTRACE
1353 __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
1354 #endif
1355 UNLOCK(_pthread_list_lock);
1356 } else
1357 UNLOCK(_pthread_list_lock);
1358
1359 *thread = t;
1360
1361 #if PTH_TRACE
1362 __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
1363 #endif
1364 return(0);
1365 }
1366 }
1367 }
1368
1369 static int
1370 _pthread_create_suspended(pthread_t *thread,
1371 const pthread_attr_t *attr,
1372 void *(*start_routine)(void *),
1373 void *arg,
1374 int suspended)
1375 {
1376 pthread_attr_t *attrs;
1377 void *stack;
1378 int res;
1379 pthread_t t;
1380 kern_return_t kern_res;
1381 mach_port_t kernel_thread = MACH_PORT_NULL;
1382 int needresume;
1383
1384 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1385 { /* Set up default parameters */
1386 attrs = &_pthread_attr_default;
1387 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1388 return EINVAL;
1389 }
1390 res = 0;
1391
1392 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check
1393 * whether any change in priority or policy is needed here.
1394 */
1395 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1396 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
1397 needresume = 1;
1398 suspended = 1;
1399 } else
1400 needresume = 0;
1401
1402 do
1403 {
1404 /* Allocate a stack for the thread */
1405 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
1406 break;
1407 }
1408 t = (pthread_t)malloc(sizeof(struct _pthread));
1409 *thread = t;
1410 if (suspended) {
1411 /* Create the Mach thread for this thread */
1412 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
1413 if (kern_res != KERN_SUCCESS)
1414 {
1415 printf("Can't create thread: %d\n", kern_res);
1416 res = EINVAL; /* Need better error here? */
1417 break;
1418 }
1419 }
1420 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1421 {
1422 break;
1423 }
1424 set_malloc_singlethreaded(0);
1425 __is_threaded = 1;
1426
1427 /* Send it on its way */
1428 t->arg = arg;
1429 t->fun = start_routine;
1430 /* Now set it up to execute */
1431 LOCK(_pthread_list_lock);
1432 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1433 #if PTH_LISTTRACE
1434 __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
1435 #endif
1436 _pthread_count++;
1437 UNLOCK(_pthread_list_lock);
1438 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
1439 } while (0);
1440 return (res);
1441 }
1442
1443 int
1444 pthread_create(pthread_t *thread,
1445 const pthread_attr_t *attr,
1446 void *(*start_routine)(void *),
1447 void *arg)
1448 {
1449 return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
1450 }
1451
1452 int
1453 pthread_create_suspended_np(pthread_t *thread,
1454 const pthread_attr_t *attr,
1455 void *(*start_routine)(void *),
1456 void *arg)
1457 {
1458 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
1459 }
1460
1461 /*
1462 * Make a thread 'detached' - no longer 'joinable' with other threads.
1463 */
1464 int
1465 pthread_detach(pthread_t thread)
1466 {
1467 int newstyle = 0;
1468 int ret;
1469
1470 if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
1471 return (ret); /* Not a valid thread */
1472
1473 LOCK(thread->lock);
1474 newstyle = thread->newstyle;
1475 if (thread->detached & PTHREAD_CREATE_JOINABLE)
1476 {
1477 if (thread->detached & _PTHREAD_EXITED) {
1478 UNLOCK(thread->lock);
1479 pthread_join(thread, NULL);
1480 return 0;
1481 } else {
1482 if (newstyle == 0) {
1483 semaphore_t death = thread->death;
1484
1485 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1486 thread->detached |= PTHREAD_CREATE_DETACHED;
1487 UNLOCK(thread->lock);
1488 if (death)
1489 (void) semaphore_signal(death);
1490 } else {
1491 mach_port_t joinport = thread->joiner_notify;
1492
1493 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1494 thread->detached |= PTHREAD_CREATE_DETACHED;
1495
1496 UNLOCK(thread->lock);
1497 if (joinport) {
1498 semaphore_signal(joinport);
1499 }
1500 }
1501 return(0);
1502 }
1503 } else {
1504 UNLOCK(thread->lock);
1505 return (EINVAL);
1506 }
1507 }
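
/*
 * Usage sketch (illustrative, not part of the original source): a joinable
 * thread can be handed off with pthread_detach() so its resources are
 * reclaimed automatically when it exits; it must then never be joined.
 */
#if 0 /* example only */
static void *detached_worker(void *arg) { return arg; }

static int
example_detach_after_create(void)
{
	pthread_t t;
	int err = pthread_create(&t, NULL, detached_worker, NULL);

	if (err == 0)
		err = pthread_detach(t);	/* do not pthread_join(t) after this */
	return err;
}
#endif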
1508
1509
1510 /*
1511 * pthread_kill call to system call
1512 */
1513 int
1514 pthread_kill (
1515 pthread_t th,
1516 int sig)
1517 {
1518 int error = 0;
1519 mach_port_t kport = MACH_PORT_NULL;
1520
1521 if ((sig < 0) || (sig > NSIG))
1522 return(EINVAL);
1523
1524 if (_pthread_lookup_thread(th, &kport, 0) != 0)
1525 return (ESRCH); /* Not a valid thread */
1526
1527 /* if the thread is a workqueue thread, just return error */
1528 if ((th->wqthread != 0) && (th->wqkillset ==0)) {
1529 return(ENOTSUP);
1530 }
1531
1532 error = __pthread_kill(kport, sig);
1533
1534 if (error == -1)
1535 error = errno;
1536 return(error);
1537 }
1538
1539 int
1540 __pthread_workqueue_setkill(int enable)
1541 {
1542 pthread_t self = pthread_self();
1543
1544 LOCK(self->lock);
1545 if (enable == 0)
1546 self->wqkillset = 0;
1547 else
1548 self->wqkillset = 1;
1549 UNLOCK(self->lock);
1550
1551 return(0);
1552
1553 }
1554
1555 /* Announce that there are pthread resources ready to be reclaimed in a */
1556 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
1557 /* thread underneath is terminated right away. */
1558 static
1559 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
1560 pthread_reap_msg_t msg;
1561 kern_return_t ret;
1562
1563 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
1564 MACH_MSG_TYPE_MOVE_SEND);
1565 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
1566 msg.header.msgh_remote_port = thread_recycle_port;
1567 msg.header.msgh_local_port = kernel_thread;
1568 msg.header.msgh_id = 0x44454144; /* 'DEAD' */
1569 msg.thread = thread;
1570 ret = mach_msg_send(&msg.header);
1571 assert(ret == MACH_MSG_SUCCESS);
1572 }
1573
1574 /* Reap the resources for available threads */
1575 __private_extern__
1576 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
1577 mach_port_type_t ptype;
1578 kern_return_t ret;
1579 task_t self;
1580
1581 self = mach_task_self();
1582 if (kernel_thread != MACH_PORT_DEAD) {
1583 ret = mach_port_type(self, kernel_thread, &ptype);
1584 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
1585 /* not quite dead yet... */
1586 return EAGAIN;
1587 }
1588 ret = mach_port_deallocate(self, kernel_thread);
1589 if (ret != KERN_SUCCESS) {
1590 fprintf(stderr,
1591 "mach_port_deallocate(kernel_thread) failed: %s\n",
1592 mach_error_string(ret));
1593 }
1594 }
1595
1596 if (th->reply_port != MACH_PORT_NULL) {
1597 ret = mach_port_mod_refs(self, th->reply_port,
1598 MACH_PORT_RIGHT_RECEIVE, -1);
1599 if (ret != KERN_SUCCESS) {
1600 fprintf(stderr,
1601 "mach_port_mod_refs(reply_port) failed: %s\n",
1602 mach_error_string(ret));
1603 }
1604 }
1605
1606 if (th->freeStackOnExit) {
1607 vm_address_t addr = (vm_address_t)th->stackaddr;
1608 vm_size_t size;
1609
1610 size = (vm_size_t)th->stacksize + th->guardsize;
1611
1612 addr -= size;
1613 ret = vm_deallocate(self, addr, size);
1614 if (ret != KERN_SUCCESS) {
1615 fprintf(stderr,
1616 "vm_deallocate(stack) failed: %s\n",
1617 mach_error_string(ret));
1618 }
1619 }
1620
1621
1622 if (value_ptr)
1623 *value_ptr = th->exit_value;
1624 if (conforming) {
1625 if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1626 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
1627 *value_ptr = PTHREAD_CANCELED;
1628 th->sig = _PTHREAD_NO_SIG;
1629 }
1630
1631
1632 if (th != &_thread)
1633 free(th);
1634
1635 return 0;
1636 }
1637
1638 static
1639 void _pthread_reap_threads(void)
1640 {
1641 pthread_reap_msg_t msg;
1642 kern_return_t ret;
1643
1644 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1645 sizeof msg, thread_recycle_port,
1646 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1647 while (ret == MACH_MSG_SUCCESS) {
1648 mach_port_t kernel_thread = msg.header.msgh_remote_port;
1649 pthread_t thread = msg.thread;
1650
1651 /* deal with race with thread_create_running() */
1652 if (kernel_thread == MACH_PORT_NULL &&
1653 kernel_thread != thread->kernel_thread) {
1654 kernel_thread = thread->kernel_thread;
1655 }
1656
1657 if ( kernel_thread == MACH_PORT_NULL ||
1658 _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
1659 {
1660 /* not dead yet, put it back for someone else to reap, stop here */
1661 _pthread_become_available(thread, kernel_thread);
1662 return;
1663 }
1664
1665 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1666 sizeof msg, thread_recycle_port,
1667 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1668 }
1669 }
1670
1671 /* For compatibility... */
1672
1673 pthread_t
1674 _pthread_self() {
1675 return pthread_self();
1676 }
1677
1678 /*
1679 * Terminate a thread.
1680 */
1681 int __disable_threadsignal(int);
1682
1683 static void
1684 _pthread_exit(pthread_t self, void *value_ptr)
1685 {
1686 struct __darwin_pthread_handler_rec *handler;
1687 kern_return_t kern_res;
1688 int thread_count;
1689 int newstyle = self->newstyle;
1690
1691 /* Keep this thread from receiving any signals */
1692 __disable_threadsignal(1);
1693
1694 #if PTH_TRACE
1695 __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
1696 #endif
1697
1698 /* set cancel state to disable and type to deferred */
1699 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
1700
1701 while ((handler = self->__cleanup_stack) != 0)
1702 {
1703 (handler->__routine)(handler->__arg);
1704 self->__cleanup_stack = handler->__next;
1705 }
1706 _pthread_tsd_cleanup(self);
1707
1708 if (newstyle == 0) {
1709 _pthread_reap_threads();
1710
1711 LOCK(self->lock);
1712 self->detached |= _PTHREAD_EXITED;
1713
1714 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1715 mach_port_t death = self->death;
1716 self->exit_value = value_ptr;
1717 UNLOCK(self->lock);
1718 /* the joiner will need a kernel thread reference, leave ours for it */
1719 if (death) {
1720 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1721 if (kern_res != KERN_SUCCESS)
1722 fprintf(stderr,
1723 "semaphore_signal(death) failed: %s\n",
1724 mach_error_string(kern_res));
1725 }
1726 LOCK(_pthread_list_lock);
1727 thread_count = --_pthread_count;
1728 UNLOCK(_pthread_list_lock);
1729 } else {
1730 UNLOCK(self->lock);
1731 LOCK(_pthread_list_lock);
1732 TAILQ_REMOVE(&__pthread_head, self, plist);
1733 #if PTH_LISTTRACE
1734 __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
1735 #endif
1736 thread_count = --_pthread_count;
1737 UNLOCK(_pthread_list_lock);
1738 /* with no joiner, we let become available consume our cached ref */
1739 _pthread_become_available(self, self->kernel_thread);
1740 }
1741
1742 if (thread_count <= 0)
1743 exit(0);
1744
1745 /* Use a new reference to terminate ourselves. Should never return. */
1746 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1747 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1748 mach_error_string(kern_res));
1749 } else {
1750 semaphore_t joinsem = SEMAPHORE_NULL;
1751
1752 if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
1753 joinsem = new_sem_from_pool();
1754 LOCK(self->lock);
1755 self->detached |= _PTHREAD_EXITED;
1756
1757 self->exit_value = value_ptr;
1758 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1759 if (self->joiner_notify == (mach_port_t)0) {
1760 self->joiner_notify = joinsem;
1761 joinsem = SEMAPHORE_NULL;
1762 }
1763 UNLOCK(self->lock);
1764 if (joinsem != SEMAPHORE_NULL)
1765 restore_sem_to_pool(joinsem);
1766 _pthread_free_pthread_onstack(self, 0, 1);
1767 } else {
1768 UNLOCK(self->lock);
1769 /* with no joiner, we let become available consume our cached ref */
1770 if (joinsem != SEMAPHORE_NULL)
1771 restore_sem_to_pool(joinsem);
1772 _pthread_free_pthread_onstack(self, 1, 1);
1773 }
1774 }
1775 LIBC_ABORT("thread %p didn't exit", self);
1776 }
1777
1778 void
1779 pthread_exit(void *value_ptr)
1780 {
1781 pthread_t self = pthread_self();
1782 /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
1783 if (self->wqthread == 0) {
1784 _pthread_exit(self, value_ptr);
1785 } else {
1786 LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
1787 }
1788 }
1789
1790 /*
1791 * Get the scheduling policy and scheduling parameters for a thread.
1792 */
1793 int
1794 pthread_getschedparam(pthread_t thread,
1795 int *policy,
1796 struct sched_param *param)
1797 {
1798 int ret;
1799
1800 if (thread == NULL)
1801 return(ESRCH);
1802
1803 LOCK(_pthread_list_lock);
1804
1805 if ((ret = _pthread_find_thread(thread)) != 0) {
1806 UNLOCK(_pthread_list_lock);
1807 return(ret);
1808 }
1809 if (policy != 0)
1810 *policy = thread->policy;
1811 if (param != 0)
1812 *param = thread->param;
1813 UNLOCK(_pthread_list_lock);
1814
1815 return(0);
1816 }
1817
1818 /*
1819 * Set the scheduling policy and scheduling parameters for a thread.
1820 */
1821 static int
1822 pthread_setschedparam_internal(pthread_t thread,
1823 mach_port_t kport,
1824 int policy,
1825 const struct sched_param *param)
1826 {
1827 policy_base_data_t bases;
1828 policy_base_t base;
1829 mach_msg_type_number_t count;
1830 kern_return_t ret;
1831
1832 switch (policy)
1833 {
1834 case SCHED_OTHER:
1835 bases.ts.base_priority = param->sched_priority;
1836 base = (policy_base_t)&bases.ts;
1837 count = POLICY_TIMESHARE_BASE_COUNT;
1838 break;
1839 case SCHED_FIFO:
1840 bases.fifo.base_priority = param->sched_priority;
1841 base = (policy_base_t)&bases.fifo;
1842 count = POLICY_FIFO_BASE_COUNT;
1843 break;
1844 case SCHED_RR:
1845 bases.rr.base_priority = param->sched_priority;
1846 /* quantum isn't public yet */
1847 bases.rr.quantum = param->quantum;
1848 base = (policy_base_t)&bases.rr;
1849 count = POLICY_RR_BASE_COUNT;
1850 break;
1851 default:
1852 return (EINVAL);
1853 }
1854 ret = thread_policy(kport, policy, base, count, TRUE);
1855 if (ret != KERN_SUCCESS)
1856 return (EINVAL);
1857 return (0);
1858 }
1859
1860 int
1861 pthread_setschedparam(pthread_t t,
1862 int policy,
1863 const struct sched_param *param)
1864 {
1865 mach_port_t kport = MACH_PORT_NULL;
1866 int error;
1867 int bypass = 1;
1868
1869 if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
1870 bypass = 0;
1871 if (_pthread_lookup_thread(t, &kport, 0) != 0)
1872 return(ESRCH);
1873 } else
1874 kport = t->kernel_thread;
1875
1876 error = pthread_setschedparam_internal(t, kport, policy, param);
1877 if (error == 0) {
1878 if (bypass == 0) {
1879 /* ensure the thread is still valid */
1880 LOCK(_pthread_list_lock);
1881 if ((error = _pthread_find_thread(t)) != 0) {
1882 UNLOCK(_pthread_list_lock);
1883 return(error);
1884 }
1885 t->policy = policy;
1886 t->param = *param;
1887 UNLOCK(_pthread_list_lock);
1888 } else {
1889 t->policy = policy;
1890 t->param = *param;
1891 }
1892 }
1893 return(error);
1894 }
1895
1896 /*
1897 * Get the minimum priority for the given policy
1898 */
1899 int
1900 sched_get_priority_min(int policy)
1901 {
1902 return default_priority - 16;
1903 }
1904
1905 /*
1906 * Get the maximum priority for the given policy
1907 */
1908 int
1909 sched_get_priority_max(int policy)
1910 {
1911 return default_priority + 16;
1912 }
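
/*
 * Usage sketch (illustrative, not part of the original source): the usable
 * priority band is default_priority +/- 16, as returned by the two helpers
 * above; a caller can clamp a requested priority into that band before
 * calling pthread_setschedparam(). The helper name is hypothetical.
 */
#if 0 /* example only */
static int
example_boost_priority(pthread_t t, int want)
{
	struct sched_param param;
	int policy, err;

	err = pthread_getschedparam(t, &policy, &param);
	if (err != 0)
		return err;
	if (want > sched_get_priority_max(policy))
		want = sched_get_priority_max(policy);
	if (want < sched_get_priority_min(policy))
		want = sched_get_priority_min(policy);
	param.sched_priority = want;
	return pthread_setschedparam(t, policy, &param);
}
#endif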
1913
1914 /*
1915 * Determine if two thread identifiers represent the same thread.
1916 */
1917 int
1918 pthread_equal(pthread_t t1,
1919 pthread_t t2)
1920 {
1921 return (t1 == t2);
1922 }
1923
1924 __private_extern__ void
1925 _pthread_set_self(pthread_t p)
1926 {
1927 extern void __pthread_set_self(pthread_t);
1928 if (p == 0) {
1929 bzero(&_thread, sizeof(struct _pthread));
1930 p = &_thread;
1931 }
1932 p->tsd[0] = p;
1933 __pthread_set_self(p);
1934 }
1935
1936 void
1937 cthread_set_self(void *cself)
1938 {
1939 pthread_t self = pthread_self();
1940 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1941 _pthread_set_self(cself);
1942 return;
1943 }
1944 self->cthread_self = cself;
1945 }
1946
1947 void *
1948 ur_cthread_self(void) {
1949 pthread_t self = pthread_self();
1950 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1951 return (void *)self;
1952 }
1953 return self->cthread_self;
1954 }
1955
1956 /*
1957 * Cancellation handler for pthread_once(): the init routine may contain a
1958 * cancellation point, in which case we must release the spin lock here.
1959 */
1960 void
1961 __pthread_once_cancel_handler(pthread_once_t *once_control)
1962 {
1963 _spin_unlock(&once_control->lock);
1964 }
1965
1966
1967 /*
1968 * Execute a function exactly one time in a thread-safe fashion.
1969 */
1970 int
1971 pthread_once(pthread_once_t *once_control,
1972 void (*init_routine)(void))
1973 {
1974 _spin_lock(&once_control->lock);
1975 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1976 {
1977 pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
1978 (*init_routine)();
1979 pthread_cleanup_pop(0);
1980 once_control->sig = _PTHREAD_ONCE_SIG;
1981 }
1982 _spin_unlock(&once_control->lock);
1983 return (0); /* Spec defines no possible errors! */
1984 }
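/*
 * Illustrative sketch (not part of this file): the canonical pthread_once()
 * pattern.  init_once and init_state are hypothetical names.
 *
 *	static pthread_once_t init_once = PTHREAD_ONCE_INIT;
 *	static void init_state(void);	// body runs exactly once, in the first caller
 *	...
 *	pthread_once(&init_once, init_state);
 */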
1985
1986 /*
1987 * Insert a cancellation point in a thread.
1988 */
1989 __private_extern__ void
1990 _pthread_testcancel(pthread_t thread, int isconforming)
1991 {
1992 LOCK(thread->lock);
1993 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1994 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1995 {
1996 UNLOCK(thread->lock);
1997 if (isconforming)
1998 pthread_exit(PTHREAD_CANCELED);
1999 else
2000 pthread_exit(0);
2001 }
2002 UNLOCK(thread->lock);
2003 }
2004
2005
2006
2007 int
2008 pthread_getconcurrency(void)
2009 {
2010 return(pthread_concurrency);
2011 }
2012
2013 int
2014 pthread_setconcurrency(int new_level)
2015 {
2016 if (new_level < 0)
2017 return EINVAL;
2018 pthread_concurrency = new_level;
2019 return(0);
2020 }
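/*
 * Illustrative sketch (not part of this file): POSIX allows the concurrency
 * level to be treated as a hint, and here it is simply stored and read back.
 *
 *	if (pthread_setconcurrency(4) == 0)
 *		assert(pthread_getconcurrency() == 4);
 */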
2021
2022 /*
2023 * Perform package initialization - called automatically when application starts
2024 */
2025 __private_extern__ int
2026 pthread_init(void)
2027 {
2028 pthread_attr_t *attrs;
2029 pthread_t thread;
2030 kern_return_t kr;
2031 host_priority_info_data_t priority_info;
2032 host_info_t info;
2033 host_flavor_t flavor;
2034 host_t host;
2035 mach_msg_type_number_t count;
2036 int mib[2];
2037 int ncpus = 0;
2038 size_t len;
2039 void *stackaddr;
2040
2041 pthreadsize = round_page(sizeof (struct _pthread));
2042 count = HOST_PRIORITY_INFO_COUNT;
2043 info = (host_info_t)&priority_info;
2044 flavor = HOST_PRIORITY_INFO;
2045 host = mach_host_self();
2046 kr = host_info(host, flavor, info, &count);
2047 if (kr != KERN_SUCCESS)
2048 printf("host_info failed (%d); probably need privilege.\n", kr);
2049 else {
2050 default_priority = priority_info.user_priority;
2051 min_priority = priority_info.minimum_priority;
2052 max_priority = priority_info.maximum_priority;
2053 }
2054 attrs = &_pthread_attr_default;
2055 pthread_attr_init(attrs);
2056
2057 TAILQ_INIT(&__pthread_head);
2058 LOCK_INIT(_pthread_list_lock);
2059 thread = &_thread;
2060 TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
2061 _pthread_set_self(thread);
2062 #if PTH_LISTTRACE
2063 __kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
2064 #endif
2065
2066 /* In the case of dyld, reset the TSD keys 1 - 10 */
2067 _pthread_keys_init();
2068
2069 mib[0] = CTL_KERN;
2070 mib[1] = KERN_USRSTACK;
2071 len = sizeof (stackaddr);
2072 if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
2073 stackaddr = (void *)USRSTACK;
2074 _pthread_create(thread, attrs, stackaddr, mach_thread_self());
2075 thread->stacksize = DFLSSIZ; // initialize the main thread's stack size from vmparam.h
2076 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
2077
2078 _init_cpu_capabilities();
2079 if ((ncpus = _NumCPUs()) > 1)
2080 _spin_tries = MP_SPIN_TRIES;
2081
2082 workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
2083 workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
2084 workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
2085
2086 mach_port_deallocate(mach_task_self(), host);
2087
2088 #if defined(__ppc__)
2089 IF_ROSETTA() {
2090 __oldstyle = 1;
2091 }
2092 #endif
2093
2094 #if defined(_OBJC_PAGE_BASE_ADDRESS)
2095 {
2096 vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
2097 kr = vm_map(mach_task_self(),
2098 &objcRTPage, vm_page_size * 4, vm_page_size - 1,
2099 VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
2100 MACH_PORT_NULL,
2101 (vm_address_t)0, FALSE,
2102 (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
2103 VM_INHERIT_DEFAULT);
2104 /* We ignore the return result here. The ObjC runtime will just have to deal. */
2105 }
2106 #endif
2107
2108 mig_init(1); /* enable multi-threaded mig interfaces */
2109 if (__oldstyle == 0) {
2110 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2111 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (__uint64_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (__uint64_t)thread);
2112 #else
2113 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (__uint64_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (__uint64_t)thread);
2114 #endif
2115 }
2116
2117 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2118 if ((thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
2119 printf("Failed to set thread_id in pthread_init\n");
2120 #endif
2121 return 0;
2122 }
2123
2124 int sched_yield(void)
2125 {
2126 swtch_pri(0);
2127 return 0;
2128 }
2129
2130 /* This used to be the "magic" that gets the initialization routine called when the application starts */
2131 static int _do_nothing(void) { return 0; }
2132 int (*_cthread_init_routine)(void) = _do_nothing;
2133
2134 /* Get a semaphore from the pool, growing it if necessary */
2135
2136 __private_extern__ semaphore_t new_sem_from_pool(void) {
2137 kern_return_t res;
2138 semaphore_t sem;
2139 int i;
2140
2141 LOCK(sem_pool_lock);
2142 if (sem_pool_current == sem_pool_count) {
2143 sem_pool_count += 16;
2144 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
2145 for (i = sem_pool_current; i < sem_pool_count; i++) {
2146 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
2147 }
2148 }
2149 sem = sem_pool[sem_pool_current++];
2150 UNLOCK(sem_pool_lock);
2151 return sem;
2152 }
2153
2154 /* Put a semaphore back into the pool */
2155 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
2156 LOCK(sem_pool_lock);
2157 sem_pool[--sem_pool_current] = sem;
2158 UNLOCK(sem_pool_lock);
2159 }
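/*
 * Illustrative internal-usage sketch (not part of this file): elsewhere in
 * this library these helpers are paired around a wait, roughly:
 *
 *	semaphore_t death = new_sem_from_pool();
 *	... block on the semaphore until the target thread exits ...
 *	restore_sem_to_pool(death);
 */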
2160
2161 static void sem_pool_reset(void) {
2162 LOCK(sem_pool_lock);
2163 sem_pool_count = 0;
2164 sem_pool_current = 0;
2165 sem_pool = NULL;
2166 UNLOCK(sem_pool_lock);
2167 }
2168
2169 __private_extern__ void _pthread_fork_child(pthread_t p) {
2170 /* Just in case somebody had it locked... */
2171 UNLOCK(sem_pool_lock);
2172 sem_pool_reset();
2173 /* No need to hold the pthread_list_lock as no one other than this
2174 * thread is present at this time
2175 */
2176 TAILQ_INIT(&__pthread_head);
2177 LOCK_INIT(_pthread_list_lock);
2178 TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
2179 #if PTH_LISTTRACE
2180 __kdebug_trace(0x900000c, p, 0, 0, 10, 0);
2181 #endif
2182 _pthread_count = 1;
2183 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2184 if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
2185 printf("Failed to set thread_id in pthread_fork_child\n");
2186 #endif
2187 }
2188
2189 /*
2190 * Query/update the cancelability 'state' of a thread
2191 */
2192 int
2193 _pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
2194 {
2195 pthread_t self = pthread_self();
2196
2197
2198 switch (state) {
2199 case PTHREAD_CANCEL_ENABLE:
2200 if (conforming)
2201 __pthread_canceled(1);
2202 break;
2203 case PTHREAD_CANCEL_DISABLE:
2204 if (conforming)
2205 __pthread_canceled(2);
2206 break;
2207 default:
2208 return EINVAL;
2209 }
2210
2211 self = pthread_self();
2212 LOCK(self->lock);
2213 if (oldstate)
2214 *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
2215 self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
2216 self->cancel_state |= state;
2217 UNLOCK(self->lock);
2218 if (!conforming)
2219 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
2220 return (0);
2221 }
2222
2223 /* When a thread exits set the cancellation state to DISABLE and DEFERRED */
2224 static void
2225 _pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
2226 {
2227 LOCK(self->lock);
2228 self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
2229 self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
2230 if (value_ptr == PTHREAD_CANCELED) {
2231 // 4597450: begin
2232 self->detached |= _PTHREAD_WASCANCEL;
2233 // 4597450: end
2234 }
2235 UNLOCK(self->lock);
2236 }
2237
2238 int
2239 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
2240 {
2241 kern_return_t res;
2242 int detached = 0, ret;
2243
2244 #if PTH_TRACE
2245 __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
2246 #endif
2247 /* Handle the scenario where the joiner was waiting for the thread and
2248 * a pthread_detach() happened on that thread: the semaphore will still
2249 * trigger, but by the time the joiner runs the target thread could already
2250 * have been freed. So make sure the thread is still in the list and is
2251 * joinable before continuing with the join.
2252 */
2253 LOCK(_pthread_list_lock);
2254 if ((ret = _pthread_find_thread(thread)) != 0) {
2255 UNLOCK(_pthread_list_lock);
2256 /* returns ESRCH */
2257 return(ret);
2258 }
2259 if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
2260 /* the thread might be a detached thread */
2261 UNLOCK(_pthread_list_lock);
2262 return(ESRCH);
2263
2264 }
2265 /* It is still a joinable thread and needs to be reaped */
2266 TAILQ_REMOVE(&__pthread_head, thread, plist);
2267 #if PTH_LISTTRACE
2268 __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
2269 #endif
2270 UNLOCK(_pthread_list_lock);
2271
2272 if (value_ptr)
2273 *value_ptr = thread->exit_value;
2274 if (conforming) {
2275 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
2276 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
2277 *value_ptr = PTHREAD_CANCELED;
2278 }
2279 }
2280 if (thread->reply_port != MACH_PORT_NULL) {
2281 res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
2282 if (res != KERN_SUCCESS)
2283 fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
2284 thread->reply_port = MACH_PORT_NULL;
2285 }
2286 if (thread->freeStackOnExit) {
2287 thread->sig = _PTHREAD_NO_SIG;
2288 #if PTH_TRACE
2289 __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
2290 #endif
2291 vm_deallocate(mach_task_self(), (mach_vm_address_t)(long)thread, pthreadsize);
2292 } else {
2293 thread->sig = _PTHREAD_NO_SIG;
2294 #if PTH_TRACE
2295 __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
2296 #endif
2297 free(thread);
2298 }
2299 return(0);
2300 }
2301
2302 /* ALWAYS called with the list lock held and returns with it held */
2303 int
2304 _pthread_find_thread(pthread_t thread)
2305 {
2306 pthread_t p;
2307
2308 loop:
2309 TAILQ_FOREACH(p, &__pthread_head, plist) {
2310 if (p == thread) {
2311 if (thread->kernel_thread == MACH_PORT_NULL) {
2312 UNLOCK(_pthread_list_lock);
2313 sched_yield();
2314 LOCK(_pthread_list_lock);
2315 goto loop;
2316 }
2317 return(0);
2318 }
2319 }
2320 return(ESRCH);
2321 }
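/*
 * Illustrative internal-usage sketch (not part of this file): callers above
 * bracket _pthread_find_thread() with the list lock, as in:
 *
 *	LOCK(_pthread_list_lock);
 *	if ((ret = _pthread_find_thread(t)) != 0) {
 *		UNLOCK(_pthread_list_lock);
 *		return (ret);
 *	}
 *	... use t while still holding the lock ...
 *	UNLOCK(_pthread_list_lock);
 */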
2322
2323 int
2324 _pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
2325 {
2326 mach_port_t kport;
2327 int ret = 0;
2328
2329 if (thread == NULL)
2330 return(ESRCH);
2331
2332 LOCK(_pthread_list_lock);
2333
2334 if ((ret = _pthread_find_thread(thread)) != 0) {
2335 UNLOCK(_pthread_list_lock);
2336 return(ret);
2337 }
2338 if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
2339 UNLOCK(_pthread_list_lock);
2340 return(EINVAL);
2341 }
2342 kport = thread->kernel_thread;
2343 UNLOCK(_pthread_list_lock);
2344 if (portp != NULL)
2345 *portp = kport;
2346 return(0);
2347 }
2348
2349 /* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
2350 int
2351 pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
2352 {
2353 attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2354 attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
2355 attrp->overcommit = 0;
2356 return(0);
2357 }
2358
2359 int
2360 pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
2361 {
2362 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
2363 {
2364 return (0);
2365 } else
2366 {
2367 return (EINVAL); /* Not an attribute structure! */
2368 }
2369 }
2370
2371 int
2372 pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
2373 {
2374 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2375 *qpriop = attr->queueprio;
2376 return (0);
2377 } else {
2378 return (EINVAL); /* Not an attribute structure! */
2379 }
2380 }
2381
2382
2383 int
2384 pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
2385 {
2386 int error = 0;
2387
2388 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2389 switch(qprio) {
2390 case WORKQ_HIGH_PRIOQUEUE:
2391 case WORKQ_DEFAULT_PRIOQUEUE:
2392 case WORKQ_LOW_PRIOQUEUE:
2393 attr->queueprio = qprio;
2394 break;
2395 default:
2396 error = EINVAL;
2397 }
2398 } else {
2399 error = EINVAL;
2400 }
2401 return (error);
2402 }
2403
2404
2405 int
2406 pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
2407 {
2408 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2409 *ocommp = attr->overcommit;
2410 return (0);
2411 } else {
2412 return (EINVAL); /* Not an attribute structure! */
2413 }
2414 }
2415
2416
2417 int
2418 pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
2419 {
2420 int error = 0;
2421
2422 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2423 attr->overcommit = ocomm;
2424 } else {
2425 error = EINVAL;
2426 }
2427 return (error);
2428 }
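/*
 * Illustrative sketch (not part of this file): configuring a workqueue
 * attribute before passing it to pthread_workqueue_create_np() (below).
 *
 *	pthread_workqueue_attr_t attr;
 *	pthread_workqueue_attr_init_np(&attr);
 *	pthread_workqueue_attr_setqueuepriority_np(&attr, WORKQ_HIGH_PRIOQUEUE);
 *	pthread_workqueue_attr_setovercommit_np(&attr, 1);
 */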
2429 /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
2430
2431 static void
2432 workqueue_list_lock(void)
2433 {
2434 OSSpinLockLock(&__workqueue_list_lock);
2435 }
2436
2437 static void
2438 workqueue_list_unlock(void)
2439 {
2440 OSSpinLockUnlock(&__workqueue_list_lock);
2441 }
2442
2443 int
2444 pthread_workqueue_init_np()
2445 {
2446 int ret;
2447
2448 workqueue_list_lock();
2449 ret = _pthread_work_internal_init();
2450 workqueue_list_unlock();
2451
2452 return(ret);
2453 }
2454
2455 int
2456 pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
2457 {
2458 int error = 0;
2459
2460 if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
2461 return(EINVAL);
2462
2463 error =__workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);
2464
2465 if (error == -1)
2466 return(errno);
2467 return(0);
2468 }
2469
2470 void
2471 pthread_workqueue_atfork_prepare(void)
2472 {
2473 /*
2474 * NOTE: Any workq additions here
2475 * should be for i386,x86_64 only
2476 */
2477 dispatch_atfork_prepare();
2478 }
2479
2480 void
2481 pthread_workqueue_atfork_parent(void)
2482 {
2483 /*
2484 * NOTE: Any workq additions here
2485 * should be for i386,x86_64 only
2486 */
2487 dispatch_atfork_parent();
2488 }
2489
2490 void
2491 pthread_workqueue_atfork_child(void)
2492 {
2493 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2494 /*
2495 * NOTE: workq additions here
2496 * are for i386,x86_64 only as
2497 * ppc and arm do not support it
2498 */
2499 __workqueue_list_lock = OS_SPINLOCK_INIT;
2500 if (kernel_workq_setup != 0){
2501 kernel_workq_setup = 0;
2502 _pthread_work_internal_init();
2503 }
2504 #endif
2505 dispatch_atfork_child();
2506 }
2507
2508 static int
2509 _pthread_work_internal_init(void)
2510 {
2511 int i, error;
2512 pthread_workqueue_head_t headp;
2513 pthread_workitem_t witemp;
2514 pthread_workqueue_t wq;
2515
2516 if (kernel_workq_setup == 0) {
2517 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2518 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
2519 #else
2520 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
2521 #endif
2522
2523 _pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2524 _pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;
2525
2526 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2527 headp = __pthread_wq_head_tbl[i];
2528 TAILQ_INIT(&headp->wqhead);
2529 headp->next_workq = 0;
2530 }
2531
2532 /* create work item and workqueue pools */
2533 witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
2534 bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
2535 for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
2536 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
2537 }
2538 wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
2539 bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
2540 for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
2541 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
2542 }
2543
2544 if ((error = __workq_open()) != 0) {
2545 TAILQ_INIT(&__pthread_workitem_pool_head);
2546 TAILQ_INIT(&__pthread_workqueue_pool_head);
2547 free(witemp);
2548 free(wq);
2549 return(ENOMEM);
2550 }
2551 kernel_workq_setup = 1;
2552 }
2553 return(0);
2554 }
2555
2556
2557 /* This routine is called with list lock held */
2558 static pthread_workitem_t
2559 alloc_workitem(void)
2560 {
2561 pthread_workitem_t witem;
2562
2563 if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
2564 workqueue_list_unlock();
2565 witem = malloc(sizeof(struct _pthread_workitem));
2566 witem->gencount = 0;
2567 workqueue_list_lock();
2568 } else {
2569 witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
2570 TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
2571 }
2572 return(witem);
2573 }
2574
2575 /* This routine is called with list lock held */
2576 static void
2577 free_workitem(pthread_workitem_t witem)
2578 {
2579 witem->gencount++;
2580 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
2581 }
2582
2583 /* This routine is called with list lock held */
2584 static pthread_workqueue_t
2585 alloc_workqueue(void)
2586 {
2587 pthread_workqueue_t wq;
2588
2589 if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
2590 workqueue_list_unlock();
2591 wq = malloc(sizeof(struct _pthread_workqueue));
2592 workqueue_list_lock();
2593 } else {
2594 wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
2595 TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
2596 }
2597 user_workq_count++;
2598 return(wq);
2599 }
2600
2601 /* This routine is called with list lock held */
2602 static void
2603 free_workqueue(pthread_workqueue_t wq)
2604 {
2605 user_workq_count--;
2606 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
2607 }
2608
2609 static void
2610 _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
2611 {
2612 bzero(wq, sizeof(struct _pthread_workqueue));
2613 if (attr != NULL) {
2614 wq->queueprio = attr->queueprio;
2615 wq->overcommit = attr->overcommit;
2616 } else {
2617 wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2618 wq->overcommit = 0;
2619 }
2620 LOCK_INIT(wq->lock);
2621 wq->flags = 0;
2622 TAILQ_INIT(&wq->item_listhead);
2623 TAILQ_INIT(&wq->item_kernhead);
2624 #if WQ_LISTTRACE
2625 __kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
2626 #endif
2627 wq->wq_list.tqe_next = 0;
2628 wq->wq_list.tqe_prev = 0;
2629 wq->sig = PTHREAD_WORKQUEUE_SIG;
2630 wq->headp = __pthread_wq_head_tbl[wq->queueprio];
2631 }
2632
2633 int
2634 valid_workq(pthread_workqueue_t workq)
2635 {
2636 if (workq->sig == PTHREAD_WORKQUEUE_SIG)
2637 return(1);
2638 else
2639 return(0);
2640 }
2641
2642
2643 /* called with the list lock held; drops the lock before returning */
2644 static void
2645 pick_nextworkqueue_droplock(void)
2646 {
2647 int i, curwqprio, val, found;
2648 pthread_workqueue_head_t headp;
2649 pthread_workqueue_t workq;
2650 pthread_workqueue_t nworkq = NULL;
2651
2652 #if WQ_TRACE
2653 __kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
2654 #endif
2655 loop:
2656 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2657 found = 0;
2658 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2659 wqreadyprio = i; /* because there is nothing else higher to run */
2660 headp = __pthread_wq_head_tbl[i];
2661
2662 if (TAILQ_EMPTY(&headp->wqhead))
2663 continue;
2664 workq = headp->next_workq;
2665 if (workq == NULL)
2666 workq = TAILQ_FIRST(&headp->wqhead);
2667 curwqprio = workq->queueprio;
2668 nworkq = workq; /* starting pt */
2669 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2670 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2671 if (headp->next_workq == NULL)
2672 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2673 #if WQ_TRACE
2674 __kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
2675 #endif
2676 val = post_nextworkitem(workq);
2677
2678 if (val != 0) {
2679 /* things could have changed, so reassess */
2680 /* If the kernel queue is full, skip */
2681 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2682 break;
2683 /* If anything with higher prio arrived, then reevaluate */
2684 if (wqreadyprio < curwqprio)
2685 goto loop; /* we need to re-evaluate */
2686 /* we can post some more work items */
2687 found = 1;
2688 }
2689
2690 /* cannot use workq here as it could be freed */
2691 if (TAILQ_EMPTY(&headp->wqhead))
2692 break;
2693 /* if we found nothing to run and only one workqueue in the list, skip */
2694 if ((val == 0) && (workq == headp->next_workq))
2695 break;
2696 workq = headp->next_workq;
2697 if (workq == NULL)
2698 workq = TAILQ_FIRST(&headp->wqhead);
2699 if (val != 0)
2700 nworkq = workq;
2701 /* if we found nothing to run and we are back at the workq where we started */
2702 if ((val == 0) && (workq == nworkq))
2703 break;
2704 }
2705 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2706 break;
2707 }
2708 /* nothing found to run? */
2709 if (found == 0)
2710 break;
2711 }
2712 workqueue_list_unlock();
2713 }
2714
2715 static int
2716 post_nextworkitem(pthread_workqueue_t workq)
2717 {
2718 int error, prio;
2719 pthread_workitem_t witem;
2720 pthread_workqueue_head_t headp;
2721 void (*func)(pthread_workqueue_t, void *);
2722
2723 if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
2724 return(0);
2725 }
2726 #if WQ_TRACE
2727 __kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
2728 #endif
2729 if (TAILQ_EMPTY(&workq->item_listhead)) {
2730 return(0);
2731 }
2732 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
2733 return(0);
2734
2735 witem = TAILQ_FIRST(&workq->item_listhead);
2736 headp = workq->headp;
2737 #if WQ_TRACE
2738 __kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
2739 #endif
2740 if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
2741 #if WQ_TRACE
2742 __kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
2743 #endif
2744
2745 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2746 return(0);
2747 }
2748 /* A barrier posted when nothing is outstanding also needs to be handled: */
2749 /* there is nothing to wait for, so the callback can run immediately */
2750 if (workq->kq_count != 0) {
2751 witem->flags |= PTH_WQITEM_APPLIED;
2752 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2753 workq->barrier_count = workq->kq_count;
2754 #if WQ_TRACE
2755 __kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
2756 #endif
2757 return(1);
2758 } else {
2759 #if WQ_TRACE
2760 __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
2761 #endif
2762 if (witem->func != NULL) {
2763 /* since we are going to drop list lock */
2764 witem->flags |= PTH_WQITEM_APPLIED;
2765 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2766 workqueue_list_unlock();
2767 func = (void (*)(pthread_workqueue_t, void *))witem->func;
2768 (*func)(workq, witem->func_arg);
2769 #if WQ_TRACE
2770 __kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
2771 #endif
2772 workqueue_list_lock();
2773 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2774 }
2775 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2776 #if WQ_LISTTRACE
2777 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2778 #endif
2779 witem->flags = 0;
2780 free_workitem(witem);
2781 #if WQ_TRACE
2782 __kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
2783 #endif
2784 return(1);
2785 }
2786 } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
2787 #if WQ_TRACE
2788 __kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
2789 #endif
2790 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2791 return(0);
2792 }
2793 witem->flags |= PTH_WQITEM_APPLIED;
2794 workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
2795 workq->barrier_count = workq->kq_count;
2796 workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
2797 workq->term_callarg = witem->func_arg;
2798 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2799 #if WQ_LISTTRACE
2800 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2801 #endif
2802 if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
2803 if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
2804 #if WQ_TRACE
2805 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
2806 #endif
2807 }
2808 witem->flags = 0;
2809 free_workitem(witem);
2810 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2811 #if WQ_TRACE
2812 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
2813 #endif
2814 headp = __pthread_wq_head_tbl[workq->queueprio];
2815 if (headp->next_workq == workq) {
2816 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2817 if (headp->next_workq == NULL) {
2818 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2819 if (headp->next_workq == workq)
2820 headp->next_workq = NULL;
2821 }
2822 }
2823 workq->sig = 0;
2824 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2825 if (workq->term_callback != NULL) {
2826 workqueue_list_unlock();
2827 (*workq->term_callback)(workq, workq->term_callarg);
2828 workqueue_list_lock();
2829 }
2830 free_workqueue(workq);
2831 return(1);
2832 } else {
2833 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2834 #if WQ_LISTTRACE
2835 __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2836 #endif
2837 }
2838 #if WQ_TRACE
2839 __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
2840 #endif
2841 return(1);
2842 } else {
2843 #if WQ_TRACE
2844 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
2845 #endif
2846 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2847 #if WQ_LISTTRACE
2848 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2849 #endif
2850 TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
2851 if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
2852 workq->kq_count++;
2853 witem->flags |= PTH_WQITEM_KERN_COUNT;
2854 }
2855 OSAtomicIncrement32(&kernel_workq_count);
2856 workqueue_list_unlock();
2857
2858 prio = workq->queueprio;
2859 if (workq->overcommit != 0) {
2860 prio |= WORKQUEUE_OVERCOMMIT;
2861 }
2862
2863 if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
2864 OSAtomicDecrement32(&kernel_workq_count);
2865 workqueue_list_lock();
2866 #if WQ_TRACE
2867 __kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
2868 #endif
2869 TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
2870 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2871 #if WQ_LISTTRACE
2872 __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2873 #endif
2874 if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
2875 workq->flags |= PTHREAD_WORKQ_REQUEUED;
2876 } else
2877 workqueue_list_lock();
2878 #if WQ_TRACE
2879 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
2880 #endif
2881 return(1);
2882 }
2883 /* no one should get here */
2884 #if 1
2885 printf("error in logic for next workitem\n");
2886 LIBC_ABORT("error in logic for next workitem");
2887 #endif
2888 return(0);
2889 }
2890
2891 void
2892 _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
2893 {
2894 int ret;
2895 pthread_attr_t *attrs = &_pthread_attr_default;
2896 pthread_workqueue_t workq;
2897 #if WQ_DEBUG
2898 pthread_t pself;
2899 #endif
2900
2901
2902 workq = item->workq;
2903 if (reuse == 0) {
2904 /* reuse is set to 0 when a thread is newly created to run a workitem */
2905 _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
2906 self->wqthread = 1;
2907 self->wqkillset = 0;
2908 self->parentcheck = 1;
2909
2910 /* These are not joinable threads */
2911 self->detached &= ~PTHREAD_CREATE_JOINABLE;
2912 self->detached |= PTHREAD_CREATE_DETACHED;
2913 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2914 _pthread_set_self(self);
2915 #endif
2916 #if WQ_TRACE
2917 __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
2918 #endif
2919 self->kernel_thread = kport;
2920 self->fun = (void *(*)(void *))item->func;
2921 self->arg = item->func_arg;
2922 /* Add to the pthread list */
2923 LOCK(_pthread_list_lock);
2924 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
2925 #if PTH_LISTTRACE
2926 __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
2927 #endif
2928 _pthread_count++;
2929 UNLOCK(_pthread_list_lock);
2930
2931 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2932 if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
2933 printf("Failed to set thread_id in pthread_wqthread\n");
2934 #endif
2935
2936 } else {
2937 /* reuse is set to 1 when a thread is reused to run another work item */
2938 #if WQ_TRACE
2939 __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
2940 #endif
2941 /* reset all tsd from 1 to KEYS_MAX */
2942 if (self == NULL)
2943 LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);
2944
2945 self->fun = (void *(*)(void *))item->func;
2946 self->arg = item->func_arg;
2947 }
2948
2949 #if WQ_DEBUG
2950 if (reuse == 0) {
2951 pself = pthread_self();
2952 if (self != pself) {
2953 #if WQ_TRACE
2954 __kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
2955 #endif
2956 printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
2957 _pthread_set_self(self);
2958 pself = pthread_self();
2959 if (self != pself)
2960 printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
2961 pself = self;
2962 }
2963 } else {
2964 pself = pthread_self();
2965 if (self != pself) {
2966 printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
2967 LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
2968 }
2969 }
2970 #endif /* WQ_DEBUG */
2971
2972 self->cur_workq = workq;
2973 self->cur_workitem = item;
2974 OSAtomicDecrement32(&kernel_workq_count);
2975
2976 ret = (int)(intptr_t)(*self->fun)(self->arg);
2977
2978 /* If we reach here without having gone through the initialization path above, don't go through
2979 * the teardown code path either (e.g. setjmp/longjmp). Instead, just exit this thread.
2980 */
2981 if(self != pthread_self()) {
2982 pthread_exit(PTHREAD_CANCELED);
2983 }
2984
2985 workqueue_exit(self, workq, item);
2986
2987 }
2988
2989 static void
2990 workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
2991 {
2992 pthread_attr_t *attrs = &_pthread_attr_default;
2993 pthread_workitem_t baritem;
2994 pthread_workqueue_head_t headp;
2995 void (*func)(pthread_workqueue_t, void *);
2996
2997 workqueue_list_lock();
2998
2999 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
3000 workq->kq_count--;
3001 #if WQ_TRACE
3002 __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
3003 #endif
3004 item->flags = 0;
3005 free_workitem(item);
3006
3007 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
3008 workq->barrier_count--;
3009 #if WQ_TRACE
3010 __kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
3011 #endif
3012 if (workq->barrier_count <= 0 ) {
3013 /* Need to remove barrier item from the list */
3014 baritem = TAILQ_FIRST(&workq->item_listhead);
3015 #if WQ_DEBUG
3016 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
3017 printf("Incorrect barrier item being removed in barrier processing\n");
3018 #endif /* WQ_DEBUG */
3019 /* if the front item is a barrier and a callback is registered, run it */
3020 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
3021 workqueue_list_unlock();
3022 func = (void (*)(pthread_workqueue_t, void *))baritem->func;
3023 (*func)(workq, baritem->func_arg);
3024 workqueue_list_lock();
3025 }
3026 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
3027 #if WQ_LISTTRACE
3028 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
3029 #endif
3030 baritem->flags = 0;
3031 free_workitem(baritem);
3032 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
3033 #if WQ_TRACE
3034 __kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
3035 #endif
3036 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
3037 headp = __pthread_wq_head_tbl[workq->queueprio];
3038 workq->flags |= PTHREAD_WORKQ_DESTROYED;
3039 #if WQ_TRACE
3040 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
3041 #endif
3042 if (headp->next_workq == workq) {
3043 headp->next_workq = TAILQ_NEXT(workq, wq_list);
3044 if (headp->next_workq == NULL) {
3045 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
3046 if (headp->next_workq == workq)
3047 headp->next_workq = NULL;
3048 }
3049 }
3050 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
3051 workq->sig = 0;
3052 if (workq->term_callback != NULL) {
3053 workqueue_list_unlock();
3054 (*workq->term_callback)(workq, workq->term_callarg);
3055 workqueue_list_lock();
3056 }
3057 free_workqueue(workq);
3058 } else {
3059 /* if there is a higher-priority schedulable item, reset wqreadyprio */
3060 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
3061 wqreadyprio = workq->queueprio;
3062 }
3063 }
3064 }
3065 #if WQ_TRACE
3066 else {
3067 __kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
3068 }
3069
3070 __kdebug_trace(0x900005c, self, item, 0, 0, 0);
3071 #endif
3072 pick_nextworkqueue_droplock();
3073 _pthread_workq_return(self);
3074 }
3075
3076 static void
3077 _pthread_workq_return(pthread_t self)
3078 {
3079 __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
3080
3081 /* This is the way to terminate the thread */
3082 _pthread_exit(self, NULL);
3083 }
3084
3085
3086 /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
3087
3088 int
3089 pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
3090 {
3091 pthread_workqueue_t wq;
3092 pthread_workqueue_head_t headp;
3093
3094 #if defined(__ppc__)
3095 IF_ROSETTA() {
3096 return(ENOTSUP);
3097 }
3098 #endif
3099 if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
3100 return(EINVAL);
3101 }
3102
3103 if (__is_threaded == 0)
3104 __is_threaded = 1;
3105
3106 workqueue_list_lock();
3107 if (kernel_workq_setup == 0) {
3108 int ret = _pthread_work_internal_init();
3109 if (ret != 0) {
3110 workqueue_list_unlock();
3111 return(ret);
3112 }
3113 }
3114
3115 wq = alloc_workqueue();
3116
3117 _pthread_workq_init(wq, attr);
3118
3119 headp = __pthread_wq_head_tbl[wq->queueprio];
3120 TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
3121 if (headp->next_workq == NULL) {
3122 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
3123 }
3124
3125 workqueue_list_unlock();
3126
3127 *workqp = wq;
3128
3129 return(0);
3130 }
3131
3132 int
3133 pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
3134 {
3135 pthread_workitem_t witem;
3136
3137 if (valid_workq(workq) == 0) {
3138 return(EINVAL);
3139 }
3140
3141 workqueue_list_lock();
3142
3143 /*
3144 * Allocate the work item up front, since alloc_workitem() can drop the lock;
3145 * that way the workqueue state below needs to be evaluated only once.
3146 */
3147 witem = alloc_workitem();
3148 witem->func = workitem_func;
3149 witem->func_arg = workitem_arg;
3150 witem->flags = 0;
3151 witem->workq = workq;
3152 witem->item_entry.tqe_next = 0;
3153 witem->item_entry.tqe_prev = 0;
3154
3155 /* alloc workitem can drop the lock, check the state */
3156 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3157 free_workitem(witem);
3158 workqueue_list_unlock();
3159 if (itemhandlep != NULL)
3160 *itemhandlep = 0;
3160 return(ESRCH);
3161 }
3162
3163 if (itemhandlep != NULL)
3164 *itemhandlep = (pthread_workitem_handle_t *)witem;
3165 if (gencountp != NULL)
3166 *gencountp = witem->gencount;
3167 #if WQ_TRACE
3168 __kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
3169 #endif
3170 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3171 #if WQ_LISTTRACE
3172 __kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
3173 #endif
3174
3175 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3176 wqreadyprio = workq->queueprio;
3177
3178 pick_nextworkqueue_droplock();
3179
3180 return(0);
3181 }
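/*
 * Illustrative sketch (not part of this file): creating a default-priority
 * workqueue and queueing one item.  do_work and arg are hypothetical.
 *
 *	static void do_work(void *arg);
 *
 *	pthread_workqueue_t wq;
 *	pthread_workitem_handle_t handle;
 *	unsigned int gen;
 *	if (pthread_workqueue_create_np(&wq, NULL) == 0)
 *		pthread_workqueue_additem_np(wq, do_work, arg, &handle, &gen);
 */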
3182
3183 int
3184 pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
3185 {
3186 pthread_workitem_t witem;
3187
3188 if (valid_workq(workq) == 0) {
3189 return(EINVAL);
3190 }
3191
3192 if (ocommp != NULL)
3193 *ocommp = workq->overcommit;
3194 return(0);
3195 }
3196
3197
3198 /* DEPRECATED
3199 int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle, unsigned int gencount)
3200 int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, pthread_workitem_handle_t *itemhandlep, unsigned int *gencountp)
3201 int pthread_workqueue_suspend_np(pthread_workqueue_t workq)
3202 int pthread_workqueue_resume_np(pthread_workqueue_t workq)
3203 */
3204
3205 #else /* !BUILDING_VARIANT ] [ */
3206 extern int __unix_conforming;
3207 extern int _pthread_count;
3208 extern pthread_lock_t _pthread_list_lock;
3209 extern void _pthread_testcancel(pthread_t thread, int isconforming);
3210 extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);
3211
3212 #endif /* !BUILDING_VARIANT ] */
3213
3214 #if __DARWIN_UNIX03
3215
3216 __private_extern__ void
3217 __posix_join_cleanup(void *arg)
3218 {
3219 pthread_t thread = (pthread_t)arg;
3220 int already_exited, res;
3221 void * dummy;
3222 semaphore_t death;
3223 mach_port_t joinport;
3224 int newstyle = 0;
3225
3226 LOCK(thread->lock);
3227 already_exited = (thread->detached & _PTHREAD_EXITED);
3228
3229 newstyle = thread->newstyle;
3230
3231 #if WQ_TRACE
3232 __kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
3233 #endif
3234 if (newstyle == 0) {
3235 death = thread->death;
3236 if (!already_exited){
3237 thread->joiner = (struct _pthread *)NULL;
3238 UNLOCK(thread->lock);
3239 restore_sem_to_pool(death);
3240 } else {
3241 UNLOCK(thread->lock);
3242 while ((res = _pthread_reap_thread(thread,
3243 thread->kernel_thread,
3244 &dummy, 1)) == EAGAIN)
3245 {
3246 sched_yield();
3247 }
3248 restore_sem_to_pool(death);
3249
3250 }
3251
3252 } else {
3253 /* leave another thread to join */
3254 thread->joiner = (struct _pthread *)NULL;
3255 UNLOCK(thread->lock);
3256 }
3257 }
3258
3259 #endif /* __DARWIN_UNIX03 */
3260
3261
3262 /*
3263 * Wait for a thread to terminate and obtain its exit value.
3264 */
3265 /*
3266 int
3267 pthread_join(pthread_t thread,
3268 void **value_ptr)
3269
3270 moved to pthread_cancelable.c */
3271
3272 /*
3273 * Cancel a thread
3274 */
3275 int
3276 pthread_cancel(pthread_t thread)
3277 {
3278 #if __DARWIN_UNIX03
3279 if (__unix_conforming == 0)
3280 __unix_conforming = 1;
3281 #endif /* __DARWIN_UNIX03 */
3282
3283 if (_pthread_lookup_thread(thread, NULL, 0) != 0)
3284 return(ESRCH);
3285
3286 /* if the thread is a workqueue thread, then return error */
3287 if (thread->wqthread != 0) {
3288 return(ENOTSUP);
3289 }
3290 #if __DARWIN_UNIX03
3291 int state;
3292
3293 LOCK(thread->lock);
3294 state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3295 UNLOCK(thread->lock);
3296 if (state & PTHREAD_CANCEL_ENABLE)
3297 __pthread_markcancel(thread->kernel_thread);
3298 #else /* __DARWIN_UNIX03 */
3299 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3300 #endif /* __DARWIN_UNIX03 */
3301 return (0);
3302 }
3303
3304 void
3305 pthread_testcancel(void)
3306 {
3307 pthread_t self = pthread_self();
3308
3309 #if __DARWIN_UNIX03
3310 if (__unix_conforming == 0)
3311 __unix_conforming = 1;
3312 _pthread_testcancel(self, 1);
3313 #else /* __DARWIN_UNIX03 */
3314 _pthread_testcancel(self, 0);
3315 #endif /* __DARWIN_UNIX03 */
3316
3317 }
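/*
 * Illustrative sketch (not part of this file): a cancellable worker loop.
 * worker and worker_thread are hypothetical names.
 *
 *	void *worker(void *unused)
 *	{
 *		for (;;) {
 *			pthread_testcancel();	// deferred cancellation point
 *			... do a bounded unit of work ...
 *		}
 *	}
 *	...
 *	pthread_cancel(worker_thread);	// returns ENOTSUP for workqueue threads
 */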
3318
3319
3320 /*
3321 * Query/update the cancelability 'state' of a thread
3322 */
3323 int
3324 pthread_setcancelstate(int state, int *oldstate)
3325 {
3326 #if __DARWIN_UNIX03
3327 if (__unix_conforming == 0) {
3328 __unix_conforming = 1;
3329 }
3330 return (_pthread_setcancelstate_internal(state, oldstate, 1));
3331 #else /* __DARWIN_UNIX03 */
3332 return (_pthread_setcancelstate_internal(state, oldstate, 0));
3333 #endif /* __DARWIN_UNIX03 */
3334
3335 }
3336
3337
3338
3339 /*
3340 * Query/update the cancelability 'type' of a thread
3341 */
3342 int
3343 pthread_setcanceltype(int type, int *oldtype)
3344 {
3345 pthread_t self = pthread_self();
3346
3347 #if __DARWIN_UNIX03
3348 if (__unix_conforming == 0)
3349 __unix_conforming = 1;
3350 #endif /* __DARWIN_UNIX03 */
3351
3352 if ((type != PTHREAD_CANCEL_DEFERRED) &&
3353 (type != PTHREAD_CANCEL_ASYNCHRONOUS))
3354 return EINVAL;
3355 self = pthread_self();
3356 LOCK(self->lock);
3357 if (oldtype)
3358 *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
3359 self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
3360 self->cancel_state |= type;
3361 UNLOCK(self->lock);
3362 #if !__DARWIN_UNIX03
3363 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
3364 #endif /* __DARWIN_UNIX03 */
3365 return (0);
3366 }
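/*
 * Illustrative sketch (not part of this file): temporarily disabling
 * cancellation around a region that is not cancellation-safe.
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	... region that must not be interrupted by cancellation ...
 *	pthread_setcancelstate(old, NULL);
 */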
3367
3368 int
3369 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
3370 {
3371 #if __DARWIN_UNIX03
3372 int err = 0;
3373
3374 if (__pthread_sigmask(how, set, oset) == -1) {
3375 err = errno;
3376 }
3377 return(err);
3378 #else /* __DARWIN_UNIX03 */
3379 return(__pthread_sigmask(how, set, oset));
3380 #endif /* __DARWIN_UNIX03 */
3381 }
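/*
 * Illustrative sketch (not part of this file): blocking SIGUSR1 in the
 * calling thread.  In the UNIX03-conforming build the return value is the
 * errno-style error code; assumes <signal.h>.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	int err = pthread_sigmask(SIG_BLOCK, &set, NULL);
 */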
3382
3383 /*
3384 int
3385 sigwait(const sigset_t * set, int * sig)
3386
3387 moved to pthread_cancelable.c */