1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
54
55 #include <assert.h>
56 #include <stdio.h> /* For printf(). */
57 #include <stdlib.h>
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <signal.h>
60 #include <sys/time.h>
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
64 #include <machine/vmparam.h>
65 #include <mach/vm_statistics.h>
66 #define __APPLE_API_PRIVATE
67 #include <machine/cpu_capabilities.h>
68 #include <libkern/OSAtomic.h>
69 #if defined(__ppc__)
70 #include <libkern/OSCrossEndian.h>
71 #endif
72
73
74 extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
75 extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
76
77 #ifndef BUILDING_VARIANT /* [ */
78
79 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
80
81
82
83 int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
84
85 /* Per-thread kernel support */
86 extern void _pthread_set_self(pthread_t);
87 extern void mig_init(int);
88 static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
89 static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
90 static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
91 static void _pthread_tsd_reinit(pthread_t t);
92 static int _new_pthread_create_suspended(pthread_t *thread,
93 const pthread_attr_t *attr,
94 void *(*start_routine)(void *),
95 void *arg,
96 int create_susp);
97
98 /* Get CPU capabilities from the kernel */
99 __private_extern__ void _init_cpu_capabilities(void);
100
101 /* Needed to tell the malloc subsystem we're going multithreaded */
102 extern void set_malloc_singlethreaded(int);
103
104 /* Used when we need to call into the kernel with no reply port */
105 extern pthread_lock_t reply_port_lock;
106 int _pthread_find_thread(pthread_t thread);
107
108 /* Mach message used to notify that a thread needs to be reaped */
109
110 typedef struct _pthread_reap_msg_t {
111 mach_msg_header_t header;
112 pthread_t thread;
113 mach_msg_trailer_t trailer;
114 } pthread_reap_msg_t;
115
116 /* We'll implement this when the main thread is a pthread */
117 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
118 static struct _pthread _thread = {0};
119
120 /* This global should be used (carefully) by anyone needing to know if a
121 ** pthread has been created.
122 */
123 int __is_threaded = 0;
124 /* _pthread_count is protected by _pthread_list_lock */
125 static int _pthread_count = 1;
126 int __unix_conforming = 0;
127 __private_extern__ size_t pthreadsize = 0;
128
129 /* under rosetta we will use old style creation of threads */
130 static int __oldstyle = 0;
131
132 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
133
134 /* Same implementation as LOCK, but without the __is_threaded check */
135 int _spin_tries = 0;
136 extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
137 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
138 {
139 int tries = _spin_tries;
140 do {
141 if (tries-- > 0)
142 continue;
143 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
144 tries = _spin_tries;
145 } while(!_spin_lock_try(lock));
146 }
147
148 extern mach_port_t thread_recycle_port;
149
150 /* These are used to keep track of a semaphore pool shared by mutexes and condition
151 ** variables.
152 */
153
154 static semaphore_t *sem_pool = NULL;
155 static int sem_pool_count = 0;
156 static int sem_pool_current = 0;
157 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
158
159 static int default_priority;
160 static int max_priority;
161 static int min_priority;
162 static int pthread_concurrency;
163
164 static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
165
166 static void _pthread_exit(pthread_t self, void *value_ptr);
167 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
168 static pthread_attr_t _pthread_attr_default = {0};
169 static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
170 static int kernel_workq_setup = 0;
171 static volatile int32_t kernel_workq_count = 0;
172 static volatile unsigned int user_workq_count = 0;
173 #define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kernel */
174 static int wqreadyprio = 0; /* current highest prio queue ready with items */
175
176 static int __pthread_workqueue_affinity = 1; /* 0 means no affinity */
177 __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
178 __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
179
180 struct _pthread_workqueue_head __pthread_workq0_head;
181 struct _pthread_workqueue_head __pthread_workq1_head;
182 struct _pthread_workqueue_head __pthread_workq2_head;
183 pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head};
184
185 static void workqueue_list_lock(void);
186 static void workqueue_list_unlock(void);
187 static int valid_workq(pthread_workqueue_t);
188 static void pick_nextworkqueue_droplock(void);
189 static int post_nextworkitem(pthread_workqueue_t workq);
190 static void _pthread_workq_return(pthread_t self);
191 static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
192 extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
193 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
194 static pthread_workitem_t alloc_workitem(void);
195 static void free_workitem(pthread_workitem_t);
196 static pthread_workqueue_t alloc_workqueue(void);
197 static void free_workqueue(pthread_workqueue_t);
198 static int _pthread_work_internal_init(void);
199 static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
200
201 void pthread_workqueue_atfork_prepare(void);
202 void pthread_workqueue_atfork_parent(void);
203 void pthread_workqueue_atfork_child(void);
204
205 extern void dispatch_atfork_prepare(void);
206 extern void dispatch_atfork_parent(void);
207 extern void dispatch_atfork_child(void);
208
209 /* workq_kernreturn commands */
210 #define WQOPS_QUEUE_ADD 1
211 #define WQOPS_QUEUE_REMOVE 2
212 #define WQOPS_THREAD_RETURN 4
213 #define WQOPS_THREAD_SETCONC 8
214
215 /*
216 * Flags field passed to bsdthread_create and back in pthread_start
217 31 <---------------------------------> 0
218 _________________________________________
219 | flags(8) | policy(8) | importance(16) |
220 -----------------------------------------
221 */
222 __private_extern__
223 void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
224
225 __private_extern__
226 void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
227
228 #define PTHREAD_START_CUSTOM 0x01000000
229 #define PTHREAD_START_SETSCHED 0x02000000
230 #define PTHREAD_START_DETACHED 0x04000000
231 #define PTHREAD_START_POLICY_BITSHIFT 16
232 #define PTHREAD_START_POLICY_MASK 0xff
233 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
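/*
 * Illustrative sketch (not part of this file): how a 32-bit flags word for
 * bsdthread_create is packed per the layout described above, using the
 * PTHREAD_START_* masks and shifts defined in this file. The policy,
 * priority and detached parameters are hypothetical caller values.
 */
#if 0
static unsigned int
pack_bsdthread_flags(int policy, int priority, int detached)
{
	unsigned int flags = 0;

	if (detached)
		flags |= PTHREAD_START_DETACHED;	/* flags byte, bits 24-31 */
	flags |= PTHREAD_START_SETSCHED;
	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);	/* bits 16-23 */
	flags |= (priority & PTHREAD_START_IMPORTANCE_MASK);	/* bits 0-15 */
	return flags;
}
#endif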
234
235 static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
236 extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
237 extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int),__uint64_t);
238 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
239 extern __uint64_t __thread_selfid( void );
240 extern int __pthread_canceled(int);
241 extern void _pthread_keys_init(void);
242 extern int __pthread_kill(mach_port_t, int);
243 extern int __pthread_markcancel(int);
244 extern int __workq_open(void);
245
246 #define WORKQUEUE_OVERCOMMIT 0x10000
247
248 extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
249
250 #if defined(__ppc__) || defined(__ppc64__)
251 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
252 #elif defined(__i386__) || defined(__x86_64__)
253 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
254 #elif defined(__arm__)
255 static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
256 #else
257 #error Need to define a stack address hint for this architecture
258 #endif
259
260 /* Set the base address to use as the stack pointer, before adjusting due to the ABI
261 * The guardpages for stackoverflow protection is also allocated here
262 * If the stack was already allocated(stackaddr in attr) then there are no guardpages
263 * set up for the thread
264 */
265
266 static int
267 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
268 {
269 kern_return_t kr;
270 vm_address_t stackaddr;
271 size_t guardsize;
272
273 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
274 if (attrs->stackaddr != NULL) {
275 /* No guard pages setup in this case */
276 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
277 *stack = attrs->stackaddr;
278 return 0;
279 }
280
281 guardsize = attrs->guardsize;
282 stackaddr = PTHREAD_STACK_HINT;
283 kr = vm_map(mach_task_self(), &stackaddr,
284 attrs->stacksize + guardsize,
285 vm_page_size-1,
286 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
287 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
288 VM_INHERIT_DEFAULT);
289 if (kr != KERN_SUCCESS)
290 kr = vm_allocate(mach_task_self(),
291 &stackaddr, attrs->stacksize + guardsize,
292 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
293 if (kr != KERN_SUCCESS) {
294 return EAGAIN;
295 }
296 /* The guard page is at the lowest address */
297 /* The stack base is the highest address */
298 if (guardsize)
299 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
300 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
301 return 0;
302 }
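/*
 * Illustrative layout sketch (not part of this file), assuming the default
 * 512K stack and a one-page (4K) guard as allocated above:
 *
 *   stackaddr                       start of the guard page, vm_protect'ed to VM_PROT_NONE
 *   stackaddr + 4K                  lowest usable stack address
 *   stackaddr + 4K + 512K (*stack)  value returned to the caller; the stack grows down from here
 */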
303
304 static int
305 _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
306 {
307 kern_return_t kr;
308 pthread_t t;
309 vm_address_t stackaddr;
310 size_t guardsize, allocsize;
311
312 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
313
314 if (attrs->stackaddr != NULL) {
315 /* No guard pages setup in this case */
316 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
317 *stack = attrs->stackaddr;
318 t = (pthread_t)malloc(pthreadsize);
319 _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
320 t->freeStackOnExit = 0;
321 t->freeaddr = 0;
322 t->freesize = 0;
323 *thread = t;
324 return 0;
325 }
326
327 guardsize = attrs->guardsize;
328 allocsize = attrs->stacksize + guardsize + pthreadsize;
329 stackaddr = PTHREAD_STACK_HINT;
330 kr = vm_map(mach_task_self(), &stackaddr,
331 allocsize,
332 vm_page_size-1,
333 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
334 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
335 VM_INHERIT_DEFAULT);
336 if (kr != KERN_SUCCESS)
337 kr = vm_allocate(mach_task_self(),
338 &stackaddr, allocsize,
339 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
340 if (kr != KERN_SUCCESS) {
341 return EAGAIN;
342 }
343 /* The guard page is at the lowest address */
344 /* The stack base is the highest address */
345 if (guardsize)
346 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
347
348
349 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
350
351 t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
352 _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
353 t->kernalloc = 0;
354 t->freesize = allocsize;
355 t->freeaddr = (void *)stackaddr;
356 t->freeStackOnExit = 1;
357 *thread = t;
358
359 return 0;
360 }
361
362 static kern_return_t
363 _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
364 {
365 kern_return_t res = 0;
366 vm_address_t freeaddr;
367 size_t freesize;
368 task_t self = mach_task_self();
369 int thread_count;
370 mach_port_t kport;
371 semaphore_t joinsem = SEMAPHORE_NULL;
372
373 #if PTH_TRACE
374 __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
375 #endif
376 kport = t->kernel_thread;
377 joinsem = t->joiner_notify;
378
379 if (t->freeStackOnExit) {
380 freeaddr = (vm_address_t)t->freeaddr;
381 if (freestruct)
382 freesize = t->stacksize + t->guardsize + pthreadsize;
383 else
384 freesize = t->stacksize + t->guardsize;
385 if (termthread) {
386 mig_dealloc_reply_port(MACH_PORT_NULL);
387 LOCK(_pthread_list_lock);
388 if (freestruct != 0) {
389 TAILQ_REMOVE(&__pthread_head, t, plist);
390 /* if parent has not returned from create yet keep pthread_t */
391 #if PTH_LISTTRACE
392 __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
393 #endif
394 if (t->parentcheck == 0)
395 freesize -= pthreadsize;
396 }
397 t->childexit = 1;
398 thread_count = --_pthread_count;
399 UNLOCK(_pthread_list_lock);
400
401 #if PTH_TRACE
402 __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
403 #endif
404 if (thread_count <=0)
405 exit(0);
406 else
407 __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
408 LIBC_ABORT("thread %p didn't terminate", t);
409 } else {
410 #if PTH_TRACE
411 __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
412 #endif
413 res = vm_deallocate(mach_task_self(), freeaddr, freesize);
414 }
415 } else {
416 if (termthread) {
417 mig_dealloc_reply_port(MACH_PORT_NULL);
418 LOCK(_pthread_list_lock);
419 if (freestruct != 0) {
420 TAILQ_REMOVE(&__pthread_head, t, plist);
421 #if PTH_LISTTRACE
422 __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
423 #endif
424 }
425 thread_count = --_pthread_count;
426 t->childexit = 1;
427 UNLOCK(_pthread_list_lock);
428
429 if (freestruct) {
430 #if PTH_TRACE
431 __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
432 #endif
433 free(t);
434 }
435
436 freeaddr = 0;
437 freesize = 0;
438 #if PTH_TRACE
439 __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
440 #endif
441
442 if (thread_count <=0)
443 exit(0);
444 else
445 __bsdthread_terminate(NULL, 0, kport, joinsem);
446 LIBC_ABORT("thread %p didn't terminate", t);
447 } else if (freestruct) {
448 t->sig = _PTHREAD_NO_SIG;
449 #if PTH_TRACE
450 __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
451 #endif
452 free(t);
453 }
454 }
455 return(res);
456 }
457
458
459
460 /*
461 * Destroy a thread attribute structure
462 */
463 int
464 pthread_attr_destroy(pthread_attr_t *attr)
465 {
466 if (attr->sig == _PTHREAD_ATTR_SIG)
467 {
468 attr->sig = 0;
469 return (0);
470 } else
471 {
472 return (EINVAL); /* Not an attribute structure! */
473 }
474 }
475
476 /*
477 * Get the 'detach' state from a thread attribute structure.
478 * Note: written as a helper function for info hiding
479 */
480 int
481 pthread_attr_getdetachstate(const pthread_attr_t *attr,
482 int *detachstate)
483 {
484 if (attr->sig == _PTHREAD_ATTR_SIG)
485 {
486 *detachstate = attr->detached;
487 return (0);
488 } else
489 {
490 return (EINVAL); /* Not an attribute structure! */
491 }
492 }
493
494 /*
495 * Get the 'inherit scheduling' info from a thread attribute structure.
496 * Note: written as a helper function for info hiding
497 */
498 int
499 pthread_attr_getinheritsched(const pthread_attr_t *attr,
500 int *inheritsched)
501 {
502 if (attr->sig == _PTHREAD_ATTR_SIG)
503 {
504 *inheritsched = attr->inherit;
505 return (0);
506 } else
507 {
508 return (EINVAL); /* Not an attribute structure! */
509 }
510 }
511
512 /*
513 * Get the scheduling parameters from a thread attribute structure.
514 * Note: written as a helper function for info hiding
515 */
516 int
517 pthread_attr_getschedparam(const pthread_attr_t *attr,
518 struct sched_param *param)
519 {
520 if (attr->sig == _PTHREAD_ATTR_SIG)
521 {
522 *param = attr->param;
523 return (0);
524 } else
525 {
526 return (EINVAL); /* Not an attribute structure! */
527 }
528 }
529
530 /*
531 * Get the scheduling policy from a thread attribute structure.
532 * Note: written as a helper function for info hiding
533 */
534 int
535 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
536 int *policy)
537 {
538 if (attr->sig == _PTHREAD_ATTR_SIG)
539 {
540 *policy = attr->policy;
541 return (0);
542 } else
543 {
544 return (EINVAL); /* Not an attribute structure! */
545 }
546 }
547
548 /* Retain the existing default stack size of 512K rather than depending on the main thread's default stack size */
549 static const size_t DEFAULT_STACK_SIZE = (512*1024);
550 /*
551 * Initialize a thread attribute structure to default values.
552 */
553 int
554 pthread_attr_init(pthread_attr_t *attr)
555 {
556 attr->stacksize = DEFAULT_STACK_SIZE;
557 attr->stackaddr = NULL;
558 attr->sig = _PTHREAD_ATTR_SIG;
559 attr->param.sched_priority = default_priority;
560 attr->param.quantum = 10; /* quantum isn't public yet */
561 attr->detached = PTHREAD_CREATE_JOINABLE;
562 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
563 attr->policy = _PTHREAD_DEFAULT_POLICY;
564 attr->freeStackOnExit = 1;
565 attr->fastpath = 1;
566 attr->schedset = 0;
567 attr->guardsize = vm_page_size;
568 return (0);
569 }
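/*
 * Illustrative usage sketch (not part of this file): creating a detached
 * thread with a 1 MB stack using the attribute calls defined in this file.
 * my_worker() is a hypothetical start routine.
 */
#if 0
#include <pthread.h>

static void *my_worker(void *arg) { return arg; }

static int
spawn_detached_worker(void)
{
	pthread_attr_t attr;
	pthread_t tid;
	int err;

	pthread_attr_init(&attr);			/* defaults: 512K stack, joinable */
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	pthread_attr_setstacksize(&attr, 1024 * 1024);	/* must be a page multiple >= PTHREAD_STACK_MIN */
	err = pthread_create(&tid, &attr, my_worker, NULL);
	pthread_attr_destroy(&attr);
	return err;
}
#endif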
570
571 /*
572 * Set the 'detach' state in a thread attribute structure.
573 * Note: written as a helper function for info hiding
574 */
575 int
576 pthread_attr_setdetachstate(pthread_attr_t *attr,
577 int detachstate)
578 {
579 if (attr->sig == _PTHREAD_ATTR_SIG)
580 {
581 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
582 (detachstate == PTHREAD_CREATE_DETACHED))
583 {
584 attr->detached = detachstate;
585 return (0);
586 } else
587 {
588 return (EINVAL);
589 }
590 } else
591 {
592 return (EINVAL); /* Not an attribute structure! */
593 }
594 }
595
596 /*
597 * Set the 'inherit scheduling' state in a thread attribute structure.
598 * Note: written as a helper function for info hiding
599 */
600 int
601 pthread_attr_setinheritsched(pthread_attr_t *attr,
602 int inheritsched)
603 {
604 if (attr->sig == _PTHREAD_ATTR_SIG)
605 {
606 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
607 (inheritsched == PTHREAD_EXPLICIT_SCHED))
608 {
609 attr->inherit = inheritsched;
610 return (0);
611 } else
612 {
613 return (EINVAL);
614 }
615 } else
616 {
617 return (EINVAL); /* Not an attribute structure! */
618 }
619 }
620
621 /*
622 * Set the scheduling parameters in a thread attribute structure.
623 * Note: written as a helper function for info hiding
624 */
625 int
626 pthread_attr_setschedparam(pthread_attr_t *attr,
627 const struct sched_param *param)
628 {
629 if (attr->sig == _PTHREAD_ATTR_SIG)
630 {
631 /* TODO: Validate sched_param fields */
632 attr->param = *param;
633 attr->schedset = 1;
634 return (0);
635 } else
636 {
637 return (EINVAL); /* Not an attribute structure! */
638 }
639 }
640
641 /*
642 * Set the scheduling policy in a thread attribute structure.
643 * Note: written as a helper function for info hiding
644 */
645 int
646 pthread_attr_setschedpolicy(pthread_attr_t *attr,
647 int policy)
648 {
649 if (attr->sig == _PTHREAD_ATTR_SIG)
650 {
651 if ((policy == SCHED_OTHER) ||
652 (policy == SCHED_RR) ||
653 (policy == SCHED_FIFO))
654 {
655 attr->policy = policy;
656 attr->schedset = 1;
657 return (0);
658 } else
659 {
660 return (EINVAL);
661 }
662 } else
663 {
664 return (EINVAL); /* Not an attribute structure! */
665 }
666 }
667
668 /*
669 * Set the scope for the thread.
670 * We currently only provide PTHREAD_SCOPE_SYSTEM
671 */
672 int
673 pthread_attr_setscope(pthread_attr_t *attr,
674 int scope)
675 {
676 if (attr->sig == _PTHREAD_ATTR_SIG) {
677 if (scope == PTHREAD_SCOPE_SYSTEM) {
678 /* No attribute yet for the scope */
679 return (0);
680 } else if (scope == PTHREAD_SCOPE_PROCESS) {
681 return (ENOTSUP);
682 }
683 }
684 return (EINVAL); /* Not an attribute structure! */
685 }
686
687 /*
688 * Get the scope for the thread.
689 * We currently only provide PTHREAD_SCOPE_SYSTEM
690 */
691 int
692 pthread_attr_getscope(const pthread_attr_t *attr,
693 int *scope)
694 {
695 if (attr->sig == _PTHREAD_ATTR_SIG) {
696 *scope = PTHREAD_SCOPE_SYSTEM;
697 return (0);
698 }
699 return (EINVAL); /* Not an attribute structure! */
700 }
701
702 /* Get the base stack address of the given thread */
703 int
704 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
705 {
706 if (attr->sig == _PTHREAD_ATTR_SIG) {
707 *stackaddr = attr->stackaddr;
708 return (0);
709 } else {
710 return (EINVAL); /* Not an attribute structure! */
711 }
712 }
713
714 int
715 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
716 {
717 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
718 attr->stackaddr = stackaddr;
719 attr->freeStackOnExit = 0;
720 attr->fastpath = 0;
721 return (0);
722 } else {
723 return (EINVAL); /* Not an attribute structure! */
724 }
725 }
726
727 int
728 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
729 {
730 if (attr->sig == _PTHREAD_ATTR_SIG) {
731 *stacksize = attr->stacksize;
732 return (0);
733 } else {
734 return (EINVAL); /* Not an attribute structure! */
735 }
736 }
737
738 int
739 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
740 {
741 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
742 attr->stacksize = stacksize;
743 return (0);
744 } else {
745 return (EINVAL); /* Not an attribute structure! */
746 }
747 }
748
749 int
750 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
751 {
752 if (attr->sig == _PTHREAD_ATTR_SIG) {
753 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
754 *stacksize = attr->stacksize;
755 return (0);
756 } else {
757 return (EINVAL); /* Not an attribute structure! */
758 }
759 }
760
761 /* By the SUSv spec, stackaddr is the base address, i.e. the lowest addressable
762 * byte of the stack. This is not the same as in pthread_attr_setstackaddr.
763 */
764 int
765 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
766 {
767 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
768 (((uintptr_t)stackaddr % vm_page_size) == 0) &&
769 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
770 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
771 attr->stacksize = stacksize;
772 attr->freeStackOnExit = 0;
773 attr->fastpath = 0;
774 return (0);
775 } else {
776 return (EINVAL); /* Not an attribute structure! */
777 }
778 }
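/*
 * Illustrative sketch (not part of this file) of the addressing difference
 * noted above: pthread_attr_setstack() takes the lowest address of the
 * region plus its size, while pthread_attr_setstackaddr() takes the high
 * (base) address the stack grows down from. The mmap'ed region here stands
 * in for a hypothetical caller-supplied stack; size is assumed to be a page
 * multiple >= PTHREAD_STACK_MIN.
 */
#if 0
#include <pthread.h>
#include <sys/mman.h>

static int
use_caller_stack(pthread_attr_t *attr, size_t size)
{
	void *lowest = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_ANON | MAP_PRIVATE, -1, 0);	/* page aligned */
	if (lowest == MAP_FAILED)
		return -1;

	pthread_attr_setstack(attr, lowest, size);	/* lowest address plus size */
	/* The older call takes the high (base) address instead:
	 *   pthread_attr_setstackaddr(attr, (char *)lowest + size);
	 */
	return 0;
}
#endif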
779
780
781 /*
782 * Set the guardsize attribute in the attr.
783 */
784 int
785 pthread_attr_setguardsize(pthread_attr_t *attr,
786 size_t guardsize)
787 {
788 if (attr->sig == _PTHREAD_ATTR_SIG) {
789 /* Guardsize of 0 is valid, it means no guard */
790 if ((guardsize % vm_page_size) == 0) {
791 attr->guardsize = guardsize;
792 attr->fastpath = 0;
793 return (0);
794 } else
795 return(EINVAL);
796 }
797 return (EINVAL); /* Not an attribute structure! */
798 }
799
800 /*
801 * Get the guardsize attribute in the attr.
802 */
803 int
804 pthread_attr_getguardsize(const pthread_attr_t *attr,
805 size_t *guardsize)
806 {
807 if (attr->sig == _PTHREAD_ATTR_SIG) {
808 *guardsize = attr->guardsize;
809 return (0);
810 }
811 return (EINVAL); /* Not an attribute structure! */
812 }
813
814
815 /*
816 * Create and start execution of a new thread.
817 */
818
819 static void
820 _pthread_body(pthread_t self)
821 {
822 _pthread_set_self(self);
823 _pthread_exit(self, (self->fun)(self->arg));
824 }
825
826 void
827 _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
828 {
829 int ret;
830 #if WQ_DEBUG
831 pthread_t pself;
832 #endif
833 pthread_attr_t *attrs = &_pthread_attr_default;
834 char * stackaddr;
835
836 if ((pflags & PTHREAD_START_CUSTOM) == 0) {
837 stackaddr = (char *)self;
838 _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
839 #if defined(__i386__) || defined(__x86_64__)
840 _pthread_set_self(self);
841 #endif
842 LOCK(_pthread_list_lock);
843 if (pflags & PTHREAD_START_SETSCHED) {
844 self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
845 self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
846 }
847 /* These are not joinable threads */
848 if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
849 self->detached &= ~PTHREAD_CREATE_JOINABLE;
850 self->detached |= PTHREAD_CREATE_DETACHED;
851 }
852 } else {
853 #if defined(__i386__) || defined(__x86_64__)
854 _pthread_set_self(self);
855 #endif
856 LOCK(_pthread_list_lock);
857 }
858 self->kernel_thread = kport;
859 self->fun = fun;
860 self->arg = funarg;
861
862 /* Add to the pthread list */
863 if (self->parentcheck == 0) {
864 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
865 #if PTH_LISTTRACE
866 __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
867 #endif
868 _pthread_count++;
869 }
870 self->childrun = 1;
871 UNLOCK(_pthread_list_lock);
872
873 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
874 if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
875 printf("Failed to set thread_id in pthread_start\n");
876 #endif
877
878 #if WQ_DEBUG
879 pself = pthread_self();
880 if (self != pself)
881 LIBC_ABORT("self %p != pself %p", self, pself);
882 #endif
883 #if PTH_TRACE
884 __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
885 #endif
886
887 _pthread_exit(self, (self->fun)(self->arg));
888 }
889
890 int
891 _pthread_create(pthread_t t,
892 const pthread_attr_t *attrs,
893 void *stack,
894 const mach_port_t kernel_thread)
895 {
896 int res;
897 res = 0;
898
899 do
900 {
901 memset(t, 0, sizeof(*t));
902 t->newstyle = 0;
903 t->schedset = 0;
904 t->kernalloc = 0;
905 t->tsd[0] = t;
906 t->max_tsd_key = 0;
907 t->wqthread = 0;
908 t->cur_workq = 0;
909 t->cur_workitem = 0;
910 t->stacksize = attrs->stacksize;
911 t->stackaddr = (void *)stack;
912 t->guardsize = attrs->guardsize;
913 t->kernel_thread = kernel_thread;
914 t->detached = attrs->detached;
915 t->inherit = attrs->inherit;
916 t->policy = attrs->policy;
917 t->param = attrs->param;
918 t->freeStackOnExit = attrs->freeStackOnExit;
919 t->mutexes = (struct _pthread_mutex *)NULL;
920 t->sig = _PTHREAD_SIG;
921 t->reply_port = MACH_PORT_NULL;
922 t->cthread_self = NULL;
923 LOCK_INIT(t->lock);
924 t->plist.tqe_next = (struct _pthread *)0;
925 t->plist.tqe_prev = (struct _pthread **)0;
926 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
927 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
928 t->death = SEMAPHORE_NULL;
929
930 if (kernel_thread != MACH_PORT_NULL)
931 (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
932 } while (0);
933 return (res);
934 }
935
936 void
937 _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
938 {
939 mach_vm_offset_t stackaddr = (mach_vm_offset_t)(long)stack;
940
941 if (nozero == 0) {
942 memset(t, 0, sizeof(*t));
943 t->plist.tqe_next = (struct _pthread *)0;
944 t->plist.tqe_prev = (struct _pthread **)0;
945 }
946 t->schedset = attrs->schedset;
947 t->tsd[0] = t;
948 if (kernalloc != 0) {
949 stackaddr = (mach_vm_offset_t)(long)t;
950
951 /* if allocated from kernel set values appropriately */
952 t->stacksize = stacksize;
953 t->stackaddr = (void *)(long)stackaddr;
954 t->freeStackOnExit = 1;
955 t->freeaddr = (void *)(long)(stackaddr - stacksize - vm_page_size);
956 t->freesize = pthreadsize + stacksize + vm_page_size;
957 } else {
958 t->stacksize = attrs->stacksize;
959 t->stackaddr = (void *)stack;
960 }
961 t->guardsize = attrs->guardsize;
962 t->detached = attrs->detached;
963 t->inherit = attrs->inherit;
964 t->policy = attrs->policy;
965 t->param = attrs->param;
966 t->mutexes = (struct _pthread_mutex *)NULL;
967 t->sig = _PTHREAD_SIG;
968 t->reply_port = MACH_PORT_NULL;
969 t->cthread_self = NULL;
970 LOCK_INIT(t->lock);
971 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
972 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
973 t->death = SEMAPHORE_NULL;
974 t->newstyle = 1;
975 t->kernalloc = kernalloc;
976 t->wqthread = 0;
977 t->cur_workq = 0;
978 t->cur_workitem = 0;
979 t->max_tsd_key = 0;
980 }
981
982 static void
983 _pthread_tsd_reinit(pthread_t t)
984 {
985 bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
986 }
987
988
989 /* Need to deprecate this in future */
990 int
991 _pthread_is_threaded(void)
992 {
993 return __is_threaded;
994 }
995
996 /* Non-portable public API to determine whether this process has (or had) at least one
997 * thread apart from the main thread. There may be a race if a thread is in the process
998 * of being created at the time of the call. It does not indicate whether more than one
999 * thread exists at this point in time.
1000 */
1001 int
1002 pthread_is_threaded_np(void)
1003 {
1004 return (__is_threaded);
1005 }
1006
1007 mach_port_t
1008 pthread_mach_thread_np(pthread_t t)
1009 {
1010 mach_port_t kport = MACH_PORT_NULL;
1011
1012 if (t == NULL)
1013 goto out;
1014
1015 /*
1016 * If the call is on self, return the kernel port. We cannot
1017 * add this bypass for main thread as it might have exited,
1018 * and we should not return stale port info.
1019 */
1020 if (t == pthread_self())
1021 {
1022 kport = t->kernel_thread;
1023 goto out;
1024 }
1025
1026 if (_pthread_lookup_thread(t, &kport, 0) != 0)
1027 return((mach_port_t)0);
1028
1029 out:
1030 return(kport);
1031 }
1032
1033 pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread)
1034 {
1035 struct _pthread * p = NULL;
1036
1037 /* No need to wait as mach port is already known */
1038 LOCK(_pthread_list_lock);
1039 TAILQ_FOREACH(p, &__pthread_head, plist) {
1040 if (p->kernel_thread == kernel_thread)
1041 break;
1042 }
1043 UNLOCK(_pthread_list_lock);
1044 return p;
1045 }
1046
1047 size_t
1048 pthread_get_stacksize_np(pthread_t t)
1049 {
1050 int ret,nestingDepth=0;
1051 size_t size = 0;
1052 vm_address_t address=0;
1053 vm_size_t region_size=0;
1054 struct vm_region_submap_info_64 info;
1055 mach_msg_type_number_t count;
1056
1057 if (t == NULL)
1058 return(ESRCH);
1059
1060 if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
1061 {
1062 size=t->stacksize;
1063 return size;
1064 }
1065
1066
1067 LOCK(_pthread_list_lock);
1068
1069 if ((ret = _pthread_find_thread(t)) != 0) {
1070 UNLOCK(_pthread_list_lock);
1071 return(ret);
1072 }
1073
1074 size=t->stacksize;
1075 UNLOCK(_pthread_list_lock);
1076
1077 return(size);
1078 }
1079
1080 void *
1081 pthread_get_stackaddr_np(pthread_t t)
1082 {
1083 int ret;
1084 void * addr = NULL;
1085
1086 if (t == NULL)
1087 return((void *)(long)ESRCH);
1088
1089 if(t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
1090 return t->stackaddr;
1091
1092 LOCK(_pthread_list_lock);
1093
1094 if ((ret = _pthread_find_thread(t)) != 0) {
1095 UNLOCK(_pthread_list_lock);
1096 return((void *)(long)ret);
1097 }
1098 addr = t->stackaddr;
1099 UNLOCK(_pthread_list_lock);
1100
1101 return(addr);
1102 }
1103
1104 mach_port_t
1105 _pthread_reply_port(pthread_t t)
1106 {
1107 return t->reply_port;
1108 }
1109
1110
1111 /* returns non-zero if the current thread is the main thread */
1112 int
1113 pthread_main_np(void)
1114 {
1115 pthread_t self = pthread_self();
1116
1117 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
1118 }
1119
1120
1121 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
1122 /* If we are passed a pthread_t that is NULL, we return the current
1123 thread's thread_id. That way callers who just want their own
1124 thread_id don't have to call pthread_self in addition to us
1125 doing it.
1126 */
1127 int
1128 pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
1129 {
1130 int rval=0;
1131 pthread_t self = pthread_self();
1132
1133 if (thread_id == NULL) {
1134 return(EINVAL);
1135 } else if (thread == NULL || thread == self) {
1136 *thread_id = self->thread_id;
1137 return rval;
1138 }
1139
1140 LOCK(_pthread_list_lock);
1141 if ((rval = _pthread_find_thread(thread)) != 0) {
1142 UNLOCK(_pthread_list_lock);
1143 return(rval);
1144 }
1145 *thread_id = thread->thread_id;
1146 UNLOCK(_pthread_list_lock);
1147 return rval;
1148 }
1149 #endif
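/*
 * Illustrative usage sketch (not part of this file): passing NULL asks for
 * the calling thread's id, as described above.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void
print_my_thread_id(void)
{
	__uint64_t tid = 0;

	if (pthread_threadid_np(NULL, &tid) == 0)
		printf("thread id: %llu\n", (unsigned long long)tid);
}
#endif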
1150
1151 int
1152 pthread_getname_np(pthread_t thread, char *threadname, size_t len)
1153 {
1154 int rval;
1155 rval = 0;
1156
1157 if (thread == NULL)
1158 return(ESRCH);
1159
1160 LOCK(_pthread_list_lock);
1161 if ((rval = _pthread_find_thread(thread)) != 0) {
1162 UNLOCK(_pthread_list_lock);
1163 return(rval);
1164 }
1165 strlcpy(threadname, thread->pthread_name, len);
1166 UNLOCK(_pthread_list_lock);
1167 return rval;
1168 }
1169
1170 int
1171 pthread_setname_np(const char *threadname)
1172 {
1173 int rval;
1174 size_t len;
1175
1176 rval = 0;
1177 len = strlen(threadname);
1178 rval = sysctlbyname("kern.threadname", NULL, 0, threadname, len);
1179 if(rval == 0)
1180 {
1181 strlcpy((pthread_self())->pthread_name, threadname, len+1);
1182 }
1183 return rval;
1184
1185 }
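/*
 * Illustrative usage sketch (not part of this file): pthread_setname_np()
 * names the calling thread only; pthread_getname_np() can read the name
 * back. The thread name "worker" is a hypothetical example.
 */
#if 0
#include <pthread.h>

static void
name_current_thread(void)
{
	char buf[64];

	pthread_setname_np("worker");	/* applies to the calling thread */
	pthread_getname_np(pthread_self(), buf, sizeof(buf));
}
#endif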
1186
1187 static int
1188 _new_pthread_create_suspended(pthread_t *thread,
1189 const pthread_attr_t *attr,
1190 void *(*start_routine)(void *),
1191 void *arg,
1192 int create_susp)
1193 {
1194 pthread_attr_t *attrs;
1195 void *stack;
1196 int error;
1197 unsigned int flags;
1198 pthread_t t,t2;
1199 kern_return_t kern_res;
1200 mach_port_t kernel_thread = MACH_PORT_NULL;
1201 int needresume;
1202 task_t self = mach_task_self();
1203 int kernalloc = 0;
1204 int susp = create_susp;
1205
1206 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1207 { /* Set up default parameters */
1208 attrs = &_pthread_attr_default;
1209 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1210 return EINVAL;
1211 }
1212 error = 0;
1213
1214 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1215 (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
1216 needresume = 1;
1217 susp = 1;
1218 } else
1219 needresume = 0;
1220
1221 /* In the default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
1222 * any change in priority or policy is needed here.
1223 */
1224 if ((__oldstyle == 1) || (create_susp != 0)) {
1225 /* Rosetta or pthread_create_suspended() */
1226 /* running under rosetta */
1227 /* Allocate a stack for the thread */
1228 #if PTH_TRACE
1229 __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
1230 #endif
1231 if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
1232 return(error);
1233 }
1234 t = (pthread_t)malloc(sizeof(struct _pthread));
1235 *thread = t;
1236 if (susp) {
1237 /* Create the Mach thread for this thread */
1238 PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
1239 if (kern_res != KERN_SUCCESS)
1240 {
1241 printf("Can't create thread: %d\n", kern_res);
1242 return(EINVAL);
1243 }
1244 }
1245 if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1246 {
1247 return(error);
1248 }
1249 set_malloc_singlethreaded(0);
1250 __is_threaded = 1;
1251
1252 /* Send it on its way */
1253 t->arg = arg;
1254 t->fun = start_routine;
1255 t->newstyle = 0;
1256 /* Now set it up to execute */
1257 LOCK(_pthread_list_lock);
1258 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1259 #if PTH_LISTTRACE
1260 __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
1261 #endif
1262 _pthread_count++;
1263 UNLOCK(_pthread_list_lock);
1264 _pthread_setup(t, _pthread_body, stack, susp, needresume);
1265 return(0);
1266 } else {
1267
1268 flags = 0;
1269 if (attrs->fastpath == 1)
1270 kernalloc = 1;
1271
1272 if (attrs->detached == PTHREAD_CREATE_DETACHED)
1273 flags |= PTHREAD_START_DETACHED;
1274 if (attrs->schedset != 0) {
1275 flags |= PTHREAD_START_SETSCHED;
1276 flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
1277 flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
1278 }
1279
1280 set_malloc_singlethreaded(0);
1281 __is_threaded = 1;
1282
1283 if (kernalloc == 0) {
1284 /* Allocate a stack for the thread */
1285 flags |= PTHREAD_START_CUSTOM;
1286 if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
1287 return(error);
1288 }
1289 /* Send it on its way */
1290 t->arg = arg;
1291 t->fun = start_routine;
1292 t->newstyle = 1;
1293
1294 #if PTH_TRACE
1295 __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
1296 #endif
1297
1298 if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
1299 _pthread_free_pthread_onstack(t, 1, 0);
1300 return (EAGAIN);
1301 }
1302 else t=t2;
1303 LOCK(_pthread_list_lock);
1304 t->parentcheck = 1;
1305 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1306 /* detached child exited, mop up */
1307 UNLOCK(_pthread_list_lock);
1308 #if PTH_TRACE
1309 __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
1310 #endif
1311 if(t->freeStackOnExit)
1312 vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
1313 else
1314 free(t);
1315 } else if (t->childrun == 0) {
1316 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1317 _pthread_count++;
1318 #if PTH_LISTTRACE
1319 __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
1320 #endif
1321 UNLOCK(_pthread_list_lock);
1322 } else
1323 UNLOCK(_pthread_list_lock);
1324
1325 *thread = t;
1326
1327 #if PTH_TRACE
1328 __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
1329 #endif
1330 return (0);
1331
1332 } else {
1333 /* kernel allocation */
1334 #if PTH_TRACE
1335 __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
1336 #endif
1337 if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
1338 return (EAGAIN);
1339 /* Now set it up to execute */
1340 LOCK(_pthread_list_lock);
1341 t->parentcheck = 1;
1342 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1343 /* detached child exited, mop up */
1344 UNLOCK(_pthread_list_lock);
1345 #if PTH_TRACE
1346 __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
1347 #endif
1348 vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
1349 } else if (t->childrun == 0) {
1350 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1351 _pthread_count++;
1352 #if PTH_LISTTRACE
1353 __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
1354 #endif
1355 UNLOCK(_pthread_list_lock);
1356 } else
1357 UNLOCK(_pthread_list_lock);
1358
1359 *thread = t;
1360
1361 #if PTH_TRACE
1362 __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
1363 #endif
1364 return(0);
1365 }
1366 }
1367 }
1368
1369 static int
1370 _pthread_create_suspended(pthread_t *thread,
1371 const pthread_attr_t *attr,
1372 void *(*start_routine)(void *),
1373 void *arg,
1374 int suspended)
1375 {
1376 pthread_attr_t *attrs;
1377 void *stack;
1378 int res;
1379 pthread_t t;
1380 kern_return_t kern_res;
1381 mach_port_t kernel_thread = MACH_PORT_NULL;
1382 int needresume;
1383
1384 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1385 { /* Set up default parameters */
1386 attrs = &_pthread_attr_default;
1387 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1388 return EINVAL;
1389 }
1390 res = 0;
1391
1392 /* In the default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
1393 * any change in priority or policy is needed here.
1394 */
1395 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1396 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
1397 needresume = 1;
1398 suspended = 1;
1399 } else
1400 needresume = 0;
1401
1402 do
1403 {
1404 /* Allocate a stack for the thread */
1405 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
1406 break;
1407 }
1408 t = (pthread_t)malloc(sizeof(struct _pthread));
1409 *thread = t;
1410 if (suspended) {
1411 /* Create the Mach thread for this thread */
1412 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
1413 if (kern_res != KERN_SUCCESS)
1414 {
1415 printf("Can't create thread: %d\n", kern_res);
1416 res = EINVAL; /* Need better error here? */
1417 break;
1418 }
1419 }
1420 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1421 {
1422 break;
1423 }
1424 set_malloc_singlethreaded(0);
1425 __is_threaded = 1;
1426
1427 /* Send it on its way */
1428 t->arg = arg;
1429 t->fun = start_routine;
1430 /* Now set it up to execute */
1431 LOCK(_pthread_list_lock);
1432 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1433 #if PTH_LISTTRACE
1434 __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
1435 #endif
1436 _pthread_count++;
1437 UNLOCK(_pthread_list_lock);
1438 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
1439 } while (0);
1440 return (res);
1441 }
1442
1443 int
1444 pthread_create(pthread_t *thread,
1445 const pthread_attr_t *attr,
1446 void *(*start_routine)(void *),
1447 void *arg)
1448 {
1449 return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
1450 }
1451
1452 int
1453 pthread_create_suspended_np(pthread_t *thread,
1454 const pthread_attr_t *attr,
1455 void *(*start_routine)(void *),
1456 void *arg)
1457 {
1458 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
1459 }
1460
1461 /*
1462 * Make a thread 'detached' - no longer 'joinable' with other threads.
1463 */
1464 int
1465 pthread_detach(pthread_t thread)
1466 {
1467 int newstyle = 0;
1468 int ret;
1469
1470 if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
1471 return (ret); /* Not a valid thread */
1472
1473 LOCK(thread->lock);
1474 newstyle = thread->newstyle;
1475 if (thread->detached & PTHREAD_CREATE_JOINABLE)
1476 {
1477 if (thread->detached & _PTHREAD_EXITED) {
1478 UNLOCK(thread->lock);
1479 pthread_join(thread, NULL);
1480 return 0;
1481 } else {
1482 if (newstyle == 0) {
1483 semaphore_t death = thread->death;
1484
1485 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1486 thread->detached |= PTHREAD_CREATE_DETACHED;
1487 UNLOCK(thread->lock);
1488 if (death)
1489 (void) semaphore_signal(death);
1490 } else {
1491 mach_port_t joinport = thread->joiner_notify;
1492
1493 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1494 thread->detached |= PTHREAD_CREATE_DETACHED;
1495
1496 UNLOCK(thread->lock);
1497 if (joinport) {
1498 semaphore_signal(joinport);
1499 }
1500 }
1501 return(0);
1502 }
1503 } else {
1504 UNLOCK(thread->lock);
1505 return (EINVAL);
1506 }
1507 }
1508
1509
1510 /*
1511 * pthread_kill call to system call
1512 */
1513 int
1514 pthread_kill (
1515 pthread_t th,
1516 int sig)
1517 {
1518 int error = 0;
1519 mach_port_t kport = MACH_PORT_NULL;
1520
1521 if ((sig < 0) || (sig > NSIG))
1522 return(EINVAL);
1523
1524 if (_pthread_lookup_thread(th, &kport, 0) != 0)
1525 return (ESRCH); /* Not a valid thread */
1526
1527 /* if the thread is a workqueue thread, just return error */
1528 if ((th->wqthread != 0) && (th->wqkillset ==0)) {
1529 return(ENOTSUP);
1530 }
1531
1532 error = __pthread_kill(kport, sig);
1533
1534 if (error == -1)
1535 error = errno;
1536 return(error);
1537 }
1538
1539 int
1540 __pthread_workqueue_setkill(int enable)
1541 {
1542 pthread_t self = pthread_self();
1543
1544 LOCK(self->lock);
1545 if (enable == 0)
1546 self->wqkillset = 0;
1547 else
1548 self->wqkillset = 1;
1549 UNLOCK(self->lock);
1550
1551 return(0);
1552
1553 }
1554
1555 /* Announce that there are pthread resources ready to be reclaimed in a */
1556 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
1557 /* thread underneath is terminated right away. */
1558 static
1559 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
1560 pthread_reap_msg_t msg;
1561 kern_return_t ret;
1562
1563 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
1564 MACH_MSG_TYPE_MOVE_SEND);
1565 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
1566 msg.header.msgh_remote_port = thread_recycle_port;
1567 msg.header.msgh_local_port = kernel_thread;
1568 msg.header.msgh_id = 0x44454144; /* 'DEAD' */
1569 msg.thread = thread;
1570 ret = mach_msg_send(&msg.header);
1571 assert(ret == MACH_MSG_SUCCESS);
1572 }
1573
1574 /* Reap the resources for available threads */
1575 __private_extern__
1576 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
1577 mach_port_type_t ptype;
1578 kern_return_t ret;
1579 task_t self;
1580
1581 self = mach_task_self();
1582 if (kernel_thread != MACH_PORT_DEAD) {
1583 ret = mach_port_type(self, kernel_thread, &ptype);
1584 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
1585 /* not quite dead yet... */
1586 return EAGAIN;
1587 }
1588 ret = mach_port_deallocate(self, kernel_thread);
1589 if (ret != KERN_SUCCESS) {
1590 fprintf(stderr,
1591 "mach_port_deallocate(kernel_thread) failed: %s\n",
1592 mach_error_string(ret));
1593 }
1594 }
1595
1596 if (th->reply_port != MACH_PORT_NULL) {
1597 ret = mach_port_mod_refs(self, th->reply_port,
1598 MACH_PORT_RIGHT_RECEIVE, -1);
1599 if (ret != KERN_SUCCESS) {
1600 fprintf(stderr,
1601 "mach_port_mod_refs(reply_port) failed: %s\n",
1602 mach_error_string(ret));
1603 }
1604 }
1605
1606 if (th->freeStackOnExit) {
1607 vm_address_t addr = (vm_address_t)th->stackaddr;
1608 vm_size_t size;
1609
1610 size = (vm_size_t)th->stacksize + th->guardsize;
1611
1612 addr -= size;
1613 ret = vm_deallocate(self, addr, size);
1614 if (ret != KERN_SUCCESS) {
1615 fprintf(stderr,
1616 "vm_deallocate(stack) failed: %s\n",
1617 mach_error_string(ret));
1618 }
1619 }
1620
1621
1622 if (value_ptr)
1623 *value_ptr = th->exit_value;
1624 if (conforming) {
1625 if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1626 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
1627 *value_ptr = PTHREAD_CANCELED;
1628 th->sig = _PTHREAD_NO_SIG;
1629 }
1630
1631
1632 if (th != &_thread)
1633 free(th);
1634
1635 return 0;
1636 }
1637
1638 static
1639 void _pthread_reap_threads(void)
1640 {
1641 pthread_reap_msg_t msg;
1642 kern_return_t ret;
1643
1644 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1645 sizeof msg, thread_recycle_port,
1646 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1647 while (ret == MACH_MSG_SUCCESS) {
1648 mach_port_t kernel_thread = msg.header.msgh_remote_port;
1649 pthread_t thread = msg.thread;
1650
1651 /* deal with race with thread_create_running() */
1652 if (kernel_thread == MACH_PORT_NULL &&
1653 kernel_thread != thread->kernel_thread) {
1654 kernel_thread = thread->kernel_thread;
1655 }
1656
1657 if ( kernel_thread == MACH_PORT_NULL ||
1658 _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
1659 {
1660 /* not dead yet, put it back for someone else to reap, stop here */
1661 _pthread_become_available(thread, kernel_thread);
1662 return;
1663 }
1664
1665 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1666 sizeof msg, thread_recycle_port,
1667 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1668 }
1669 }
1670
1671 /* For compatibility... */
1672
1673 pthread_t
1674 _pthread_self() {
1675 return pthread_self();
1676 }
1677
1678 /*
1679 * Terminate a thread.
1680 */
1681 int __disable_threadsignal(int);
1682
1683 static void
1684 _pthread_exit(pthread_t self, void *value_ptr)
1685 {
1686 struct __darwin_pthread_handler_rec *handler;
1687 kern_return_t kern_res;
1688 int thread_count;
1689 int newstyle = self->newstyle;
1690
1691 /* Make this thread not receive any signals */
1692 __disable_threadsignal(1);
1693
1694 #if PTH_TRACE
1695 __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
1696 #endif
1697
1698 /* set cancel state to disable and type to deferred */
1699 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
1700
1701 while ((handler = self->__cleanup_stack) != 0)
1702 {
1703 (handler->__routine)(handler->__arg);
1704 self->__cleanup_stack = handler->__next;
1705 }
1706 _pthread_tsd_cleanup(self);
1707
1708 if (newstyle == 0) {
1709 _pthread_reap_threads();
1710
1711 LOCK(self->lock);
1712 self->detached |= _PTHREAD_EXITED;
1713
1714 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1715 mach_port_t death = self->death;
1716 self->exit_value = value_ptr;
1717 UNLOCK(self->lock);
1718 /* the joiner will need a kernel thread reference, leave ours for it */
1719 if (death) {
1720 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1721 if (kern_res != KERN_SUCCESS)
1722 fprintf(stderr,
1723 "semaphore_signal(death) failed: %s\n",
1724 mach_error_string(kern_res));
1725 }
1726 LOCK(_pthread_list_lock);
1727 thread_count = --_pthread_count;
1728 UNLOCK(_pthread_list_lock);
1729 } else {
1730 UNLOCK(self->lock);
1731 LOCK(_pthread_list_lock);
1732 TAILQ_REMOVE(&__pthread_head, self, plist);
1733 #if PTH_LISTTRACE
1734 __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
1735 #endif
1736 thread_count = --_pthread_count;
1737 UNLOCK(_pthread_list_lock);
1738 /* with no joiner, we let _pthread_become_available consume our cached ref */
1739 _pthread_become_available(self, self->kernel_thread);
1740 }
1741
1742 if (thread_count <= 0)
1743 exit(0);
1744
1745 /* Use a new reference to terminate ourselves. Should never return. */
1746 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1747 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1748 mach_error_string(kern_res));
1749 } else {
1750 semaphore_t joinsem = SEMAPHORE_NULL;
1751
1752 if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
1753 joinsem = new_sem_from_pool();
1754 LOCK(self->lock);
1755 self->detached |= _PTHREAD_EXITED;
1756
1757 self->exit_value = value_ptr;
1758 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1759 if (self->joiner_notify == (mach_port_t)0) {
1760 self->joiner_notify = joinsem;
1761 joinsem = SEMAPHORE_NULL;
1762 }
1763 UNLOCK(self->lock);
1764 if (joinsem != SEMAPHORE_NULL)
1765 restore_sem_to_pool(joinsem);
1766 _pthread_free_pthread_onstack(self, 0, 1);
1767 } else {
1768 UNLOCK(self->lock);
1769 /* with no joiner, we let _pthread_become_available consume our cached ref */
1770 if (joinsem != SEMAPHORE_NULL)
1771 restore_sem_to_pool(joinsem);
1772 _pthread_free_pthread_onstack(self, 1, 1);
1773 }
1774 }
1775 LIBC_ABORT("thread %p didn't exit", self);
1776 }
1777
1778 void
1779 pthread_exit(void *value_ptr)
1780 {
1781 pthread_t self = pthread_self();
1782 /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
1783 if (self->wqthread == 0) {
1784 _pthread_exit(self, value_ptr);
1785 } else {
1786 LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
1787 }
1788 }
1789
1790 /*
1791 * Get the scheduling policy and scheduling parameters for a thread.
1792 */
1793 int
1794 pthread_getschedparam(pthread_t thread,
1795 int *policy,
1796 struct sched_param *param)
1797 {
1798 int ret;
1799
1800 if (thread == NULL)
1801 return(ESRCH);
1802
1803 LOCK(_pthread_list_lock);
1804
1805 if ((ret = _pthread_find_thread(thread)) != 0) {
1806 UNLOCK(_pthread_list_lock);
1807 return(ret);
1808 }
1809 if (policy != 0)
1810 *policy = thread->policy;
1811 if (param != 0)
1812 *param = thread->param;
1813 UNLOCK(_pthread_list_lock);
1814
1815 return(0);
1816 }
1817
1818 /*
1819 * Set the scheduling policy and scheduling parameters for a thread.
1820 */
1821 static int
1822 pthread_setschedparam_internal(pthread_t thread,
1823 mach_port_t kport,
1824 int policy,
1825 const struct sched_param *param)
1826 {
1827 policy_base_data_t bases;
1828 policy_base_t base;
1829 mach_msg_type_number_t count;
1830 kern_return_t ret;
1831
1832 switch (policy)
1833 {
1834 case SCHED_OTHER:
1835 bases.ts.base_priority = param->sched_priority;
1836 base = (policy_base_t)&bases.ts;
1837 count = POLICY_TIMESHARE_BASE_COUNT;
1838 break;
1839 case SCHED_FIFO:
1840 bases.fifo.base_priority = param->sched_priority;
1841 base = (policy_base_t)&bases.fifo;
1842 count = POLICY_FIFO_BASE_COUNT;
1843 break;
1844 case SCHED_RR:
1845 bases.rr.base_priority = param->sched_priority;
1846 /* quantum isn't public yet */
1847 bases.rr.quantum = param->quantum;
1848 base = (policy_base_t)&bases.rr;
1849 count = POLICY_RR_BASE_COUNT;
1850 break;
1851 default:
1852 return (EINVAL);
1853 }
1854 ret = thread_policy(kport, policy, base, count, TRUE);
1855 if (ret != KERN_SUCCESS)
1856 return (EINVAL);
1857 return (0);
1858 }
1859
1860 int
1861 pthread_setschedparam(pthread_t t,
1862 int policy,
1863 const struct sched_param *param)
1864 {
1865 mach_port_t kport = MACH_PORT_NULL;
1866 int error;
1867 int bypass = 1;
1868
1869 if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
1870 bypass = 0;
1871 if (_pthread_lookup_thread(t, &kport, 0) != 0)
1872 return(ESRCH);
1873 } else
1874 kport = t->kernel_thread;
1875
1876 error = pthread_setschedparam_internal(t, kport, policy, param);
1877 if (error == 0) {
1878 if (bypass == 0) {
1879 /* ensure the thread is still valid */
1880 LOCK(_pthread_list_lock);
1881 if ((error = _pthread_find_thread(t)) != 0) {
1882 UNLOCK(_pthread_list_lock);
1883 return(error);
1884 }
1885 t->policy = policy;
1886 t->param = *param;
1887 UNLOCK(_pthread_list_lock);
1888 } else {
1889 t->policy = policy;
1890 t->param = *param;
1891 }
1892 }
1893 return(error);
1894 }
1895
1896 /*
1897 * Get the minimum priority for the given policy
1898 */
1899 int
1900 sched_get_priority_min(int policy)
1901 {
1902 return default_priority - 16;
1903 }
1904
1905 /*
1906 * Get the maximum priority for the given policy
1907 */
1908 int
1909 sched_get_priority_max(int policy)
1910 {
1911 return default_priority + 16;
1912 }
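/*
 * Illustrative usage sketch (not part of this file): the priority range for
 * every policy is default_priority +/- 16 as returned above; a hypothetical
 * caller bumps its own priority within that range.
 */
#if 0
#include <pthread.h>
#include <sched.h>

static int
raise_self_priority(void)
{
	struct sched_param param;
	int policy;

	pthread_getschedparam(pthread_self(), &policy, &param);
	if (param.sched_priority < sched_get_priority_max(policy))
		param.sched_priority++;
	return pthread_setschedparam(pthread_self(), policy, &param);
}
#endif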
1913
1914 /*
1915 * Determine if two thread identifiers represent the same thread.
1916 */
1917 int
1918 pthread_equal(pthread_t t1,
1919 pthread_t t2)
1920 {
1921 return (t1 == t2);
1922 }
1923
1924 __private_extern__ void
1925 _pthread_set_self(pthread_t p)
1926 {
1927 extern void __pthread_set_self(pthread_t);
1928 if (p == 0) {
1929 bzero(&_thread, sizeof(struct _pthread));
1930 p = &_thread;
1931 }
1932 p->tsd[0] = p;
1933 __pthread_set_self(p);
1934 }
1935
1936 void
1937 cthread_set_self(void *cself)
1938 {
1939 pthread_t self = pthread_self();
1940 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1941 _pthread_set_self(cself);
1942 return;
1943 }
1944 self->cthread_self = cself;
1945 }
1946
1947 void *
1948 ur_cthread_self(void) {
1949 pthread_t self = pthread_self();
1950 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1951 return (void *)self;
1952 }
1953 return self->cthread_self;
1954 }
1955
1956 /*
1957 * Cancellation handler for pthread_once: the init routine can contain a
1958 * cancellation point, in which case we must release the spin lock on the way out.
1959 */
1960 void
1961 __pthread_once_cancel_handler(pthread_once_t *once_control)
1962 {
1963 _spin_unlock(&once_control->lock);
1964 }
1965
1966
1967 /*
1968 * Execute a function exactly one time in a thread-safe fashion.
1969 */
1970 int
1971 pthread_once(pthread_once_t *once_control,
1972 void (*init_routine)(void))
1973 {
1974 _spin_lock(&once_control->lock);
1975 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1976 {
1977 pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
1978 (*init_routine)();
1979 pthread_cleanup_pop(0);
1980 once_control->sig = _PTHREAD_ONCE_SIG;
1981 }
1982 _spin_unlock(&once_control->lock);
1983 return (0); /* Spec defines no possible errors! */
1984 }
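/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * thread-safe one-time initialization with pthread_once().  The names
 * init_once, init_routine and ensure_initialized are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int global_state;

static void
init_routine(void)
{
	global_state = 42;	/* executes exactly once, no matter how many callers */
}

static void
ensure_initialized(void)
{
	(void)pthread_once(&init_once, init_routine);
}
#endif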
1985
1986 /*
1987 * Insert a cancellation point in a thread.
1988 */
1989 __private_extern__ void
1990 _pthread_testcancel(pthread_t thread, int isconforming)
1991 {
1992 LOCK(thread->lock);
1993 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1994 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1995 {
1996 UNLOCK(thread->lock);
1997 if (isconforming)
1998 pthread_exit(PTHREAD_CANCELED);
1999 else
2000 pthread_exit(0);
2001 }
2002 UNLOCK(thread->lock);
2003 }
2004
2005
2006
2007 int
2008 pthread_getconcurrency(void)
2009 {
2010 return(pthread_concurrency);
2011 }
2012
2013 int
2014 pthread_setconcurrency(int new_level)
2015 {
2016 if (new_level < 0)
2017 return EINVAL;
2018 pthread_concurrency = new_level;
2019 return(0);
2020 }
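/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * the concurrency level is just a hint; as the code above shows, this library
 * simply stores and returns the value.  The helper name is hypothetical.
 */
#if 0
#include <pthread.h>

static void
hint_concurrency(int ncpus)
{
	if (pthread_setconcurrency(ncpus) == 0) {
		int level = pthread_getconcurrency();	/* returns the stored hint */
		(void)level;
	}
}
#endif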
2021
2022 /*
2023 * Perform package initialization - called automatically when application starts
2024 */
2025 __private_extern__ int
2026 pthread_init(void)
2027 {
2028 pthread_attr_t *attrs;
2029 pthread_t thread;
2030 kern_return_t kr;
2031 host_priority_info_data_t priority_info;
2032 host_info_t info;
2033 host_flavor_t flavor;
2034 host_t host;
2035 mach_msg_type_number_t count;
2036 int mib[2];
2037 int ncpus = 0;
2038 size_t len;
2039 void *stackaddr;
2040
2041 pthreadsize = round_page(sizeof (struct _pthread));
2042 count = HOST_PRIORITY_INFO_COUNT;
2043 info = (host_info_t)&priority_info;
2044 flavor = HOST_PRIORITY_INFO;
2045 host = mach_host_self();
2046 kr = host_info(host, flavor, info, &count);
2047 if (kr != KERN_SUCCESS)
2048 printf("host_info failed (%d); probably need privilege.\n", kr);
2049 else {
2050 default_priority = priority_info.user_priority;
2051 min_priority = priority_info.minimum_priority;
2052 max_priority = priority_info.maximum_priority;
2053 }
2054 attrs = &_pthread_attr_default;
2055 pthread_attr_init(attrs);
2056
2057 TAILQ_INIT(&__pthread_head);
2058 LOCK_INIT(_pthread_list_lock);
2059 thread = &_thread;
2060 TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
2061 _pthread_set_self(thread);
2062 #if PTH_LISTTRACE
2063 __kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
2064 #endif
2065
2066 /* In case of dyld reset the tsd keys from 1 - 10 */
2067 _pthread_keys_init();
2068
2069 mib[0] = CTL_KERN;
2070 mib[1] = KERN_USRSTACK;
2071 len = sizeof (stackaddr);
2072 if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
2073 stackaddr = (void *)USRSTACK;
2074 _pthread_create(thread, attrs, stackaddr, mach_thread_self());
2075 thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
2076 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
2077
2078 _init_cpu_capabilities();
2079 if ((ncpus = _NumCPUs()) > 1)
2080 _spin_tries = MP_SPIN_TRIES;
2081
2082 workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
2083 workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
2084 workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
2085
2086 mach_port_deallocate(mach_task_self(), host);
2087
2088 #if defined(__ppc__)
2089 IF_ROSETTA() {
2090 __oldstyle = 1;
2091 }
2092 #endif
2093 #if defined(__arm__)
2094 __oldstyle = 1;
2095 #endif
2096
2097 #if defined(_OBJC_PAGE_BASE_ADDRESS)
2098 {
2099 vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
2100 kr = vm_map(mach_task_self(),
2101 &objcRTPage, vm_page_size * 4, vm_page_size - 1,
2102 VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
2103 MACH_PORT_NULL,
2104 (vm_address_t)0, FALSE,
2105 (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
2106 VM_INHERIT_DEFAULT);
2107 /* We ignore the return result here. The ObjC runtime will just have to deal. */
2108 }
2109 #endif
2110
2111 mig_init(1); /* enable multi-threaded mig interfaces */
2112 if (__oldstyle == 0) {
2113 #if defined(__i386__) || defined(__x86_64__)
2114 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (__uint64_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (__uint64_t)thread);
2115 #else
2116 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (__uint64_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (__uint64_t)thread);
2117 #endif
2118 }
2119
2120 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2121 if( (thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
2122 printf("Failed to set thread_id in pthread_init\n");
2123 return 0;
2124 #endif
2125 }
2126
2127 int sched_yield(void)
2128 {
2129 swtch_pri(0);
2130 return 0;
2131 }
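/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * yielding the processor while waiting for another thread to make progress.
 * The flag and helper name are hypothetical; a condition variable is normally
 * preferable to a polling loop like this.
 */
#if 0
#include <sched.h>

static volatile int ready;

static void
wait_until_ready(void)
{
	while (!ready)
		(void)sched_yield();	/* give other runnable threads a chance */
}
#endif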
2132
2133 /* This used to be the "magic" that gets the initialization routine called when the application starts */
2134 static int _do_nothing(void) { return 0; }
2135 int (*_cthread_init_routine)(void) = _do_nothing;
2136
2137 /* Get a semaphore from the pool, growing it if necessary */
2138
2139 __private_extern__ semaphore_t new_sem_from_pool(void) {
2140 kern_return_t res;
2141 semaphore_t sem;
2142 int i;
2143
2144 LOCK(sem_pool_lock);
2145 if (sem_pool_current == sem_pool_count) {
2146 sem_pool_count += 16;
2147 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
2148 for (i = sem_pool_current; i < sem_pool_count; i++) {
2149 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
2150 }
2151 }
2152 sem = sem_pool[sem_pool_current++];
2153 UNLOCK(sem_pool_lock);
2154 return sem;
2155 }
2156
2157 /* Put a semaphore back into the pool */
2158 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
2159 LOCK(sem_pool_lock);
2160 sem_pool[--sem_pool_current] = sem;
2161 UNLOCK(sem_pool_lock);
2162 }
2163
2164 static void sem_pool_reset(void) {
2165 LOCK(sem_pool_lock);
2166 sem_pool_count = 0;
2167 sem_pool_current = 0;
2168 sem_pool = NULL;
2169 UNLOCK(sem_pool_lock);
2170 }
2171
2172 __private_extern__ void _pthread_fork_child(pthread_t p) {
2173 /* Just in case somebody had it locked... */
2174 UNLOCK(sem_pool_lock);
2175 sem_pool_reset();
2176 /* No need to hold the pthread_list_lock as no one other than this
2177 * thread is present at this time
2178 */
2179 TAILQ_INIT(&__pthread_head);
2180 LOCK_INIT(_pthread_list_lock);
2181 TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
2182 #if PTH_LISTTRACE
2183 __kdebug_trace(0x900000c, p, 0, 0, 10, 0);
2184 #endif
2185 _pthread_count = 1;
2186 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2187 if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
2188 printf("Failed to set thread_id in pthread_fork_child\n");
2189 #endif
2190 }
2191
2192 /*
2193 * Query/update the cancelability 'state' of a thread
2194 */
2195 int
2196 _pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
2197 {
2198 pthread_t self = pthread_self();
2199
2200
2201 switch (state) {
2202 case PTHREAD_CANCEL_ENABLE:
2203 if (conforming)
2204 __pthread_canceled(1);
2205 break;
2206 case PTHREAD_CANCEL_DISABLE:
2207 if (conforming)
2208 __pthread_canceled(2);
2209 break;
2210 default:
2211 return EINVAL;
2212 }
2213
2214 self = pthread_self();
2215 LOCK(self->lock);
2216 if (oldstate)
2217 *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
2218 self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
2219 self->cancel_state |= state;
2220 UNLOCK(self->lock);
2221 if (!conforming)
2222 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
2223 return (0);
2224 }
2225
2226 /* When a thread exits set the cancellation state to DISABLE and DEFERRED */
2227 static void
2228 _pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
2229 {
2230 LOCK(self->lock);
2231 self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
2232 self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
2233 if (value_ptr == PTHREAD_CANCELED) {
2234 // 4597450: begin
2235 self->detached |= _PTHREAD_WASCANCEL;
2236 // 4597450: end
2237 }
2238 UNLOCK(self->lock);
2239 }
2240
2241 int
2242 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
2243 {
2244 kern_return_t res;
2245 int detached = 0, ret;
2246
2247 #if PTH_TRACE
2248 __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
2249 #endif
2250 /* Consider the scenario where the joiner was waiting for the thread and
2251 * pthread_detach() was then called on that thread. The semaphore will
2252 * trigger, but by the time the joiner runs the target thread could already
2253 * be freed. So we need to make sure that the thread is still in the list
2254 * and is joinable before we continue with the join.
2255 */
2256 LOCK(_pthread_list_lock);
2257 if ((ret = _pthread_find_thread(thread)) != 0) {
2258 UNLOCK(_pthread_list_lock);
2259 /* returns ESRCH */
2260 return(ret);
2261 }
2262 if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
2263 /* the thread might be a detached thread */
2264 UNLOCK(_pthread_list_lock);
2265 return(ESRCH);
2266
2267 }
2268 /* It is still a joinable thread and needs to be reaped */
2269 TAILQ_REMOVE(&__pthread_head, thread, plist);
2270 #if PTH_LISTTRACE
2271 __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
2272 #endif
2273 UNLOCK(_pthread_list_lock);
2274
2275 if (value_ptr)
2276 *value_ptr = thread->exit_value;
2277 if (conforming) {
2278 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
2279 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
2280 *value_ptr = PTHREAD_CANCELED;
2281 }
2282 }
2283 if (thread->reply_port != MACH_PORT_NULL) {
2284 res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
2285 if (res != KERN_SUCCESS)
2286 fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
2287 thread->reply_port = MACH_PORT_NULL;
2288 }
2289 if (thread->freeStackOnExit) {
2290 thread->sig = _PTHREAD_NO_SIG;
2291 #if PTH_TRACE
2292 __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
2293 #endif
2294 vm_deallocate(mach_task_self(), (mach_vm_address_t)(long)thread, pthreadsize);
2295 } else {
2296 thread->sig = _PTHREAD_NO_SIG;
2297 #if PTH_TRACE
2298 __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
2299 #endif
2300 free(thread);
2301 }
2302 return(0);
2303 }
2304
2305 /* ALWAYS called with list lock and return with list lock */
2306 int
2307 _pthread_find_thread(pthread_t thread)
2308 {
2309 pthread_t p;
2310
2311 loop:
2312 TAILQ_FOREACH(p, &__pthread_head, plist) {
2313 if (p == thread) {
2314 if (thread->kernel_thread == MACH_PORT_NULL) {
2315 UNLOCK(_pthread_list_lock);
2316 sched_yield();
2317 LOCK(_pthread_list_lock);
2318 goto loop;
2319 }
2320 return(0);
2321 }
2322 }
2323 return(ESRCH);
2324 }
2325
2326 int
2327 _pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
2328 {
2329 mach_port_t kport;
2330 int ret = 0;
2331
2332 if (thread == NULL)
2333 return(ESRCH);
2334
2335 LOCK(_pthread_list_lock);
2336
2337 if ((ret = _pthread_find_thread(thread)) != 0) {
2338 UNLOCK(_pthread_list_lock);
2339 return(ret);
2340 }
2341 if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
2342 UNLOCK(_pthread_list_lock);
2343 return(EINVAL);
2344 }
2345 kport = thread->kernel_thread;
2346 UNLOCK(_pthread_list_lock);
2347 if (portp != NULL)
2348 *portp = kport;
2349 return(0);
2350 }
2351
2352 /* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
2353 int
2354 pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
2355 {
2356 attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2357 attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
2358 attrp->overcommit = 0;
2359 return(0);
2360 }
2361
2362 int
2363 pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
2364 {
2365 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
2366 {
2367 return (0);
2368 } else
2369 {
2370 return (EINVAL); /* Not an attribute structure! */
2371 }
2372 }
2373
2374 int
2375 pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
2376 {
2377 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2378 *qpriop = attr->queueprio;
2379 return (0);
2380 } else {
2381 return (EINVAL); /* Not an attribute structure! */
2382 }
2383 }
2384
2385
2386 int
2387 pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
2388 {
2389 int error = 0;
2390
2391 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2392 switch(qprio) {
2393 case WORKQ_HIGH_PRIOQUEUE:
2394 case WORKQ_DEFAULT_PRIOQUEUE:
2395 case WORKQ_LOW_PRIOQUEUE:
2396 attr->queueprio = qprio;
2397 break;
2398 default:
2399 error = EINVAL;
2400 }
2401 } else {
2402 error = EINVAL;
2403 }
2404 return (error);
2405 }
2406
2407
2408 int
2409 pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
2410 {
2411 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2412 *ocommp = attr->overcommit;
2413 return (0);
2414 } else {
2415 return (EINVAL); /* Not an attribute structure! */
2416 }
2417 }
2418
2419
2420 int
2421 pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
2422 {
2423 int error = 0;
2424
2425 if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
2426 attr->overcommit = ocomm;
2427 } else {
2428 error = EINVAL;
2429 }
2430 return (error);
2431 }
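/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * configuring a workqueue attribute with the non-portable (_np) calls defined
 * above.  WORKQ_HIGH_PRIOQUEUE comes from this library's headers; the helper
 * name make_high_prio_attr() is hypothetical.
 */
#if 0
static int
make_high_prio_attr(pthread_workqueue_attr_t *attr)
{
	int err;

	if ((err = pthread_workqueue_attr_init_np(attr)) != 0)
		return (err);
	if ((err = pthread_workqueue_attr_setqueuepriority_np(attr, WORKQ_HIGH_PRIOQUEUE)) != 0)
		return (err);
	/* Allow the kernel to overcommit threads for this queue. */
	return (pthread_workqueue_attr_setovercommit_np(attr, 1));
}
#endif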
2432 /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
2433
2434 static void
2435 workqueue_list_lock()
2436 {
2437 OSSpinLockLock(&__workqueue_list_lock);
2438 }
2439
2440 static void
2441 workqueue_list_unlock()
2442 {
2443 OSSpinLockUnlock(&__workqueue_list_lock);
2444 }
2445
2446 int
2447 pthread_workqueue_init_np()
2448 {
2449 int ret;
2450
2451 workqueue_list_lock();
2452 ret =_pthread_work_internal_init();
2453 workqueue_list_unlock();
2454
2455 return(ret);
2456 }
2457
2458 int
2459 pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
2460 {
2461 int error = 0;
2462
2463 if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
2464 return(EINVAL);
2465
2466 error =__workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);
2467
2468 if (error == -1)
2469 return(errno);
2470 return(0);
2471 }
2472
2473 void
2474 pthread_workqueue_atfork_prepare(void)
2475 {
2476 /*
2477 * NOTE: Any workq additions here
2478 * should be for i386,x86_64 only
2479 */
2480 dispatch_atfork_prepare();
2481 }
2482
2483 void
2484 pthread_workqueue_atfork_parent(void)
2485 {
2486 /*
2487 * NOTE: Any workq additions here
2488 * should be for i386,x86_64 only
2489 */
2490 dispatch_atfork_parent();
2491 }
2492
2493 void
2494 pthread_workqueue_atfork_child(void)
2495 {
2496 #if defined(__i386__) || defined(__x86_64__)
2497 /*
2498 * NOTE: workq additions here
2499 * are for i386,x86_64 only as
2500 * ppc and arm do not support it
2501 */
2502 __workqueue_list_lock = OS_SPINLOCK_INIT;
2503 if (kernel_workq_setup != 0){
2504 kernel_workq_setup = 0;
2505 _pthread_work_internal_init();
2506 }
2507 #endif
2508 dispatch_atfork_child();
2509 }
2510
2511 static int
2512 _pthread_work_internal_init(void)
2513 {
2514 int i, error;
2515 pthread_workqueue_head_t headp;
2516 pthread_workitem_t witemp;
2517 pthread_workqueue_t wq;
2518
2519 if (kernel_workq_setup == 0) {
2520 #if defined(__i386__) || defined(__x86_64__)
2521 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
2522 #else
2523 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL, NULL);
2524 #endif
2525
2526 _pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2527 _pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;
2528
2529 for( i = 0; i< WQ_NUM_PRIO_QS; i++) {
2530 headp = __pthread_wq_head_tbl[i];
2531 TAILQ_INIT(&headp->wqhead);
2532 headp->next_workq = 0;
2533 }
2534
2535 /* create work item and workqueue pools */
2536 witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
2537 bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
2538 for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
2539 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
2540 }
2541 wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
2542 bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
2543 for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
2544 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
2545 }
2546
2547 if ((error = __workq_open()) != 0) {
2548 TAILQ_INIT(&__pthread_workitem_pool_head);
2549 TAILQ_INIT(&__pthread_workqueue_pool_head);
2550 free(witemp);
2551 free(wq);
2552 return(ENOMEM);
2553 }
2554 kernel_workq_setup = 1;
2555 }
2556 return(0);
2557 }
2558
2559
2560 /* This routine is called with list lock held */
2561 static pthread_workitem_t
2562 alloc_workitem(void)
2563 {
2564 pthread_workitem_t witem;
2565
2566 if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
2567 workqueue_list_unlock();
2568 witem = malloc(sizeof(struct _pthread_workitem));
2569 witem->gencount = 0;
2570 workqueue_list_lock();
2571 } else {
2572 witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
2573 TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
2574 }
2575 return(witem);
2576 }
2577
2578 /* This routine is called with list lock held */
2579 static void
2580 free_workitem(pthread_workitem_t witem)
2581 {
2582 witem->gencount++;
2583 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
2584 }
2585
2586 /* This routine is called with list lock held */
2587 static pthread_workqueue_t
2588 alloc_workqueue(void)
2589 {
2590 pthread_workqueue_t wq;
2591
2592 if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
2593 workqueue_list_unlock();
2594 wq = malloc(sizeof(struct _pthread_workqueue));
2595 workqueue_list_lock();
2596 } else {
2597 wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
2598 TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
2599 }
2600 user_workq_count++;
2601 return(wq);
2602 }
2603
2604 /* This routine is called with list lock held */
2605 static void
2606 free_workqueue(pthread_workqueue_t wq)
2607 {
2608 user_workq_count--;
2609 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
2610 }
2611
2612 static void
2613 _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
2614 {
2615 bzero(wq, sizeof(struct _pthread_workqueue));
2616 if (attr != NULL) {
2617 wq->queueprio = attr->queueprio;
2618 wq->overcommit = attr->overcommit;
2619 } else {
2620 wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
2621 wq->overcommit = 0;
2622 }
2623 LOCK_INIT(wq->lock);
2624 wq->flags = 0;
2625 TAILQ_INIT(&wq->item_listhead);
2626 TAILQ_INIT(&wq->item_kernhead);
2627 #if WQ_LISTTRACE
2628 __kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
2629 #endif
2630 wq->wq_list.tqe_next = 0;
2631 wq->wq_list.tqe_prev = 0;
2632 wq->sig = PTHREAD_WORKQUEUE_SIG;
2633 wq->headp = __pthread_wq_head_tbl[wq->queueprio];
2634 }
2635
2636 int
2637 valid_workq(pthread_workqueue_t workq)
2638 {
2639 if (workq->sig == PTHREAD_WORKQUEUE_SIG)
2640 return(1);
2641 else
2642 return(0);
2643 }
2644
2645
2646 /* called with list lock */
2647 static void
2648 pick_nextworkqueue_droplock()
2649 {
2650 int i, curwqprio, val, found;
2651 pthread_workqueue_head_t headp;
2652 pthread_workqueue_t workq;
2653 pthread_workqueue_t nworkq = NULL;
2654
2655 #if WQ_TRACE
2656 __kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
2657 #endif
2658 loop:
2659 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2660 found = 0;
2661 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2662 wqreadyprio = i; /* because there is nothing else higher to run */
2663 headp = __pthread_wq_head_tbl[i];
2664
2665 if (TAILQ_EMPTY(&headp->wqhead))
2666 continue;
2667 workq = headp->next_workq;
2668 if (workq == NULL)
2669 workq = TAILQ_FIRST(&headp->wqhead);
2670 curwqprio = workq->queueprio;
2671 nworkq = workq; /* starting pt */
2672 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2673 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2674 if (headp->next_workq == NULL)
2675 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2676 #if WQ_TRACE
2677 __kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
2678 #endif
2679 val = post_nextworkitem(workq);
2680
2681 if (val != 0) {
2682 /* things could have changed, so reassess */
2683 /* If kernel queue is full , skip */
2684 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2685 break;
2686 /* If anything with higher prio arrived, then reevaluate */
2687 if (wqreadyprio < curwqprio)
2688 goto loop; /* we need to re-evaluate */
2689 /* we can post some more work items */
2690 found = 1;
2691 }
2692
2693 /* cannot use workq here as it could be freed */
2694 if (TAILQ_EMPTY(&headp->wqhead))
2695 break;
2696 /* if we found nothing to run and only one workqueue in the list, skip */
2697 if ((val == 0) && (workq == headp->next_workq))
2698 break;
2699 workq = headp->next_workq;
2700 if (workq == NULL)
2701 workq = TAILQ_FIRST(&headp->wqhead);
2702 if (val != 0)
2703 nworkq = workq;
2704 /* if we found nothing to run and are back at the workq where we started */
2705 if ((val == 0) && (workq == nworkq))
2706 break;
2707 }
2708 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2709 break;
2710 }
2711 /* nothing found to run? */
2712 if (found == 0)
2713 break;
2714 }
2715 workqueue_list_unlock();
2716 }
2717
2718 static int
2719 post_nextworkitem(pthread_workqueue_t workq)
2720 {
2721 int error, prio;
2722 pthread_workitem_t witem;
2723 pthread_workqueue_head_t headp;
2724 void (*func)(pthread_workqueue_t, void *);
2725
2726 if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
2727 return(0);
2728 }
2729 #if WQ_TRACE
2730 __kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
2731 #endif
2732 if (TAILQ_EMPTY(&workq->item_listhead)) {
2733 return(0);
2734 }
2735 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
2736 return(0);
2737
2738 witem = TAILQ_FIRST(&workq->item_listhead);
2739 headp = workq->headp;
2740 #if WQ_TRACE
2741 __kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
2742 #endif
2743 if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
2744 #if WQ_TRACE
2745 __kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
2746 #endif
2747
2748 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2749 return(0);
2750 }
2751 /* A barrier posted when nothing is queued also needs to be handled: */
2752 /* there is nothing to wait for */
2753 if (workq->kq_count != 0) {
2754 witem->flags |= PTH_WQITEM_APPLIED;
2755 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2756 workq->barrier_count = workq->kq_count;
2757 #if WQ_TRACE
2758 __kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
2759 #endif
2760 return(1);
2761 } else {
2762 #if WQ_TRACE
2763 __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
2764 #endif
2765 if (witem->func != NULL) {
2766 /* since we are going to drop list lock */
2767 witem->flags |= PTH_WQITEM_APPLIED;
2768 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2769 workqueue_list_unlock();
2770 func = (void (*)(pthread_workqueue_t, void *))witem->func;
2771 (*func)(workq, witem->func_arg);
2772 #if WQ_TRACE
2773 __kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
2774 #endif
2775 workqueue_list_lock();
2776 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2777 }
2778 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2779 #if WQ_LISTTRACE
2780 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2781 #endif
2782 witem->flags = 0;
2783 free_workitem(witem);
2784 #if WQ_TRACE
2785 __kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
2786 #endif
2787 return(1);
2788 }
2789 } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
2790 #if WQ_TRACE
2791 __kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
2792 #endif
2793 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2794 return(0);
2795 }
2796 witem->flags |= PTH_WQITEM_APPLIED;
2797 workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
2798 workq->barrier_count = workq->kq_count;
2799 workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
2800 workq->term_callarg = witem->func_arg;
2801 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2802 #if WQ_LISTTRACE
2803 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2804 #endif
2805 if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
2806 if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
2807 #if WQ_TRACE
2808 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
2809 #endif
2810 }
2811 witem->flags = 0;
2812 free_workitem(witem);
2813 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2814 #if WQ_TRACE
2815 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
2816 #endif
2817 headp = __pthread_wq_head_tbl[workq->queueprio];
2818 if (headp->next_workq == workq) {
2819 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2820 if (headp->next_workq == NULL) {
2821 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2822 if (headp->next_workq == workq)
2823 headp->next_workq = NULL;
2824 }
2825 }
2826 workq->sig = 0;
2827 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2828 if (workq->term_callback != NULL) {
2829 workqueue_list_unlock();
2830 (*workq->term_callback)(workq, workq->term_callarg);
2831 workqueue_list_lock();
2832 }
2833 free_workqueue(workq);
2834 return(1);
2835 } else {
2836 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2837 #if WQ_LISTTRACE
2838 __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2839 #endif
2840 }
2841 #if WQ_TRACE
2842 __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
2843 #endif
2844 return(1);
2845 } else {
2846 #if WQ_TRACE
2847 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
2848 #endif
2849 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2850 #if WQ_LISTTRACE
2851 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2852 #endif
2853 TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
2854 if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
2855 workq->kq_count++;
2856 witem->flags |= PTH_WQITEM_KERN_COUNT;
2857 }
2858 OSAtomicIncrement32(&kernel_workq_count);
2859 workqueue_list_unlock();
2860
2861 prio = workq->queueprio;
2862 if (workq->overcommit != 0) {
2863 prio |= WORKQUEUE_OVERCOMMIT;
2864 }
2865
2866 if (( error =__workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
2867 OSAtomicDecrement32(&kernel_workq_count);
2868 workqueue_list_lock();
2869 #if WQ_TRACE
2870 __kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
2871 #endif
2872 TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
2873 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2874 #if WQ_LISTTRACE
2875 __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
2876 #endif
2877 if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
2878 workq->flags |= PTHREAD_WORKQ_REQUEUED;
2879 } else
2880 workqueue_list_lock();
2881 #if WQ_TRACE
2882 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
2883 #endif
2884 return(1);
2885 }
2886 /* no one should reach this point */
2887 #if 1
2888 printf("error in logic for next workitem\n");
2889 LIBC_ABORT("error in logic for next workitem");
2890 #endif
2891 return(0);
2892 }
2893
2894 void
2895 _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
2896 {
2897 int ret;
2898 pthread_attr_t *attrs = &_pthread_attr_default;
2899 pthread_workqueue_t workq;
2900 #if WQ_DEBUG
2901 pthread_t pself;
2902 #endif
2903
2904
2905 workq = item->workq;
2906 if (reuse == 0) {
2907 /* reuse is set to 0 when a thread is newly created to run a workitem */
2908 _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
2909 self->wqthread = 1;
2910 self->wqkillset = 0;
2911 self->parentcheck = 1;
2912
2913 /* These are not joinable threads */
2914 self->detached &= ~PTHREAD_CREATE_JOINABLE;
2915 self->detached |= PTHREAD_CREATE_DETACHED;
2916 #if defined(__i386__) || defined(__x86_64__)
2917 _pthread_set_self(self);
2918 #endif
2919 #if WQ_TRACE
2920 __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
2921 #endif
2922 self->kernel_thread = kport;
2923 self->fun = (void *(*)(void *))item->func;
2924 self->arg = item->func_arg;
2925 /* Add to the pthread list */
2926 LOCK(_pthread_list_lock);
2927 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
2928 #if PTH_LISTTRACE
2929 __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
2930 #endif
2931 _pthread_count++;
2932 UNLOCK(_pthread_list_lock);
2933
2934 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
2935 if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
2936 printf("Failed to set thread_id in pthread_wqthread\n");
2937 #endif
2938
2939 } else {
2940 /* reuse is set to 1 when a thread is reused to run another work item */
2941 #if WQ_TRACE
2942 __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
2943 #endif
2944 /* reset all tsd from 1 to KEYS_MAX */
2945 if (self == NULL)
2946 LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);
2947
2948 self->fun = (void *(*)(void *))item->func;
2949 self->arg = item->func_arg;
2950 }
2951
2952 #if WQ_DEBUG
2953 if (reuse == 0) {
2954 pself = pthread_self();
2955 if (self != pself) {
2956 #if WQ_TRACE
2957 __kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
2958 #endif
2959 printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
2960 _pthread_set_self(self);
2961 pself = pthread_self();
2962 if (self != pself)
2963 printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
2964 pself = self;
2965 }
2966 } else {
2967 pself = pthread_self();
2968 if (self != pself) {
2969 printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
2970 LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
2971 }
2972 }
2973 #endif /* WQ_DEBUG */
2974
2975 self->cur_workq = workq;
2976 self->cur_workitem = item;
2977 OSAtomicDecrement32(&kernel_workq_count);
2978
2979 ret = (int)(intptr_t)(*self->fun)(self->arg);
2980
2981 /* If we reach here without having gone through the initialization path above, do not go
2982 * through the teardown code path (e.g. setjmp/longjmp). Instead just exit this thread.
2983 */
2984 if(self != pthread_self()) {
2985 pthread_exit(PTHREAD_CANCELED);
2986 }
2987
2988 workqueue_exit(self, workq, item);
2989
2990 }
2991
2992 static void
2993 workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
2994 {
2995 pthread_attr_t *attrs = &_pthread_attr_default;
2996 pthread_workitem_t baritem;
2997 pthread_workqueue_head_t headp;
2998 void (*func)(pthread_workqueue_t, void *);
2999
3000 workqueue_list_lock();
3001
3002 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
3003 workq->kq_count--;
3004 #if WQ_TRACE
3005 __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
3006 #endif
3007 item->flags = 0;
3008 free_workitem(item);
3009
3010 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
3011 workq->barrier_count--;
3012 #if WQ_TRACE
3013 __kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
3014 #endif
3015 if (workq->barrier_count <= 0 ) {
3016 /* Need to remove barrier item from the list */
3017 baritem = TAILQ_FIRST(&workq->item_listhead);
3018 #if WQ_DEBUG
3019 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
3020 printf("Incorrect bar item being removed in barrier processing\n");
3021 #endif /* WQ_DEBUG */
3022 /* if the front item is a barrier and a callback is registered, run it */
3023 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
3024 workqueue_list_unlock();
3025 func = (void (*)(pthread_workqueue_t, void *))baritem->func;
3026 (*func)(workq, baritem->func_arg);
3027 workqueue_list_lock();
3028 }
3029 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
3030 #if WQ_LISTTRACE
3031 __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
3032 #endif
3033 baritem->flags = 0;
3034 free_workitem(baritem);
3035 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
3036 #if WQ_TRACE
3037 __kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
3038 #endif
3039 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
3040 headp = __pthread_wq_head_tbl[workq->queueprio];
3041 workq->flags |= PTHREAD_WORKQ_DESTROYED;
3042 #if WQ_TRACE
3043 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
3044 #endif
3045 if (headp->next_workq == workq) {
3046 headp->next_workq = TAILQ_NEXT(workq, wq_list);
3047 if (headp->next_workq == NULL) {
3048 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
3049 if (headp->next_workq == workq)
3050 headp->next_workq = NULL;
3051 }
3052 }
3053 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
3054 workq->sig = 0;
3055 if (workq->term_callback != NULL) {
3056 workqueue_list_unlock();
3057 (*workq->term_callback)(workq, workq->term_callarg);
3058 workqueue_list_lock();
3059 }
3060 free_workqueue(workq);
3061 } else {
3062 /* if there are higher-priority schedulable items, reset wqreadyprio */
3063 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
3064 wqreadyprio = workq->queueprio;
3065 }
3066 }
3067 }
3068 #if WQ_TRACE
3069 else {
3070 __kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
3071 }
3072
3073 __kdebug_trace(0x900005c, self, item, 0, 0, 0);
3074 #endif
3075 pick_nextworkqueue_droplock();
3076 _pthread_workq_return(self);
3077 }
3078
3079 static void
3080 _pthread_workq_return(pthread_t self)
3081 {
3082 __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
3083
3084 /* This is the way to terminate the thread */
3085 _pthread_exit(self, NULL);
3086 }
3087
3088
3089 /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
3090
3091 int
3092 pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
3093 {
3094 pthread_workqueue_t wq;
3095 pthread_workqueue_head_t headp;
3096
3097 #if defined(__arm__)
3098 /* not supported under arm */
3099 return(ENOTSUP);
3100 #endif
3101 #if defined(__ppc__)
3102 IF_ROSETTA() {
3103 return(ENOTSUP);
3104 }
3105 #endif
3106 if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
3107 return(EINVAL);
3108 }
3109
3110 if (__is_threaded == 0)
3111 __is_threaded = 1;
3112
3113 workqueue_list_lock();
3114 if (kernel_workq_setup == 0) {
3115 int ret = _pthread_work_internal_init();
3116 if (ret != 0) {
3117 workqueue_list_unlock();
3118 return(ret);
3119 }
3120 }
3121
3122 wq = alloc_workqueue();
3123
3124 _pthread_workq_init(wq, attr);
3125
3126 headp = __pthread_wq_head_tbl[wq->queueprio];
3127 TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
3128 if (headp->next_workq == NULL) {
3129 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
3130 }
3131
3132 workqueue_list_unlock();
3133
3134 *workqp = wq;
3135
3136 return(0);
3137 }
3138
3139 int
3140 pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
3141 {
3142 pthread_workitem_t witem;
3143
3144 if (valid_workq(workq) == 0) {
3145 return(EINVAL);
3146 }
3147
3148 workqueue_list_lock();
3149
3150 /*
3151 * Allocate the workitem here as it can drop the lock.
3152 * Also we can evaluate the workqueue state only once.
3153 */
3154 witem = alloc_workitem();
3155 witem->func = workitem_func;
3156 witem->func_arg = workitem_arg;
3157 witem->flags = 0;
3158 witem->workq = workq;
3159 witem->item_entry.tqe_next = 0;
3160 witem->item_entry.tqe_prev = 0;
3161
3162 /* alloc workitem can drop the lock, check the state */
3163 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3164 free_workitem(witem);
3165 workqueue_list_unlock();
3166 *itemhandlep = 0;
3167 return(ESRCH);
3168 }
3169
3170 if (itemhandlep != NULL)
3171 *itemhandlep = (pthread_workitem_handle_t *)witem;
3172 if (gencountp != NULL)
3173 *gencountp = witem->gencount;
3174 #if WQ_TRACE
3175 __kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
3176 #endif
3177 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3178 #if WQ_LISTTRACE
3179 __kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
3180 #endif
3181
3182 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3183 wqreadyprio = workq->queueprio;
3184
3185 pick_nextworkqueue_droplock();
3186
3187 return(0);
3188 }
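/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * creating a workqueue and queueing one work item with the _np interfaces
 * above.  The callback and helper names are hypothetical; these interfaces
 * primarily exist to support higher-level libraries such as libdispatch.
 */
#if 0
#include <stdio.h>

static void
work_fn(void *arg)
{
	printf("work item ran with arg %p\n", arg);
}

static int
run_one_item(void)
{
	pthread_workqueue_t wq;
	pthread_workitem_handle_t handle;
	unsigned int gencount;
	int err;

	/* NULL attributes select the default priority queue. */
	if ((err = pthread_workqueue_create_np(&wq, NULL)) != 0)
		return (err);
	return (pthread_workqueue_additem_np(wq, work_fn, NULL, &handle, &gencount));
}
#endif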
3189
3190 int
3191 pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
3192 {
3193 pthread_workitem_t witem;
3194
3195 if (valid_workq(workq) == 0) {
3196 return(EINVAL);
3197 }
3198
3199 if (ocommp != NULL)
3200 *ocommp = workq->overcommit;
3201 return(0);
3202 }
3203
3204
3205 /* DEPRECATED
3206 int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle, unsigned int gencount)
3207 int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, pthread_workitem_handle_t *itemhandlep, unsigned int *gencountp)
3208 int pthread_workqueue_suspend_np(pthread_workqueue_t workq)
3209 int pthread_workqueue_resume_np(pthread_workqueue_t workq)
3210 */
3211
3212 #else /* !BUILDING_VARIANT ] [ */
3213 extern int __unix_conforming;
3214 extern int _pthread_count;
3215 extern pthread_lock_t _pthread_list_lock;
3216 extern void _pthread_testcancel(pthread_t thread, int isconforming);
3217 extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);
3218
3219 #endif /* !BUILDING_VARIANT ] */
3220
3221 #if __DARWIN_UNIX03
3222
3223 __private_extern__ void
3224 __posix_join_cleanup(void *arg)
3225 {
3226 pthread_t thread = (pthread_t)arg;
3227 int already_exited, res;
3228 void * dummy;
3229 semaphore_t death;
3230 mach_port_t joinport;
3231 int newstyle = 0;
3232
3233 LOCK(thread->lock);
3234 already_exited = (thread->detached & _PTHREAD_EXITED);
3235
3236 newstyle = thread->newstyle;
3237
3238 #if WQ_TRACE
3239 __kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
3240 #endif
3241 if (newstyle == 0) {
3242 death = thread->death;
3243 if (!already_exited){
3244 thread->joiner = (struct _pthread *)NULL;
3245 UNLOCK(thread->lock);
3246 restore_sem_to_pool(death);
3247 } else {
3248 UNLOCK(thread->lock);
3249 while ((res = _pthread_reap_thread(thread,
3250 thread->kernel_thread,
3251 &dummy, 1)) == EAGAIN)
3252 {
3253 sched_yield();
3254 }
3255 restore_sem_to_pool(death);
3256
3257 }
3258
3259 } else {
3260 /* leave another thread to join */
3261 thread->joiner = (struct _pthread *)NULL;
3262 UNLOCK(thread->lock);
3263 }
3264 }
3265
3266 #endif /* __DARWIN_UNIX03 */
3267
3268
3269 /*
3270 * Wait for a thread to terminate and obtain its exit value.
3271 */
3272 /*
3273 int
3274 pthread_join(pthread_t thread,
3275 void **value_ptr)
3276
3277 moved to pthread_cancelable.c */
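/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * the classic create/join pattern served by the pthread_join() implementation
 * mentioned above.  The worker function and helper names are hypothetical.
 */
#if 0
#include <pthread.h>

static void *
worker(void *arg)
{
	return (arg);	/* exit value collected by the joiner */
}

static int
create_and_join(void)
{
	pthread_t t;
	void *result;
	int err;

	if ((err = pthread_create(&t, NULL, worker, (void *)1)) != 0)
		return (err);
	return (pthread_join(t, &result));
}
#endif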
3278
3279 /*
3280 * Cancel a thread
3281 */
3282 int
3283 pthread_cancel(pthread_t thread)
3284 {
3285 #if __DARWIN_UNIX03
3286 if (__unix_conforming == 0)
3287 __unix_conforming = 1;
3288 #endif /* __DARWIN_UNIX03 */
3289
3290 if (_pthread_lookup_thread(thread, NULL, 0) != 0)
3291 return(ESRCH);
3292
3293 /* if the thread is a workqueue thread, then return error */
3294 if (thread->wqthread != 0) {
3295 return(ENOTSUP);
3296 }
3297 #if __DARWIN_UNIX03
3298 int state;
3299
3300 LOCK(thread->lock);
3301 state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3302 UNLOCK(thread->lock);
3303 if (state & PTHREAD_CANCEL_ENABLE)
3304 __pthread_markcancel(thread->kernel_thread);
3305 #else /* __DARWIN_UNIX03 */
3306 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3307 #endif /* __DARWIN_UNIX03 */
3308 return (0);
3309 }
3310
3311 void
3312 pthread_testcancel(void)
3313 {
3314 pthread_t self = pthread_self();
3315
3316 #if __DARWIN_UNIX03
3317 if (__unix_conforming == 0)
3318 __unix_conforming = 1;
3319 _pthread_testcancel(self, 1);
3320 #else /* __DARWIN_UNIX03 */
3321 _pthread_testcancel(self, 0);
3322 #endif /* __DARWIN_UNIX03 */
3323
3324 }
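/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * cancelling a worker that polls with pthread_testcancel().  The loop body
 * and helper names are hypothetical.
 */
#if 0
#include <pthread.h>

static void *
cancellable_worker(void *arg)
{
	for (;;) {
		/* ... perform a bounded unit of work ... */
		pthread_testcancel();	/* acts on a pending cancellation request */
	}
	return (NULL);
}

static void
cancel_worker(pthread_t t)
{
	(void)pthread_cancel(t);	/* request cancellation */
	(void)pthread_join(t, NULL);	/* reap; exit value is PTHREAD_CANCELED */
}
#endif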
3325
3326
3327 /*
3328 * Query/update the cancelability 'state' of a thread
3329 */
3330 int
3331 pthread_setcancelstate(int state, int *oldstate)
3332 {
3333 #if __DARWIN_UNIX03
3334 if (__unix_conforming == 0) {
3335 __unix_conforming = 1;
3336 }
3337 return (_pthread_setcancelstate_internal(state, oldstate, 1));
3338 #else /* __DARWIN_UNIX03 */
3339 return (_pthread_setcancelstate_internal(state, oldstate, 0));
3340 #endif /* __DARWIN_UNIX03 */
3341
3342 }
3343
3344
3345
3346 /*
3347 * Query/update the cancelability 'type' of a thread
3348 */
3349 int
3350 pthread_setcanceltype(int type, int *oldtype)
3351 {
3352 pthread_t self = pthread_self();
3353
3354 #if __DARWIN_UNIX03
3355 if (__unix_conforming == 0)
3356 __unix_conforming = 1;
3357 #endif /* __DARWIN_UNIX03 */
3358
3359 if ((type != PTHREAD_CANCEL_DEFERRED) &&
3360 (type != PTHREAD_CANCEL_ASYNCHRONOUS))
3361 return EINVAL;
3362 self = pthread_self();
3363 LOCK(self->lock);
3364 if (oldtype)
3365 *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
3366 self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
3367 self->cancel_state |= type;
3368 UNLOCK(self->lock);
3369 #if !__DARWIN_UNIX03
3370 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
3371 #endif /* __DARWIN_UNIX03 */
3372 return (0);
3373 }
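/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * temporarily disabling cancellation around a critical region with
 * pthread_setcancelstate(), then restoring the previous state.  The helper
 * name is hypothetical.
 */
#if 0
#include <pthread.h>

static void
do_uncancellable_work(void)
{
	int oldstate;

	(void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... work that must not be interrupted by cancellation ... */
	(void)pthread_setcancelstate(oldstate, NULL);
}
#endif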
3374
3375 int
3376 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
3377 {
3378 #if __DARWIN_UNIX03
3379 int err = 0;
3380
3381 if (__pthread_sigmask(how, set, oset) == -1) {
3382 err = errno;
3383 }
3384 return(err);
3385 #else /* __DARWIN_UNIX03 */
3386 return(__pthread_sigmask(how, set, oset));
3387 #endif /* __DARWIN_UNIX03 */
3388 }
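/*
 * Minimal usage sketch (illustrative only, not referenced by this library):
 * blocking SIGINT in the calling thread with pthread_sigmask() and saving the
 * previous mask for later restoration.  The helper name is hypothetical.
 */
#if 0
#include <pthread.h>
#include <signal.h>

static int
block_sigint_in_this_thread(sigset_t *oldmask)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	/* Returns an errno value directly, as the wrapper above shows. */
	return (pthread_sigmask(SIG_BLOCK, &mask, oldmask));
}
#endif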
3389
3390 /*
3391 int
3392 sigwait(const sigset_t * set, int * sig)
3393
3394 moved to pthread_cancelable.c */