1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
54
55 #include <assert.h>
56 #include <stdio.h> /* For printf(). */
57 #include <stdlib.h>
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <signal.h>
60 #include <sys/time.h>
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
64 #include <machine/vmparam.h>
65 #include <mach/vm_statistics.h>
66 #define __APPLE_API_PRIVATE
67 #include <machine/cpu_capabilities.h>
68 #include <libkern/OSAtomic.h>
69 #if defined(__ppc__)
70 #include <libkern/OSCrossEndian.h>
71 #endif
72
73
74 #ifndef BUILDING_VARIANT /* [ */
75
76 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
77
78
79
80 /* Per-thread kernel support */
81 extern void _pthread_set_self(pthread_t);
82 extern void mig_init(int);
83 static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
84 static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
85 void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
86 static void _pthread_tsd_reinit(pthread_t t);
87 static int _new_pthread_create_suspended(pthread_t *thread,
88 const pthread_attr_t *attr,
89 void *(*start_routine)(void *),
90 void *arg,
91 int create_susp);
92
93 /* Get CPU capabilities from the kernel */
94 __private_extern__ void _init_cpu_capabilities(void);
95
96 /* Needed to tell the malloc subsystem we're going multithreaded */
97 extern void set_malloc_singlethreaded(int);
98
99 /* Used when we need to call into the kernel with no reply port */
100 extern pthread_lock_t reply_port_lock;
101 int _pthread_find_thread(pthread_t thread);
102
103 /* Mach message used to notify that a thread needs to be reaped */
104
105 typedef struct _pthread_reap_msg_t {
106 mach_msg_header_t header;
107 pthread_t thread;
108 mach_msg_trailer_t trailer;
109 } pthread_reap_msg_t;
110
111 /* We'll implement this when the main thread is a pthread */
112 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
113 static struct _pthread _thread = {0};
114
115 /* This global should be used (carefully) by anyone needing to know if a
116 ** pthread has been created.
117 */
118 int __is_threaded = 0;
119 /* _pthread_count is protected by _pthread_list_lock */
120 static int _pthread_count = 1;
121 int __unix_conforming = 0;
122 __private_extern__ size_t pthreadsize = 0;
123
124 /* under Rosetta we will use old-style creation of threads */
125 static int __oldstyle = 0;
126
127 __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
128
129 /* Same implementation as LOCK, but without the __is_threaded check */
130 int _spin_tries = 0;
131 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
132 {
133 int tries = _spin_tries;
134 do {
135 if (tries-- > 0)
136 continue;
137 syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
138 tries = _spin_tries;
139 } while(!_spin_lock_try(lock));
140 }
141
142 extern mach_port_t thread_recycle_port;
143
144 /* These are used to keep track of a semaphore pool shared by mutexes and condition
145 ** variables.
146 */
147
148 static semaphore_t *sem_pool = NULL;
149 static int sem_pool_count = 0;
150 static int sem_pool_current = 0;
151 static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
152
153 static int default_priority;
154 static int max_priority;
155 static int min_priority;
156 static int pthread_concurrency;
157
158 static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
159
160 static void _pthread_exit(pthread_t self, void *value_ptr);
161 int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
162 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
163 static pthread_attr_t _pthread_attr_default = {0};
164 static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
165 static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
166 static int kernel_workq_setup = 0;
167 static volatile int32_t kernel_workq_count = 0;
168 static volatile unsigned int user_workq_count = 0;
169 #define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kernel */
170 static int wqreadyprio = 0; /* current highest prio queue ready with items */
171
172 __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
173 __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
174
175 struct _pthread_workqueue_head __pthread_workq0_head;
176 struct _pthread_workqueue_head __pthread_workq1_head;
177 struct _pthread_workqueue_head __pthread_workq2_head;
178 struct _pthread_workqueue_head __pthread_workq3_head;
179 struct _pthread_workqueue_head __pthread_workq4_head;
180 pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};
181
182 static void workqueue_list_lock(void);
183 static void workqueue_list_unlock(void);
184 static int valid_workq(pthread_workqueue_t);
185 static void pick_nextworkqueue_droplock(void);
186 static int post_nextworkitem(pthread_workqueue_t workq);
187 static void _pthread_workq_return(pthread_t self);
188 static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
189 void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
190 extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
191 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, int flags);
192 static pthread_workitem_t alloc_workitem(void);
193 static void free_workitem(pthread_workitem_t);
194 static pthread_workqueue_t alloc_workqueue(void);
195 static void free_workqueue(pthread_workqueue_t);
196 static int _pthread_work_internal_init(void);
197 static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
198
199 /* workq_ops commands */
200 #define WQOPS_QUEUE_ADD 1
201 #define WQOPS_QUEUE_REMOVE 2
202 #define WQOPS_THREAD_RETURN 4
203
204 /*
205 * Flags field passed to bsdthread_create and back in pthread_start
206 31 <---------------------------------> 0
207 _________________________________________
208 | flags(8) | policy(8) | importance(16) |
209 -----------------------------------------
210 */
211 void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
212
213 #define PTHREAD_START_CUSTOM 0x01000000
214 #define PTHREAD_START_SETSCHED 0x02000000
215 #define PTHREAD_START_DETACHED 0x04000000
216 #define PTHREAD_START_POLICY_BITSHIFT 16
217 #define PTHREAD_START_POLICY_MASK 0xff
218 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
219
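/*
 * Illustrative sketch (not part of this file): how the flags word described
 * above gets packed for a detached thread with explicit scheduling, mirroring
 * what _new_pthread_create_suspended() does further down.  'priority' is a
 * hypothetical int holding the requested sched_priority.
 *
 *     unsigned int flags = 0;
 *     flags |= PTHREAD_START_DETACHED;
 *     flags |= PTHREAD_START_SETSCHED;
 *     flags |= ((SCHED_RR & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *     flags |= (priority & PTHREAD_START_IMPORTANCE_MASK);   // importance in the low 16 bits
 */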
220 static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
221 extern pthread_t __bsdthread_create(void (*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
222 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
223
224 #if defined(__ppc__) || defined(__ppc64__)
225 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
226 #elif defined(__i386__) || defined(__x86_64__)
227 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
228 #elif defined(__arm__)
229 static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
230 #else
231 #error Need to define a stack address hint for this architecture
232 #endif
233
234 /* Set the base address to use as the stack pointer, before adjusting for the ABI.
235 * The guard pages for stack-overflow protection are also allocated here.
236 * If the stack was already allocated (stackaddr in attr), then no guard pages
237 * are set up for the thread.
238 */
239
240 static int
241 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
242 {
243 kern_return_t kr;
244 vm_address_t stackaddr;
245 size_t guardsize;
246
247 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
248 if (attrs->stackaddr != NULL) {
249 /* No guard pages set up in this case */
250 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
251 *stack = attrs->stackaddr;
252 return 0;
253 }
254
255 guardsize = attrs->guardsize;
256 stackaddr = PTHREAD_STACK_HINT;
257 kr = vm_map(mach_task_self(), &stackaddr,
258 attrs->stacksize + guardsize,
259 vm_page_size-1,
260 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
261 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
262 VM_INHERIT_DEFAULT);
263 if (kr != KERN_SUCCESS)
264 kr = vm_allocate(mach_task_self(),
265 &stackaddr, attrs->stacksize + guardsize,
266 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
267 if (kr != KERN_SUCCESS) {
268 return EAGAIN;
269 }
270 /* The guard page is at the lowest address */
271 /* The stack base is the highest address */
272 if (guardsize)
273 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
274 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
275 return 0;
276 }
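/*
 * Sketch of the region set up above (addresses grow to the right; sizes are
 * whatever the caller put in attrs):
 *
 *     stackaddr                      stackaddr + guardsize                *stack
 *     |<-- guardsize, VM_PROT_NONE -->|<-------- attrs->stacksize -------->|
 *
 * i.e. *stack == (void *)(stackaddr + attrs->stacksize + guardsize) is the
 * highest address; the thread's stack grows downward from there toward the
 * guard page.
 */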
277
278 static int
279 _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
280 {
281 kern_return_t kr;
282 pthread_t t;
283 vm_address_t stackaddr;
284 size_t guardsize, allocsize;
285
286 assert(attrs->stacksize >= PTHREAD_STACK_MIN);
287
288 if (attrs->stackaddr != NULL) {
289 /* No guard pages set up in this case */
290 assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
291 *stack = attrs->stackaddr;
292 t = (pthread_t)malloc(pthreadsize);
293 _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
294 t->freeStackOnExit = 0;
295 t->freeaddr = 0;
296 t->freesize = 0;
297 *thread = t;
298 return 0;
299 }
300
301 guardsize = attrs->guardsize;
302 allocsize = attrs->stacksize + guardsize + pthreadsize;
303 stackaddr = PTHREAD_STACK_HINT;
304 kr = vm_map(mach_task_self(), &stackaddr,
305 allocsize,
306 vm_page_size-1,
307 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
308 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
309 VM_INHERIT_DEFAULT);
310 if (kr != KERN_SUCCESS)
311 kr = vm_allocate(mach_task_self(),
312 &stackaddr, allocsize,
313 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
314 if (kr != KERN_SUCCESS) {
315 return EAGAIN;
316 }
317 /* The guard page is at the lowest address */
318 /* The stack base is the highest address */
319 if (guardsize)
320 kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
321
322
323 *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
324
325 t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
326 _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
327 t->kernalloc = 0;
328 t->freesize = allocsize;
329 t->freeaddr = (void *)stackaddr;
330 t->freeStackOnExit = 1;
331 *thread = t;
332
333 return 0;
334 }
335
336 static kern_return_t
337 _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
338 {
339 kern_return_t res = 0;
340 vm_address_t freeaddr;
341 size_t freesize;
342 task_t self = mach_task_self();
343 int thread_count;
344 mach_port_t kport;
345 semaphore_t joinsem = SEMAPHORE_NULL;
346
347 #if WQ_TRACE
348 __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
349 #endif
350 kport = t->kernel_thread;
351 joinsem = t->joiner_notify;
352
353 if (t->freeStackOnExit) {
354 freeaddr = (vm_address_t)t->freeaddr;
355 if (freestruct)
356 freesize = t->stacksize + t->guardsize + pthreadsize;
357 else
358 freesize = t->stacksize + t->guardsize;
359 if (termthread) {
360 mig_dealloc_reply_port(MACH_PORT_NULL);
361 LOCK(_pthread_list_lock);
362 if (freestruct != 0) {
363 TAILQ_REMOVE(&__pthread_head, t, plist);
364 /* if parent has not returned from create yet keep pthread_t */
365 #if WQ_TRACE
366 __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
367 #endif
368 if (t->parentcheck == 0)
369 freesize -= pthreadsize;
370 }
371 t->childexit = 1;
372 thread_count = --_pthread_count;
373 UNLOCK(_pthread_list_lock);
374
375 #if WQ_TRACE
376 __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
377 #endif
378 if (thread_count <=0)
379 exit(0);
380 else
381 __bsdthread_terminate(freeaddr, freesize, kport, joinsem);
382 abort();
383 } else {
384 #if WQ_TRACE
385 __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
386 #endif
387 res = vm_deallocate(mach_task_self(), freeaddr, freesize);
388 }
389 } else {
390 if (termthread) {
391 mig_dealloc_reply_port(MACH_PORT_NULL);
392 LOCK(_pthread_list_lock);
393 if (freestruct != 0) {
394 TAILQ_REMOVE(&__pthread_head, t, plist);
395 #if WQ_TRACE
396 __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
397 #endif
398 }
399 thread_count = --_pthread_count;
400 t->childexit = 1;
401 UNLOCK(_pthread_list_lock);
402
403 if (freestruct) {
404 #if WQ_TRACE
405 __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
406 #endif
407 free(t);
408 }
409
410 freeaddr = 0;
411 freesize = 0;
412 #if WQ_TRACE
413 __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
414 #endif
415
416 if (thread_count <=0)
417 exit(0);
418 else
419 __bsdthread_terminate(NULL, 0, kport, joinsem);
420 abort();
421 } else if (freestruct) {
422 t->sig = _PTHREAD_NO_SIG;
423 #if WQ_TRACE
424 __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
425 #endif
426 free(t);
427 }
428 }
429 return(res);
430 }
431
432
433
434 /*
435 * Destroy a thread attribute structure
436 */
437 int
438 pthread_attr_destroy(pthread_attr_t *attr)
439 {
440 if (attr->sig == _PTHREAD_ATTR_SIG)
441 {
442 attr->sig = 0;
443 return (0);
444 } else
445 {
446 return (EINVAL); /* Not an attribute structure! */
447 }
448 }
449
450 /*
451 * Get the 'detach' state from a thread attribute structure.
452 * Note: written as a helper function for info hiding
453 */
454 int
455 pthread_attr_getdetachstate(const pthread_attr_t *attr,
456 int *detachstate)
457 {
458 if (attr->sig == _PTHREAD_ATTR_SIG)
459 {
460 *detachstate = attr->detached;
461 return (0);
462 } else
463 {
464 return (EINVAL); /* Not an attribute structure! */
465 }
466 }
467
468 /*
469 * Get the 'inherit scheduling' info from a thread attribute structure.
470 * Note: written as a helper function for info hiding
471 */
472 int
473 pthread_attr_getinheritsched(const pthread_attr_t *attr,
474 int *inheritsched)
475 {
476 if (attr->sig == _PTHREAD_ATTR_SIG)
477 {
478 *inheritsched = attr->inherit;
479 return (0);
480 } else
481 {
482 return (EINVAL); /* Not an attribute structure! */
483 }
484 }
485
486 /*
487 * Get the scheduling parameters from a thread attribute structure.
488 * Note: written as a helper function for info hiding
489 */
490 int
491 pthread_attr_getschedparam(const pthread_attr_t *attr,
492 struct sched_param *param)
493 {
494 if (attr->sig == _PTHREAD_ATTR_SIG)
495 {
496 *param = attr->param;
497 return (0);
498 } else
499 {
500 return (EINVAL); /* Not an attribute structure! */
501 }
502 }
503
504 /*
505 * Get the scheduling policy from a thread attribute structure.
506 * Note: written as a helper function for info hiding
507 */
508 int
509 pthread_attr_getschedpolicy(const pthread_attr_t *attr,
510 int *policy)
511 {
512 if (attr->sig == _PTHREAD_ATTR_SIG)
513 {
514 *policy = attr->policy;
515 return (0);
516 } else
517 {
518 return (EINVAL); /* Not an attribute structure! */
519 }
520 }
521
522 /* Retain the existing default stack size of 512K rather than depending on the main thread's default stack size */
523 static const size_t DEFAULT_STACK_SIZE = (512*1024);
524 /*
525 * Initialize a thread attribute structure to default values.
526 */
527 int
528 pthread_attr_init(pthread_attr_t *attr)
529 {
530 attr->stacksize = DEFAULT_STACK_SIZE;
531 attr->stackaddr = NULL;
532 attr->sig = _PTHREAD_ATTR_SIG;
533 attr->param.sched_priority = default_priority;
534 attr->param.quantum = 10; /* quantum isn't public yet */
535 attr->detached = PTHREAD_CREATE_JOINABLE;
536 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
537 attr->policy = _PTHREAD_DEFAULT_POLICY;
538 attr->freeStackOnExit = 1;
539 attr->fastpath = 1;
540 attr->schedset = 0;
541 attr->guardsize = vm_page_size;
542 return (0);
543 }
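/*
 * Illustrative usage sketch (assumption, not part of libc): the normal
 * attribute lifecycle as a caller of the routines in this file would use it.
 * worker() is a hypothetical start routine; error handling is omitted.
 *
 *     #include <pthread.h>
 *
 *     extern void *worker(void *);
 *
 *     pthread_attr_t attr;
 *     pthread_t tid;
 *     pthread_attr_init(&attr);                               // 512K stack, joinable, default policy
 *     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *     pthread_attr_setstacksize(&attr, 1024 * 1024);          // page multiple, >= PTHREAD_STACK_MIN
 *     pthread_create(&tid, &attr, worker, NULL);
 *     pthread_attr_destroy(&attr);
 */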
544
545 /*
546 * Set the 'detach' state in a thread attribute structure.
547 * Note: written as a helper function for info hiding
548 */
549 int
550 pthread_attr_setdetachstate(pthread_attr_t *attr,
551 int detachstate)
552 {
553 if (attr->sig == _PTHREAD_ATTR_SIG)
554 {
555 if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
556 (detachstate == PTHREAD_CREATE_DETACHED))
557 {
558 attr->detached = detachstate;
559 return (0);
560 } else
561 {
562 return (EINVAL);
563 }
564 } else
565 {
566 return (EINVAL); /* Not an attribute structure! */
567 }
568 }
569
570 /*
571 * Set the 'inherit scheduling' state in a thread attribute structure.
572 * Note: written as a helper function for info hiding
573 */
574 int
575 pthread_attr_setinheritsched(pthread_attr_t *attr,
576 int inheritsched)
577 {
578 if (attr->sig == _PTHREAD_ATTR_SIG)
579 {
580 if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
581 (inheritsched == PTHREAD_EXPLICIT_SCHED))
582 {
583 attr->inherit = inheritsched;
584 return (0);
585 } else
586 {
587 return (EINVAL);
588 }
589 } else
590 {
591 return (EINVAL); /* Not an attribute structure! */
592 }
593 }
594
595 /*
596 * Set the scheduling parameters in a thread attribute structure.
597 * Note: written as a helper function for info hiding
598 */
599 int
600 pthread_attr_setschedparam(pthread_attr_t *attr,
601 const struct sched_param *param)
602 {
603 if (attr->sig == _PTHREAD_ATTR_SIG)
604 {
605 /* TODO: Validate sched_param fields */
606 attr->param = *param;
607 attr->schedset = 1;
608 return (0);
609 } else
610 {
611 return (EINVAL); /* Not an attribute structure! */
612 }
613 }
614
615 /*
616 * Set the scheduling policy in a thread attribute structure.
617 * Note: written as a helper function for info hiding
618 */
619 int
620 pthread_attr_setschedpolicy(pthread_attr_t *attr,
621 int policy)
622 {
623 if (attr->sig == _PTHREAD_ATTR_SIG)
624 {
625 if ((policy == SCHED_OTHER) ||
626 (policy == SCHED_RR) ||
627 (policy == SCHED_FIFO))
628 {
629 attr->policy = policy;
630 attr->schedset = 1;
631 return (0);
632 } else
633 {
634 return (EINVAL);
635 }
636 } else
637 {
638 return (EINVAL); /* Not an attribute structure! */
639 }
640 }
641
642 /*
643 * Set the scope for the thread.
644 * We currently only provide PTHREAD_SCOPE_SYSTEM
645 */
646 int
647 pthread_attr_setscope(pthread_attr_t *attr,
648 int scope)
649 {
650 if (attr->sig == _PTHREAD_ATTR_SIG) {
651 if (scope == PTHREAD_SCOPE_SYSTEM) {
652 /* No attribute yet for the scope */
653 return (0);
654 } else if (scope == PTHREAD_SCOPE_PROCESS) {
655 return (ENOTSUP);
656 }
657 }
658 return (EINVAL); /* Not an attribute structure! */
659 }
660
661 /*
662 * Get the scope for the thread.
663 * We currently only provide PTHREAD_SCOPE_SYSTEM
664 */
665 int
666 pthread_attr_getscope(const pthread_attr_t *attr,
667 int *scope)
668 {
669 if (attr->sig == _PTHREAD_ATTR_SIG) {
670 *scope = PTHREAD_SCOPE_SYSTEM;
671 return (0);
672 }
673 return (EINVAL); /* Not an attribute structure! */
674 }
675
676 /* Get the base stack address from a thread attribute structure */
677 int
678 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
679 {
680 if (attr->sig == _PTHREAD_ATTR_SIG) {
681 *stackaddr = attr->stackaddr;
682 return (0);
683 } else {
684 return (EINVAL); /* Not an attribute structure! */
685 }
686 }
687
688 int
689 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
690 {
691 if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
692 attr->stackaddr = stackaddr;
693 attr->freeStackOnExit = 0;
694 attr->fastpath = 0;
695 return (0);
696 } else {
697 return (EINVAL); /* Not an attribute structure! */
698 }
699 }
700
701 int
702 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
703 {
704 if (attr->sig == _PTHREAD_ATTR_SIG) {
705 *stacksize = attr->stacksize;
706 return (0);
707 } else {
708 return (EINVAL); /* Not an attribute structure! */
709 }
710 }
711
712 int
713 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
714 {
715 if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
716 attr->stacksize = stacksize;
717 return (0);
718 } else {
719 return (EINVAL); /* Not an attribute structure! */
720 }
721 }
722
723 int
724 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
725 {
726 if (attr->sig == _PTHREAD_ATTR_SIG) {
727 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
728 *stacksize = attr->stacksize;
729 return (0);
730 } else {
731 return (EINVAL); /* Not an attribute structure! */
732 }
733 }
734
735 /* Per the SUS spec, stackaddr is the base address, i.e. the lowest addressable
736 * byte of the stack. This is not the same convention as pthread_attr_setstackaddr.
737 */
738 int
739 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
740 {
741 if ((attr->sig == _PTHREAD_ATTR_SIG) &&
742 (((uintptr_t)stackaddr % vm_page_size) == 0) &&
743 ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
744 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
745 attr->stacksize = stacksize;
746 attr->freeStackOnExit = 0;
747 attr->fastpath = 0;
748 return (0);
749 } else {
750 return (EINVAL); /* Not an attribute structure! */
751 }
752 }
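/*
 * Sketch contrasting the two conventions (the example region is hypothetical):
 * pthread_attr_setstackaddr() takes the base (highest) address, while
 * pthread_attr_setstack() takes the lowest address plus the size, per SUS.
 * Both calls below describe the same 512K region.
 *
 *     size_t sz = 512 * 1024;
 *     char *lowest = valloc(sz);              // page-aligned, assumed to succeed
 *
 *     pthread_attr_setstack(&attr, lowest, sz);
 *     // equivalent older-style pair:
 *     pthread_attr_setstackaddr(&attr, lowest + sz);
 *     pthread_attr_setstacksize(&attr, sz);
 */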
753
754
755 /*
756 * Set the guardsize attribute in the attr.
757 */
758 int
759 pthread_attr_setguardsize(pthread_attr_t *attr,
760 size_t guardsize)
761 {
762 if (attr->sig == _PTHREAD_ATTR_SIG) {
763 /* A guardsize of 0 is valid; it means no guard page */
764 if ((guardsize % vm_page_size) == 0) {
765 attr->guardsize = guardsize;
766 attr->fastpath = 0;
767 return (0);
768 } else
769 return(EINVAL);
770 }
771 return (EINVAL); /* Not an attribute structure! */
772 }
773
774 /*
775 * Get the guardsize attribute in the attr.
776 */
777 int
778 pthread_attr_getguardsize(const pthread_attr_t *attr,
779 size_t *guardsize)
780 {
781 if (attr->sig == _PTHREAD_ATTR_SIG) {
782 *guardsize = attr->guardsize;
783 return (0);
784 }
785 return (EINVAL); /* Not an attribute structure! */
786 }
787
788
789 /*
790 * Create and start execution of a new thread.
791 */
792
793 static void
794 _pthread_body(pthread_t self)
795 {
796 _pthread_set_self(self);
797 _pthread_exit(self, (self->fun)(self->arg));
798 }
799
800 void
801 _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
802 {
803 int ret;
804 #if WQ_DEBUG
805 pthread_t pself;
806 #endif
807 pthread_attr_t *attrs = &_pthread_attr_default;
808 char * stackaddr;
809
810 if ((pflags & PTHREAD_START_CUSTOM) == 0) {
811 stackaddr = self;
812 _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
813 LOCK(_pthread_list_lock);
814 if (pflags & PTHREAD_START_SETSCHED) {
815 self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
816 self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
817 }
818 /* These are not joinable threads */
819 if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
820 self->detached &= ~PTHREAD_CREATE_JOINABLE;
821 self->detached |= PTHREAD_CREATE_DETACHED;
822 }
823 } else
824 LOCK(_pthread_list_lock);
825 self->kernel_thread = kport;
826 self->fun = fun;
827 self->arg = funarg;
828
829 /* Add to the pthread list */
830 if (self->parentcheck == 0) {
831 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
832 #if WQ_TRACE
833 __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
834 #endif
835 _pthread_count++;
836 }
837 self->childrun = 1;
838 UNLOCK(_pthread_list_lock);
839 #if defined(__i386__) || defined(__x86_64__)
840 _pthread_set_self(self);
841 #endif
842
843 #if WQ_DEBUG
844 pself = pthread_self();
845 if (self != pself)
846 abort();
847 #endif
848 #if WQ_TRACE
849 __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
850 #endif
851
852 _pthread_exit(self, (self->fun)(self->arg));
853 }
854
855 int
856 _pthread_create(pthread_t t,
857 const pthread_attr_t *attrs,
858 void *stack,
859 const mach_port_t kernel_thread)
860 {
861 int res;
862 res = 0;
863
864 do
865 {
866 memset(t, 0, sizeof(*t));
867 t->newstyle = 0;
868 t->schedset = 0;
869 t->kernalloc = 0;
870 t->tsd[0] = t;
871 t->max_tsd_key = 0;
872 t->wqthread = 0;
873 t->cur_workq = 0;
874 t->cur_workitem = 0;
875 t->stacksize = attrs->stacksize;
876 t->stackaddr = (void *)stack;
877 t->guardsize = attrs->guardsize;
878 t->kernel_thread = kernel_thread;
879 t->detached = attrs->detached;
880 t->inherit = attrs->inherit;
881 t->policy = attrs->policy;
882 t->param = attrs->param;
883 t->freeStackOnExit = attrs->freeStackOnExit;
884 t->mutexes = (struct _pthread_mutex *)NULL;
885 t->sig = _PTHREAD_SIG;
886 t->reply_port = MACH_PORT_NULL;
887 t->cthread_self = NULL;
888 LOCK_INIT(t->lock);
889 t->plist.tqe_next = (struct _pthread *)0;
890 t->plist.tqe_prev = (struct _pthread **)0;
891 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
892 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
893 t->death = SEMAPHORE_NULL;
894
895 if (kernel_thread != MACH_PORT_NULL)
896 (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
897 } while (0);
898 return (res);
899 }
900
901 void
902 _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
903 {
904 mach_vm_offset_t stackaddr = (mach_vm_offset_t)stack;
905
906 if (nozero == 0) {
907 memset(t, 0, sizeof(*t));
908 t->plist.tqe_next = (struct _pthread *)0;
909 t->plist.tqe_prev = (struct _pthread **)0;
910 }
911 t->schedset = attrs->schedset;
912 t->tsd[0] = t;
913 if (kernalloc != 0) {
914 stackaddr = (mach_vm_offset_t)t;
915
916 /* if allocated from kernel set values appropriately */
917 t->stacksize = stacksize;
918 t->stackaddr = stackaddr;
919 t->freeStackOnExit = 1;
920 t->freeaddr = stackaddr - stacksize - vm_page_size;
921 t->freesize = pthreadsize + stacksize + vm_page_size;
922 } else {
923 t->stacksize = attrs->stacksize;
924 t->stackaddr = (void *)stack;
925 }
926 t->guardsize = attrs->guardsize;
927 t->detached = attrs->detached;
928 t->inherit = attrs->inherit;
929 t->policy = attrs->policy;
930 t->param = attrs->param;
931 t->mutexes = (struct _pthread_mutex *)NULL;
932 t->sig = _PTHREAD_SIG;
933 t->reply_port = MACH_PORT_NULL;
934 t->cthread_self = NULL;
935 LOCK_INIT(t->lock);
936 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
937 t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
938 t->death = SEMAPHORE_NULL;
939 t->newstyle = 1;
940 t->kernalloc = kernalloc;
941 t->wqthread = 0;
942 t->cur_workq = 0;
943 t->cur_workitem = 0;
944 t->max_tsd_key = 0;
945 }
946
947 static void
948 _pthread_tsd_reinit(pthread_t t)
949 {
950 bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
951 }
952
953
954 /* Need to deprecate this in future */
955 int
956 _pthread_is_threaded(void)
957 {
958 return __is_threaded;
959 }
960
961 /* Non-portable public API to determine whether this process has (or had) at least one thread
962 * apart from the main thread. There is a race if a thread is in the process of being
963 * created at the time of the call. It does not indicate whether more than one thread
964 * exists at this point in time.
965 */
966 int
967 pthread_is_threaded_np(void)
968 {
969 return (__is_threaded);
970 }
971
972 mach_port_t
973 pthread_mach_thread_np(pthread_t t)
974 {
975 mach_port_t kport = MACH_PORT_NULL;
976
977 if (_pthread_lookup_thread(t, &kport, 0) != 0)
978 return(NULL);
979
980 return(kport);
981 }
982
983 pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread)
984 {
985 struct _pthread * p = NULL;
986
987 /* No need to wait as mach port is already known */
988 LOCK(_pthread_list_lock);
989 TAILQ_FOREACH(p, &__pthread_head, plist) {
990 if (p->kernel_thread == kernel_thread)
991 break;
992 }
993 UNLOCK(_pthread_list_lock);
994 return p;
995 }
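/*
 * Sketch (assumption): round-tripping between a pthread_t and its Mach thread
 * port with the two non-portable helpers above.
 *
 *     mach_port_t kport = pthread_mach_thread_np(pthread_self());
 *     pthread_t same = pthread_from_mach_thread_np(kport);
 *     // same == pthread_self() for as long as the thread is alive
 */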
996
997 size_t
998 pthread_get_stacksize_np(pthread_t t)
999 {
1000 int ret;
1001 size_t size = 0;
1002
1003 if (t == NULL)
1004 return(ESRCH);
1005
1006 LOCK(_pthread_list_lock);
1007
1008 if ((ret = _pthread_find_thread(t)) != 0) {
1009 UNLOCK(_pthread_list_lock);
1010 return(ret);
1011 }
1012 size = t->stacksize;
1013 UNLOCK(_pthread_list_lock);
1014 return(size);
1015 }
1016
1017 void *
1018 pthread_get_stackaddr_np(pthread_t t)
1019 {
1020 int ret;
1021 void * addr = NULL;
1022
1023 if (t == NULL)
1024 return(ESRCH);
1025
1026 LOCK(_pthread_list_lock);
1027
1028 if ((ret = _pthread_find_thread(t)) != 0) {
1029 UNLOCK(_pthread_list_lock);
1030 return(ret);
1031 }
1032 addr = t->stackaddr;
1033 UNLOCK(_pthread_list_lock);
1034
1035 return(addr);
1036 }
1037
1038 mach_port_t
1039 _pthread_reply_port(pthread_t t)
1040 {
1041 return t->reply_port;
1042 }
1043
1044
1045 /* returns non-zero if the current thread is the main thread */
1046 int
1047 pthread_main_np(void)
1048 {
1049 pthread_t self = pthread_self();
1050
1051 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
1052 }
1053
1054 static int
1055 _new_pthread_create_suspended(pthread_t *thread,
1056 const pthread_attr_t *attr,
1057 void *(*start_routine)(void *),
1058 void *arg,
1059 int create_susp)
1060 {
1061 pthread_attr_t *attrs;
1062 void *stack;
1063 int error;
1064 unsigned int flags;
1065 pthread_t t;
1066 kern_return_t kern_res;
1067 mach_port_t kernel_thread = MACH_PORT_NULL;
1068 int needresume;
1069 task_t self = mach_task_self();
1070 int kernalloc = 0;
1071 int susp = create_susp;
1072
1073 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1074 { /* Set up default parameters */
1075 attrs = &_pthread_attr_default;
1076 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1077 return EINVAL;
1078 }
1079 error = 0;
1080
1081 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1082 (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
1083 needresume = 1;
1084 susp = 1;
1085 } else
1086 needresume = 0;
1087
1088 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check here
1089 * whether any change in priority or policy is needed.
1090 */
1091 if ((__oldstyle == 1) || (create_susp != 0)) {
1092 /* Rosetta or pthread_create_suspended() */
1093 /* running under rosetta */
1094 /* Allocate a stack for the thread */
1095 #if WQ_TRACE
1096 __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
1097 #endif
1098 if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
1099 return(error);
1100 }
1101 t = (pthread_t)malloc(sizeof(struct _pthread));
1102 *thread = t;
1103 if (susp) {
1104 /* Create the Mach thread for this thread */
1105 PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
1106 if (kern_res != KERN_SUCCESS)
1107 {
1108 printf("Can't create thread: %d\n", kern_res);
1109 return(EINVAL);
1110 }
1111 }
1112 if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1113 {
1114 return(error);
1115 }
1116 set_malloc_singlethreaded(0);
1117 __is_threaded = 1;
1118
1119 /* Send it on its way */
1120 t->arg = arg;
1121 t->fun = start_routine;
1122 t->newstyle = 0;
1123 /* Now set it up to execute */
1124 LOCK(_pthread_list_lock);
1125 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1126 #if WQ_TRACE
1127 __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
1128 #endif
1129 _pthread_count++;
1130 UNLOCK(_pthread_list_lock);
1131 _pthread_setup(t, _pthread_body, stack, susp, needresume);
1132 return(0);
1133 } else {
1134
1135 flags = 0;
1136 if (attrs->fastpath == 1)
1137 kernalloc = 1;
1138
1139 if (attrs->detached == PTHREAD_CREATE_DETACHED)
1140 flags |= PTHREAD_START_DETACHED;
1141 if (attrs->schedset != 0) {
1142 flags |= PTHREAD_START_SETSCHED;
1143 flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
1144 flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
1145 }
1146
1147 set_malloc_singlethreaded(0);
1148 __is_threaded = 1;
1149
1150 if (kernalloc == 0) {
1151 /* Allocate a stack for the thread */
1152 flags |= PTHREAD_START_CUSTOM;
1153 if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
1154 return(error);
1155 }
1156 /* Send it on its way */
1157 t->arg = arg;
1158 t->fun = start_routine;
1159 t->newstyle = 1;
1160
1161 #if WQ_TRACE
1162 __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
1163 #endif
1164
1165 if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == -1) {
1166 _pthread_free_pthread_onstack(t, 1, 0);
1167 return (EAGAIN);
1168 }
1169 LOCK(_pthread_list_lock);
1170 t->parentcheck = 1;
1171 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1172 /* detached child exited, mop up */
1173 UNLOCK(_pthread_list_lock);
1174 #if WQ_TRACE
1175 __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
1176 #endif
1177 free(t);
1178 } else if (t->childrun == 0) {
1179 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1180 _pthread_count++;
1181 #if WQ_TRACE
1182 __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
1183 #endif
1184 UNLOCK(_pthread_list_lock);
1185 } else
1186 UNLOCK(_pthread_list_lock);
1187
1188 *thread = t;
1189
1190 #if WQ_TRACE
1191 __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
1192 #endif
1193 return (0);
1194
1195 } else {
1196 /* kernel allocation */
1197 #if WQ_TRACE
1198 __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
1199 #endif
1200 if ((t = __bsdthread_create(start_routine, arg, attrs->stacksize, NULL, flags)) == -1)
1201 return (EAGAIN);
1202 /* Now set it up to execute */
1203 LOCK(_pthread_list_lock);
1204 t->parentcheck = 1;
1205 if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
1206 /* detached child exited, mop up */
1207 UNLOCK(_pthread_list_lock);
1208 #if WQ_TRACE
1209 __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
1210 #endif
1211 vm_deallocate(self, t, pthreadsize);
1212 } else if (t->childrun == 0) {
1213 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1214 _pthread_count++;
1215 #if WQ_TRACE
1216 __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
1217 #endif
1218 UNLOCK(_pthread_list_lock);
1219 } else
1220 UNLOCK(_pthread_list_lock);
1221
1222 *thread = t;
1223
1224 #if WQ_TRACE
1225 __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
1226 #endif
1227 return(0);
1228 }
1229 }
1230 }
1231
1232 static int
1233 _pthread_create_suspended(pthread_t *thread,
1234 const pthread_attr_t *attr,
1235 void *(*start_routine)(void *),
1236 void *arg,
1237 int suspended)
1238 {
1239 pthread_attr_t *attrs;
1240 void *stack;
1241 int res;
1242 pthread_t t;
1243 kern_return_t kern_res;
1244 mach_port_t kernel_thread = MACH_PORT_NULL;
1245 int needresume;
1246
1247 if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
1248 { /* Set up default parameters */
1249 attrs = &_pthread_attr_default;
1250 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1251 return EINVAL;
1252 }
1253 res = 0;
1254
1255 /* In the default policy (i.e. SCHED_OTHER) only sched_priority is used. Check here
1256 * whether any change in priority or policy is needed.
1257 */
1258 if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
1259 (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
1260 needresume = 1;
1261 suspended = 1;
1262 } else
1263 needresume = 0;
1264
1265 do
1266 {
1267 /* Allocate a stack for the thread */
1268 if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
1269 break;
1270 }
1271 t = (pthread_t)malloc(sizeof(struct _pthread));
1272 *thread = t;
1273 if (suspended) {
1274 /* Create the Mach thread for this thread */
1275 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
1276 if (kern_res != KERN_SUCCESS)
1277 {
1278 printf("Can't create thread: %d\n", kern_res);
1279 res = EINVAL; /* Need better error here? */
1280 break;
1281 }
1282 }
1283 if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
1284 {
1285 break;
1286 }
1287 set_malloc_singlethreaded(0);
1288 __is_threaded = 1;
1289
1290 /* Send it on its way */
1291 t->arg = arg;
1292 t->fun = start_routine;
1293 /* Now set it up to execute */
1294 LOCK(_pthread_list_lock);
1295 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1296 #if WQ_TRACE
1297 __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
1298 #endif
1299 _pthread_count++;
1300 UNLOCK(_pthread_list_lock);
1301 _pthread_setup(t, _pthread_body, stack, suspended, needresume);
1302 } while (0);
1303 return (res);
1304 }
1305
1306 int
1307 pthread_create(pthread_t *thread,
1308 const pthread_attr_t *attr,
1309 void *(*start_routine)(void *),
1310 void *arg)
1311 {
1312 return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
1313 }
1314
1315 int
1316 pthread_create_suspended_np(pthread_t *thread,
1317 const pthread_attr_t *attr,
1318 void *(*start_routine)(void *),
1319 void *arg)
1320 {
1321 return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
1322 }
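/*
 * Usage sketch (assumption): a thread created with
 * pthread_create_suspended_np() does not run until its underlying Mach thread
 * is resumed explicitly; thread_resume() is the Mach call a caller would
 * typically use for that.  worker() is hypothetical.
 *
 *     pthread_t tid;
 *     pthread_create_suspended_np(&tid, NULL, worker, NULL);
 *     // ... inspect or adjust the thread here ...
 *     thread_resume(pthread_mach_thread_np(tid));
 *     pthread_join(tid, NULL);
 */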
1323
1324 /*
1325 * Make a thread 'detached' - no longer 'joinable' by other threads.
1326 */
1327 int
1328 pthread_detach(pthread_t thread)
1329 {
1330 int newstyle = 0;
1331 int ret;
1332
1333 if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
1334 return (ret); /* Not a valid thread */
1335
1336 LOCK(thread->lock);
1337 newstyle = thread->newstyle;
1338 if (thread->detached & PTHREAD_CREATE_JOINABLE)
1339 {
1340 if (thread->detached & _PTHREAD_EXITED) {
1341 UNLOCK(thread->lock);
1342 pthread_join(thread, NULL);
1343 return 0;
1344 } else {
1345 if (newstyle == 0) {
1346 semaphore_t death = thread->death;
1347
1348 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1349 thread->detached |= PTHREAD_CREATE_DETACHED;
1350 UNLOCK(thread->lock);
1351 if (death)
1352 (void) semaphore_signal(death);
1353 } else {
1354 mach_port_t joinport = thread->joiner_notify;
1355
1356 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1357 thread->detached |= PTHREAD_CREATE_DETACHED;
1358
1359 UNLOCK(thread->lock);
1360 if (joinport) {
1361 semaphore_signal(joinport);
1362 }
1363 }
1364 return(0);
1365 }
1366 } else {
1367 UNLOCK(thread->lock);
1368 return (EINVAL);
1369 }
1370 }
1371
1372
1373 /*
1374 * pthread_kill call to system call
1375 */
1376
1377 extern int __pthread_kill(mach_port_t, int);
1378
1379 int
1380 pthread_kill (
1381 pthread_t th,
1382 int sig)
1383 {
1384 int error = 0;
1385 mach_port_t kport = MACH_PORT_NULL;
1386
1387 if ((sig < 0) || (sig > NSIG))
1388 return(EINVAL);
1389
1390 if (_pthread_lookup_thread(th, &kport, 0) != 0)
1391 return (ESRCH); /* Not a valid thread */
1392
1393 error = __pthread_kill(kport, sig);
1394
1395 if (error == -1)
1396 error = errno;
1397 return(error);
1398 }
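/*
 * Sketch (assumption): directing a signal at one specific thread.  worker_tid
 * is hypothetical, and the target must have a handler installed for SIGUSR1.
 *
 *     pthread_kill(worker_tid, SIGUSR1);   // deliver SIGUSR1 to that thread only
 *     pthread_kill(worker_tid, 0);         // signal 0: liveness/validity probe, nothing delivered
 */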
1399
1400 /* Announce that there are pthread resources ready to be reclaimed in a */
1401 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
1402 /* thread underneath is terminated right away. */
1403 static
1404 void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
1405 pthread_reap_msg_t msg;
1406 kern_return_t ret;
1407
1408 msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
1409 MACH_MSG_TYPE_MOVE_SEND);
1410 msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
1411 msg.header.msgh_remote_port = thread_recycle_port;
1412 msg.header.msgh_local_port = kernel_thread;
1413 msg.header.msgh_id = 0x44454144; /* 'DEAD' */
1414 msg.thread = thread;
1415 ret = mach_msg_send(&msg.header);
1416 assert(ret == MACH_MSG_SUCCESS);
1417 }
1418
1419 /* Reap the resources for available threads */
1420 __private_extern__
1421 int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
1422 mach_port_type_t ptype;
1423 kern_return_t ret;
1424 task_t self;
1425
1426 self = mach_task_self();
1427 if (kernel_thread != MACH_PORT_DEAD) {
1428 ret = mach_port_type(self, kernel_thread, &ptype);
1429 if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
1430 /* not quite dead yet... */
1431 return EAGAIN;
1432 }
1433 ret = mach_port_deallocate(self, kernel_thread);
1434 if (ret != KERN_SUCCESS) {
1435 fprintf(stderr,
1436 "mach_port_deallocate(kernel_thread) failed: %s\n",
1437 mach_error_string(ret));
1438 }
1439 }
1440
1441 if (th->reply_port != MACH_PORT_NULL) {
1442 ret = mach_port_mod_refs(self, th->reply_port,
1443 MACH_PORT_RIGHT_RECEIVE, -1);
1444 if (ret != KERN_SUCCESS) {
1445 fprintf(stderr,
1446 "mach_port_mod_refs(reply_port) failed: %s\n",
1447 mach_error_string(ret));
1448 }
1449 }
1450
1451 if (th->freeStackOnExit) {
1452 vm_address_t addr = (vm_address_t)th->stackaddr;
1453 vm_size_t size;
1454
1455 size = (vm_size_t)th->stacksize + th->guardsize;
1456
1457 addr -= size;
1458 ret = vm_deallocate(self, addr, size);
1459 if (ret != KERN_SUCCESS) {
1460 fprintf(stderr,
1461 "vm_deallocate(stack) failed: %s\n",
1462 mach_error_string(ret));
1463 }
1464 }
1465
1466
1467 if (value_ptr)
1468 *value_ptr = th->exit_value;
1469 if (conforming) {
1470 if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1471 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1472 *value_ptr = PTHREAD_CANCELED;
1473 th->sig = _PTHREAD_NO_SIG;
1474 }
1475
1476
1477 if (th != &_thread)
1478 free(th);
1479
1480 return 0;
1481 }
1482
1483 static
1484 void _pthread_reap_threads(void)
1485 {
1486 pthread_reap_msg_t msg;
1487 kern_return_t ret;
1488
1489 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1490 sizeof msg, thread_recycle_port,
1491 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1492 while (ret == MACH_MSG_SUCCESS) {
1493 mach_port_t kernel_thread = msg.header.msgh_remote_port;
1494 pthread_t thread = msg.thread;
1495
1496 if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
1497 {
1498 /* not dead yet, put it back for someone else to reap, stop here */
1499 _pthread_become_available(thread, kernel_thread);
1500 return;
1501 }
1502 ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
1503 sizeof msg, thread_recycle_port,
1504 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
1505 }
1506 }
1507
1508 /* For compatibility... */
1509
1510 pthread_t
1511 _pthread_self() {
1512 return pthread_self();
1513 }
1514
1515 /*
1516 * Terminate a thread.
1517 */
1518 int __disable_threadsignal(int);
1519
1520 static void
1521 _pthread_exit(pthread_t self, void *value_ptr)
1522 {
1523 struct __darwin_pthread_handler_rec *handler;
1524 kern_return_t kern_res;
1525 int thread_count;
1526 int newstyle = self->newstyle;
1527
1528 /* Prevent this thread from receiving any further signals */
1529 __disable_threadsignal(1);
1530
1531 #if WQ_TRACE
1532 __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
1533 #endif
1534
1535 /* set cancel state to disable and type to deferred */
1536 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
1537
1538 while ((handler = self->__cleanup_stack) != 0)
1539 {
1540 (handler->__routine)(handler->__arg);
1541 self->__cleanup_stack = handler->__next;
1542 }
1543 _pthread_tsd_cleanup(self);
1544
1545 if (newstyle == 0) {
1546 _pthread_reap_threads();
1547
1548 LOCK(self->lock);
1549 self->detached |= _PTHREAD_EXITED;
1550
1551 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1552 mach_port_t death = self->death;
1553 self->exit_value = value_ptr;
1554 UNLOCK(self->lock);
1555 /* the joiner will need a kernel thread reference, leave ours for it */
1556 if (death) {
1557 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1558 if (kern_res != KERN_SUCCESS)
1559 fprintf(stderr,
1560 "semaphore_signal(death) failed: %s\n",
1561 mach_error_string(kern_res));
1562 }
1563 LOCK(_pthread_list_lock);
1564 thread_count = --_pthread_count;
1565 UNLOCK(_pthread_list_lock);
1566 } else {
1567 UNLOCK(self->lock);
1568 LOCK(_pthread_list_lock);
1569 TAILQ_REMOVE(&__pthread_head, self, plist);
1570 #if WQ_TRACE
1571 __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
1572 #endif
1573 thread_count = --_pthread_count;
1574 UNLOCK(_pthread_list_lock);
1575 /* with no joiner, we let _pthread_become_available consume our cached ref */
1576 _pthread_become_available(self, self->kernel_thread);
1577 }
1578
1579 if (thread_count <= 0)
1580 exit(0);
1581
1582 /* Use a new reference to terminate ourselves. Should never return. */
1583 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1584 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1585 mach_error_string(kern_res));
1586 } else {
1587 semaphore_t joinsem = SEMAPHORE_NULL;
1588
1589 if ((self->joiner_notify == NULL) && (self->detached & PTHREAD_CREATE_JOINABLE))
1590 joinsem = new_sem_from_pool();
1591 LOCK(self->lock);
1592 self->detached |= _PTHREAD_EXITED;
1593
1594 self->exit_value = value_ptr;
1595 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1596 if (self->joiner_notify == NULL) {
1597 self->joiner_notify = joinsem;
1598 joinsem = SEMAPHORE_NULL;
1599 }
1600 UNLOCK(self->lock);
1601 if (joinsem != SEMAPHORE_NULL)
1602 restore_sem_to_pool(joinsem);
1603 _pthread_free_pthread_onstack(self, 0, 1);
1604 } else {
1605 UNLOCK(self->lock);
1606 /* with no joiner, we let become available consume our cached ref */
1607 if (joinsem != SEMAPHORE_NULL)
1608 restore_sem_to_pool(joinsem);
1609 _pthread_free_pthread_onstack(self, 1, 1);
1610 }
1611 }
1612 abort();
1613 }
1614
1615 void
1616 pthread_exit(void *value_ptr)
1617 {
1618 pthread_t self = pthread_self();
1619 if (self->wqthread != 0)
1620 workqueue_exit(self, self->cur_workq, self->cur_workitem);
1621 else
1622 _pthread_exit(self, value_ptr);
1623 }
1624
1625 /*
1626 * Get the scheduling policy and scheduling parameters for a thread.
1627 */
1628 int
1629 pthread_getschedparam(pthread_t thread,
1630 int *policy,
1631 struct sched_param *param)
1632 {
1633 int ret;
1634
1635 if (thread == NULL)
1636 return(ESRCH);
1637
1638 LOCK(_pthread_list_lock);
1639
1640 if ((ret = _pthread_find_thread(thread)) != 0) {
1641 UNLOCK(_pthread_list_lock);
1642 return(ret);
1643 }
1644 if (policy != 0)
1645 *policy = thread->policy;
1646 if (param != 0)
1647 *param = thread->param;
1648 UNLOCK(_pthread_list_lock);
1649
1650 return(0);
1651 }
1652
1653 /*
1654 * Set the scheduling policy and scheduling parameters for a thread.
1655 */
1656 static int
1657 pthread_setschedparam_internal(pthread_t thread,
1658 mach_port_t kport,
1659 int policy,
1660 const struct sched_param *param)
1661 {
1662 policy_base_data_t bases;
1663 policy_base_t base;
1664 mach_msg_type_number_t count;
1665 kern_return_t ret;
1666
1667 switch (policy)
1668 {
1669 case SCHED_OTHER:
1670 bases.ts.base_priority = param->sched_priority;
1671 base = (policy_base_t)&bases.ts;
1672 count = POLICY_TIMESHARE_BASE_COUNT;
1673 break;
1674 case SCHED_FIFO:
1675 bases.fifo.base_priority = param->sched_priority;
1676 base = (policy_base_t)&bases.fifo;
1677 count = POLICY_FIFO_BASE_COUNT;
1678 break;
1679 case SCHED_RR:
1680 bases.rr.base_priority = param->sched_priority;
1681 /* quantum isn't public yet */
1682 bases.rr.quantum = param->quantum;
1683 base = (policy_base_t)&bases.rr;
1684 count = POLICY_RR_BASE_COUNT;
1685 break;
1686 default:
1687 return (EINVAL);
1688 }
1689 ret = thread_policy(kport, policy, base, count, TRUE);
1690 if (ret != KERN_SUCCESS)
1691 return (EINVAL);
1692 return (0);
1693 }
1694
1695 int
1696 pthread_setschedparam(pthread_t t,
1697 int policy,
1698 const struct sched_param *param)
1699 {
1700 mach_port_t kport = MACH_PORT_NULL;
1701 int error;
1702 int bypass = 1;
1703
1704 if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
1705 bypass = 0;
1706 if (_pthread_lookup_thread(t, &kport, 0) != 0)
1707 return(ESRCH);
1708 } else
1709 kport = t->kernel_thread;
1710
1711 error = pthread_setschedparam_internal(t, kport, policy, param);
1712 if (error == 0) {
1713 if (bypass == 0) {
1714 /* ensure the thread is still valid */
1715 LOCK(_pthread_list_lock);
1716 if ((error = _pthread_find_thread(t)) != 0) {
1717 UNLOCK(_pthread_list_lock);
1718 return(error);
1719 }
1720 t->policy = policy;
1721 t->param = *param;
1722 UNLOCK(_pthread_list_lock);
1723 } else {
1724 t->policy = policy;
1725 t->param = *param;
1726 }
1727 }
1728 return(error);
1729 }
1730
1731 /*
1732 * Get the minimum priority for the given policy
1733 */
1734 int
1735 sched_get_priority_min(int policy)
1736 {
1737 return default_priority - 16;
1738 }
1739
1740 /*
1741 * Get the maximum priority for the given policy
1742 */
1743 int
1744 sched_get_priority_max(int policy)
1745 {
1746 return default_priority + 16;
1747 }
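/*
 * Sketch (assumption): bumping the calling thread to the top of the range
 * reported by the two helpers above.
 *
 *     struct sched_param sp = { 0 };
 *     sp.sched_priority = sched_get_priority_max(SCHED_OTHER);
 *     pthread_setschedparam(pthread_self(), SCHED_OTHER, &sp);
 */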
1748
1749 /*
1750 * Determine if two thread identifiers represent the same thread.
1751 */
1752 int
1753 pthread_equal(pthread_t t1,
1754 pthread_t t2)
1755 {
1756 return (t1 == t2);
1757 }
1758
1759 __private_extern__ void
1760 _pthread_set_self(pthread_t p)
1761 {
1762 extern void __pthread_set_self(pthread_t);
1763 if (p == 0) {
1764 bzero(&_thread, sizeof(struct _pthread));
1765 p = &_thread;
1766 }
1767 p->tsd[0] = p;
1768 __pthread_set_self(p);
1769 }
1770
1771 void
1772 cthread_set_self(void *cself)
1773 {
1774 pthread_t self = pthread_self();
1775 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1776 _pthread_set_self(cself);
1777 return;
1778 }
1779 self->cthread_self = cself;
1780 }
1781
1782 void *
1783 ur_cthread_self(void) {
1784 pthread_t self = pthread_self();
1785 if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
1786 return (void *)self;
1787 }
1788 return self->cthread_self;
1789 }
1790
1791 /*
1792 * Cancellation handler for pthread_once, since the init routine can contain a
1793 * cancellation point. In that case we need to release the spin lock.
1794 */
1795 void
1796 __pthread_once_cancel_handler(pthread_once_t *once_control)
1797 {
1798 _spin_unlock(&once_control->lock);
1799 }
1800
1801
1802 /*
1803 * Execute a function exactly one time in a thread-safe fashion.
1804 */
1805 int
1806 pthread_once(pthread_once_t *once_control,
1807 void (*init_routine)(void))
1808 {
1809 _spin_lock(&once_control->lock);
1810 if (once_control->sig == _PTHREAD_ONCE_SIG_init)
1811 {
1812 pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
1813 (*init_routine)();
1814 pthread_cleanup_pop(0);
1815 once_control->sig = _PTHREAD_ONCE_SIG;
1816 }
1817 _spin_unlock(&once_control->lock);
1818 return (0); /* Spec defines no possible errors! */
1819 }
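/*
 * Classic usage sketch (init_once and do_one_time_setup are hypothetical):
 *
 *     static pthread_once_t once = PTHREAD_ONCE_INIT;
 *     static void init_once(void) { do_one_time_setup(); }
 *
 *     pthread_once(&once, init_once);      // init_once runs exactly once per process
 */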
1820
1821 /*
1822 * Insert a cancellation point in a thread.
1823 */
1824 __private_extern__ void
1825 _pthread_testcancel(pthread_t thread, int isconforming)
1826 {
1827 LOCK(thread->lock);
1828 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
1829 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
1830 {
1831 UNLOCK(thread->lock);
1832 if (isconforming)
1833 pthread_exit(PTHREAD_CANCELED);
1834 else
1835 pthread_exit(0);
1836 }
1837 UNLOCK(thread->lock);
1838 }
1839
1840
1841
1842 int
1843 pthread_getconcurrency(void)
1844 {
1845 return(pthread_concurrency);
1846 }
1847
1848 int
1849 pthread_setconcurrency(int new_level)
1850 {
1851 if (new_level < 0)
1852 return EINVAL;
1853 pthread_concurrency = new_level;
1854 return(0);
1855 }
1856
1857 /*
1858 * Perform package initialization - called automatically when application starts
1859 */
1860
1861 __private_extern__ int
1862 pthread_init(void)
1863 {
1864 pthread_attr_t *attrs;
1865 pthread_t thread;
1866 kern_return_t kr;
1867 host_priority_info_data_t priority_info;
1868 host_info_t info;
1869 host_flavor_t flavor;
1870 host_t host;
1871 mach_msg_type_number_t count;
1872 int mib[2];
1873 size_t len;
1874 void *stackaddr;
1875
1876 pthreadsize = round_page(sizeof (struct _pthread));
1877 count = HOST_PRIORITY_INFO_COUNT;
1878 info = (host_info_t)&priority_info;
1879 flavor = HOST_PRIORITY_INFO;
1880 host = mach_host_self();
1881 kr = host_info(host, flavor, info, &count);
1882 if (kr != KERN_SUCCESS)
1883 printf("host_info failed (%d); probably need privilege.\n", kr);
1884 else {
1885 default_priority = priority_info.user_priority;
1886 min_priority = priority_info.minimum_priority;
1887 max_priority = priority_info.maximum_priority;
1888 }
1889 attrs = &_pthread_attr_default;
1890 pthread_attr_init(attrs);
1891
1892 TAILQ_INIT(&__pthread_head);
1893 LOCK_INIT(_pthread_list_lock);
1894 thread = &_thread;
1895 TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
1896 _pthread_set_self(thread);
1897
1898 /* In case of dyld reset the tsd keys from 1 - 10 */
1899 _pthread_keys_init();
1900
1901 mib[0] = CTL_KERN;
1902 mib[1] = KERN_USRSTACK;
1903 len = sizeof (stackaddr);
1904 if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
1905 stackaddr = (void *)USRSTACK;
1906 _pthread_create(thread, attrs, stackaddr, mach_thread_self());
1907 thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
1908
1909 _init_cpu_capabilities();
1910 if (_NumCPUs() > 1)
1911 _spin_tries = MP_SPIN_TRIES;
1912
1913 mach_port_deallocate(mach_task_self(), host);
1914
1915 #if defined(__ppc__)
1916 IF_ROSETTA() {
1917 __oldstyle = 1;
1918 }
1919 #endif
1920 #if defined(__arm__)
1921 __oldstyle = 1;
1922 #endif
1923
1924 #if defined(_OBJC_PAGE_BASE_ADDRESS)
1925 {
1926 vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
1927 kr = vm_map(mach_task_self(),
1928 &objcRTPage, vm_page_size * 4, vm_page_size - 1,
1929 VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
1930 MACH_PORT_NULL,
1931 (vm_address_t)0, FALSE,
1932 (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
1933 VM_INHERIT_DEFAULT);
1934 /* We ignore the return result here. The ObjC runtime will just have to deal. */
1935 }
1936 #endif
1937
1938 mig_init(1); /* enable multi-threaded mig interfaces */
1939 if (__oldstyle == 0) {
1940 #if defined(__i386__) || defined(__x86_64__)
1941 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
1942 #else
1943 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
1944 #endif
1945 }
1946 return 0;
1947 }
1948
1949 int sched_yield(void)
1950 {
1951 swtch_pri(0);
1952 return 0;
1953 }
1954
1955 /* This used to be the "magic" that gets the initialization routine called when the application starts */
1956 static int _do_nothing(void) { return 0; }
1957 int (*_cthread_init_routine)(void) = _do_nothing;
1958
1959 /* Get a semaphore from the pool, growing it if necessary */
1960
1961 __private_extern__ semaphore_t new_sem_from_pool(void) {
1962 kern_return_t res;
1963 semaphore_t sem;
1964 int i;
1965
1966 LOCK(sem_pool_lock);
1967 if (sem_pool_current == sem_pool_count) {
1968 sem_pool_count += 16;
1969 sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
1970 for (i = sem_pool_current; i < sem_pool_count; i++) {
1971 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
1972 }
1973 }
1974 sem = sem_pool[sem_pool_current++];
1975 UNLOCK(sem_pool_lock);
1976 return sem;
1977 }
1978
1979 /* Put a semaphore back into the pool */
1980 __private_extern__ void restore_sem_to_pool(semaphore_t sem) {
1981 LOCK(sem_pool_lock);
1982 sem_pool[--sem_pool_current] = sem;
1983 UNLOCK(sem_pool_lock);
1984 }
1985
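/* Reset the semaphore pool bookkeeping; used after fork() when the pool contents are no longer valid. */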
1986 static void sem_pool_reset(void) {
1987 LOCK(sem_pool_lock);
1988 sem_pool_count = 0;
1989 sem_pool_current = 0;
1990 sem_pool = NULL;
1991 UNLOCK(sem_pool_lock);
1992 }
1993
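/* Re-initialize pthread library state in the child after fork(): reset the semaphore
 * pool and rebuild the thread list so it contains only the forking thread. */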
1994 __private_extern__ void _pthread_fork_child(pthread_t p) {
1995 /* Just in case somebody had it locked... */
1996 UNLOCK(sem_pool_lock);
1997 sem_pool_reset();
1998 /* No need to hold the pthread_list_lock as no one other than this
1999 * thread is present at this time
2000 */
2001 TAILQ_INIT(&__pthread_head);
2002 LOCK_INIT(_pthread_list_lock);
2003 TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
2004 _pthread_count = 1;
2005 }
2006
2007 /*
2008 * Query/update the cancelability 'state' of a thread
2009 */
2010 int
2011 _pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
2012 {
2013 pthread_t self = pthread_self();
2014
2015
2016 switch (state) {
2017 case PTHREAD_CANCEL_ENABLE:
2018 if (conforming)
2019 __pthread_canceled(1);
2020 break;
2021 case PTHREAD_CANCEL_DISABLE:
2022 if (conforming)
2023 __pthread_canceled(2);
2024 break;
2025 default:
2026 return EINVAL;
2027 }
2028
2029 self = pthread_self();
2030 LOCK(self->lock);
2031 if (oldstate)
2032 *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
2033 self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
2034 self->cancel_state |= state;
2035 UNLOCK(self->lock);
2036 if (!conforming)
2037 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
2038 return (0);
2039 }
2040
2041 /* When a thread exits, set the cancellation state to DISABLE and the type to DEFERRED */
2042 static void
2043 _pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
2044 {
2045 LOCK(self->lock);
2046 self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
2047 self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
2048 if (value_ptr == PTHREAD_CANCELED) {
2049 // 4597450: begin
2050 self->detached |= _PTHREAD_WASCANCEL;
2051 // 4597450: end
2052 }
2053 UNLOCK(self->lock);
2054 }
2055
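/* Reap a joinable thread on behalf of pthread_join(): verify it is still on the
 * thread list and joinable, remove it, hand back its exit value (or PTHREAD_CANCELED
 * if it was cancelled), release its reply port, and free its storage. */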
2056 int
2057 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
2058 {
2059 kern_return_t res;
2060 int detached = 0, ret;
2061
2062 #if WQ_TRACE
2063 __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
2064 #endif
2065 /* Handle the scenario where the joiner was waiting for the thread and
2066 * a pthread_detach() happened on that thread. The semaphore will still
2067 * trigger, but by the time the joiner runs the target thread could already
2068 * have been freed. So make sure the thread is still in the list and is
2069 * joinable before continuing with the join.
2070 */
2071 LOCK(_pthread_list_lock);
2072 if ((ret = _pthread_find_thread(thread)) != 0) {
2073 UNLOCK(_pthread_list_lock);
2074 /* returns ESRCH */
2075 return(ret);
2076 }
2077 if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
2078 /* the thread might be a detached thread */
2079 UNLOCK(_pthread_list_lock);
2080 return(ESRCH);
2081
2082 }
2083 /* It is still a joinable thread and needs to be reaped */
2084 TAILQ_REMOVE(&__pthread_head, thread, plist);
2085 #if WQ_TRACE
2086 __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
2087 #endif
2088 UNLOCK(_pthread_list_lock);
2089
2090 if (value_ptr)
2091 *value_ptr = thread->exit_value;
2092 if (conforming) {
2093 if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
2094 (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
2095 *value_ptr = PTHREAD_CANCELED;
2096 }
2097 }
2098 if (thread->reply_port != MACH_PORT_NULL) {
2099 res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
2100 if (res != KERN_SUCCESS)
2101 fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
2102 thread->reply_port = MACH_PORT_NULL;
2103 }
2104 if (thread->freeStackOnExit) {
2105 thread->sig = _PTHREAD_NO_SIG;
2106 #if WQ_TRACE
2107 __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
2108 #endif
2109 vm_deallocate(mach_task_self(), thread, pthreadsize);
2110 } else {
2111 thread->sig = _PTHREAD_NO_SIG;
2112 #if WQ_TRACE
2113 __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
2114 #endif
2115 free(thread);
2116 }
2117 return(0);
2118 }
2119
2120 /* ALWAYS called with the list lock held and returns with the list lock held */
2121 int
2122 _pthread_find_thread(pthread_t thread)
2123 {
2124 pthread_t p;
2125
2126 loop:
2127 TAILQ_FOREACH(p, &__pthread_head, plist) {
2128 if (p == thread) {
2129 if (thread->kernel_thread == MACH_PORT_NULL) {
2130 UNLOCK(_pthread_list_lock);
2131 sched_yield();
2132 LOCK(_pthread_list_lock);
2133 goto loop;
2134 }
2135 return(0);
2136 }
2137 }
2138 return(ESRCH);
2139 }
2140
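/* Look up a thread on the global list and optionally return its kernel port;
 * with only_joinable set, detached threads are rejected with EINVAL. */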
2141 int
2142 _pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
2143 {
2144 mach_port_t kport;
2145 int ret = 0;
2146
2147 if (thread == NULL)
2148 return(ESRCH);
2149
2150 LOCK(_pthread_list_lock);
2151
2152 if ((ret = _pthread_find_thread(thread)) != 0) {
2153 UNLOCK(_pthread_list_lock);
2154 return(ret);
2155 }
2156 if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
2157 UNLOCK(_pthread_list_lock);
2158 return(EINVAL);
2159 }
2160 kport = thread->kernel_thread;
2161 UNLOCK(_pthread_list_lock);
2162 if (portp != NULL)
2163 *portp = kport;
2164 return(0);
2165 }
2166
2167 /* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
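/* Initialize a workqueue attribute structure with the default stack size,
 * timeshare policy, importance, affinity, and queue priority. */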
2168 int
2169 pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
2170 {
2171 attrp->stacksize = DEFAULT_STACK_SIZE;
2172 attrp->istimeshare = 1;
2173 attrp->importance = 0;
2174 attrp->affinity = 0;
2175 attrp->queueprio = WORK_QUEUE_NORMALIZER;
2176 attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
2177 return(0);
2178 }
2179
2180 int
2181 pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
2182 {
2183 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
2184 {
2185 return (0);
2186 } else
2187 {
2188 return (EINVAL); /* Not an attribute structure! */
2189 }
2190 }
2191
2192 #ifdef NOTYET /* [ */
2193 int
2194 pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep)
2195 {
2196 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2197 *stacksizep = attr->stacksize;
2198 return (0);
2199 } else {
2200 return (EINVAL); /* Not an attribute structure! */
2201 }
2202 }
2203
2204 int
2205 pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize)
2206 {
2207 if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
2208 attr->stacksize = stacksize;
2209 return (0);
2210 } else {
2211 return (EINVAL); /* Not an attribute structure! */
2212 }
2213 }
2214
2215
2216 int
2217 pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesharep)
2218 {
2219 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2220 *istimesharep = attr->istimeshare;
2221 return (0);
2222 } else {
2223 return (EINVAL); /* Not an attribute structure! */
2224 }
2225 }
2226
2227 int
2228 pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare)
2229 {
2230 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2231 if (istimeshare != 0)
2232 attr->istimeshare = istimeshare;
2233 else
2234 attr->istimeshare = 0;
2235 return (0);
2236 } else {
2237 return (EINVAL); /* Not an attribute structure! */
2238 }
2239 }
2240
2241 int
2242 pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep)
2243 {
2244 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2245 *importancep = attr->importance;
2246 return (0);
2247 } else {
2248 return (EINVAL); /* Not an attribute structure! */
2249 }
2250 }
2251
2252 int
2253 pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance)
2254 {
2255 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
2256 attr->importance = importance;
2257 return (0);
2258 } else {
2259 return (EINVAL); /* Not an attribute structure! */
2260 }
2261 }
2262
2263 int
2264 pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp)
2265 {
2266 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2267 *affinityp = attr->affinity;
2268 return (0);
2269 } else {
2270 return (EINVAL); /* Not an attribute structure! */
2271 }
2272 }
2273
2274 int
2275 pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity)
2276 {
2277 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
2278 attr->affinity = affinity;
2279 return (0);
2280 } else {
2281 return (EINVAL); /* Not an attribute structure! */
2282 }
2283 }
2284
2285 #endif /* NOTYET ] */
2286
2287 int
2288 pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
2289 {
2290 if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
2291 *qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
2292 return (0);
2293 } else {
2294 return (EINVAL); /* Not an attribute structure! */
2295 }
2296 }
2297
2298 int
2299 pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
2300 {
2301 /* only -2 to +2 is valid */
2302 if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
2303 attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
2304 return (0);
2305 } else {
2306 return (EINVAL); /* Not an attribute structure! */
2307 }
2308 }
2309
2310 /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
2311
2312 static void
2313 workqueue_list_lock()
2314 {
2315 OSSpinLockLock(&__workqueue_list_lock);
2316 }
2317
2318 static void
2319 workqueue_list_unlock()
2320 {
2321 OSSpinLockUnlock(&__workqueue_list_lock);
2322 }
2323
2324 int
2325 pthread_workqueue_init_np()
2326 {
2327 int ret;
2328
2329 workqueue_list_lock();
2330 ret = _pthread_work_internal_init();
2331 workqueue_list_unlock();
2332
2333 return(ret);
2334 }
2335
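/* One-time setup of the workqueue machinery: register the thread entry points with
 * the kernel, fill in the default workqueue attributes, initialize the priority
 * queue heads, pre-populate the work item and workqueue pools, and open the kernel
 * work queue. Returns ENOMEM if the kernel work queue cannot be opened. */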
2336 static int
2337 _pthread_work_internal_init(void)
2338 {
2339 int i, error;
2340 pthread_workqueue_head_t headp;
2341 pthread_workitem_t witemp;
2342 pthread_workqueue_t wq;
2343
2344 if (kernel_workq_setup == 0) {
2345 #if defined(__i386__) || defined(__x86_64__)
2346 __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
2347 #else
2348 __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
2349 #endif
2350
2351 _pthread_wq_attr_default.stacksize = DEFAULT_STACK_SIZE;
2352 _pthread_wq_attr_default.istimeshare = 1;
2353 _pthread_wq_attr_default.importance = 0;
2354 _pthread_wq_attr_default.affinity = 0;
2355 _pthread_wq_attr_default.queueprio = WORK_QUEUE_NORMALIZER;
2356 _pthread_wq_attr_default.sig = PTHEAD_WRKQUEUE_ATTR_SIG;
2357
2358 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2359 headp = __pthread_wq_head_tbl[i];
2360 TAILQ_INIT(&headp->wqhead);
2361 headp->next_workq = 0;
2362 }
2363
2364 /* create work item and workqueue pools */
2365 witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
2366 bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
2367 for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
2368 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
2369 }
2370 wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
2371 bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
2372 for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
2373 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
2374 }
2375
2376 if ((error = __workq_open()) != 0) {
2377 TAILQ_INIT(&__pthread_workitem_pool_head);
2378 TAILQ_INIT(&__pthread_workqueue_pool_head);
2379 free(witemp);
2380 free(wq);
2381 return(ENOMEM);
2382 }
2383 kernel_workq_setup = 1;
2384 }
2385 return(0);
2386 }
2387
2388
2389 /* This routine is called with list lock held */
2390 static pthread_workitem_t
2391 alloc_workitem(void)
2392 {
2393 pthread_workitem_t witem;
2394
2395 if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
2396 workqueue_list_unlock();
2397 witem = malloc(sizeof(struct _pthread_workitem));
2398 workqueue_list_lock();
2399 } else {
2400 witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
2401 TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
2402 }
2403 return(witem);
2404 }
2405
2406 /* This routine is called with list lock held */
2407 static void
2408 free_workitem(pthread_workitem_t witem)
2409 {
2410 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
2411 }
2412
2413 /* This routine is called with list lock held */
2414 static pthread_workqueue_t
2415 alloc_workqueue(void)
2416 {
2417 pthread_workqueue_t wq;
2418
2419 if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
2420 workqueue_list_unlock();
2421 wq = malloc(sizeof(struct _pthread_workqueue));
2422 workqueue_list_lock();
2423 } else {
2424 wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
2425 TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
2426 }
2427 user_workq_count++;
2428 return(wq);
2429 }
2430
2431 /* This routine is called with list lock held */
2432 static void
2433 free_workqueue(pthread_workqueue_t wq)
2434 {
2435 user_workq_count--;
2436 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
2437 }
2438
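/* Initialize a workqueue structure from the given attributes (or the defaults when
 * attr is NULL) and hook it up to the priority bucket selected by its queue priority. */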
2439 static void
2440 _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
2441 {
2442 bzero(wq, sizeof(struct _pthread_workqueue));
2443 if (attr != NULL) {
2444 wq->stacksize = attr->stacksize;
2445 wq->istimeshare = attr->istimeshare;
2446 wq->importance = attr->importance;
2447 wq->affinity = attr->affinity;
2448 wq->queueprio = attr->queueprio;
2449 } else {
2450 wq->stacksize = DEFAULT_STACK_SIZE;
2451 wq->istimeshare = 1;
2452 wq->importance = 0;
2453 wq->affinity = 0;
2454 wq->queueprio = WORK_QUEUE_NORMALIZER;
2455 }
2456 LOCK_INIT(wq->lock);
2457 wq->flags = 0;
2458 TAILQ_INIT(&wq->item_listhead);
2459 TAILQ_INIT(&wq->item_kernhead);
2460 wq->wq_list.tqe_next = 0;
2461 wq->wq_list.tqe_prev = 0;
2462 wq->sig = PTHEAD_WRKQUEUE_SIG;
2463 wq->headp = __pthread_wq_head_tbl[wq->queueprio];
2464 }
2465
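/* Returns 1 if 'workq' carries a valid workqueue signature, 0 otherwise. */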
2466 int
2467 valid_workq(pthread_workqueue_t workq)
2468 {
2469 if (workq->sig == PTHEAD_WRKQUEUE_SIG)
2470 return(1);
2471 else
2472 return(0);
2473 }
2474
2475
2476 /* Called with the list lock held; drops the lock before returning. */
2477 static void
2478 pick_nextworkqueue_droplock()
2479 {
2480 int i, curwqprio, val, found;
2481 pthread_workqueue_head_t headp;
2482 pthread_workqueue_t workq;
2483 pthread_workqueue_t nworkq = NULL;
2484
2485 loop:
2486 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2487 found = 0;
2488 for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
2489 wqreadyprio = i; /* because there is nothing else higher to run */
2490 headp = __pthread_wq_head_tbl[i];
2491
2492 if (TAILQ_EMPTY(&headp->wqhead))
2493 continue;
2494 workq = headp->next_workq;
2495 if (workq == NULL)
2496 workq = TAILQ_FIRST(&headp->wqhead);
2497 curwqprio = workq->queueprio;
2498 nworkq = workq; /* starting pt */
2499 while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
2500 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2501 if (headp->next_workq == NULL)
2502 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2503 val = post_nextworkitem(workq);
2504
2505 if (val != 0) {
2506 /* things could have changed, so reassess */
2507 /* If the kernel queue is full, skip */
2508 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2509 break;
2510 /* If anything with higher prio arrived, then reevaluate */
2511 if (wqreadyprio < curwqprio)
2512 goto loop; /* we need to re-evaluate */
2513 /* we can post some more work items */
2514 found = 1;
2515 }
2516
2517 /* cannot use workq here as it could be freed */
2518 if (TAILQ_EMPTY(&headp->wqhead))
2519 break;
2520 /* if we found nothing to run and only one workqueue in the list, skip */
2521 if ((val == 0) && (workq == headp->next_workq))
2522 break;
2523 workq = headp->next_workq;
2524 if (workq == NULL)
2525 workq = TAILQ_FIRST(&headp->wqhead);
2526 if (val != 0)
2527 nworkq = workq;
2528 /* if we found nothing to run and are back at the workq where we started */
2529 if ((val == 0) && (workq == nworkq))
2530 break;
2531 }
2532 if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
2533 break;
2534 }
2535 /* nothing found to run? */
2536 if (found == 0)
2537 break;
2538 }
2539 workqueue_list_unlock();
2540 }
2541
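/* Called with the list lock held. Posts the next work item of 'workq' to the kernel,
 * handling barrier and destroy items specially; returns 1 if an item was processed,
 * 0 if the queue is suspended, empty, or blocked behind an applied barrier. */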
2542 static int
2543 post_nextworkitem(pthread_workqueue_t workq)
2544 {
2545 int error;
2546 pthread_workitem_t witem;
2547 pthread_workqueue_head_t headp;
2548 void (*func)(pthread_workqueue_t, void *);
2549
2550 if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
2551 return(0);
2552 }
2553 if (TAILQ_EMPTY(&workq->item_listhead)) {
2554 return(0);
2555 }
2556 witem = TAILQ_FIRST(&workq->item_listhead);
2557 headp = workq->headp;
2558 if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
2559
2560 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2561 return(0);
2562 }
2563 /* A barrier posted when nothing is outstanding also needs to be handled: */
2564 /* there is nothing to wait for, so run it immediately. */
2565 if (workq->kq_count != 0) {
2566 witem->flags |= PTH_WQITEM_APPLIED;
2567 workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
2568 workq->barrier_count = workq->kq_count;
2569 #if WQ_TRACE
2570 __kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
2571 #endif
2572 return(1);
2573 } else {
2574 #if WQ_TRACE
2575 __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
2576 #endif
2577 if (witem->func != NULL) {
2578 workqueue_list_unlock();
2579 func = witem->func;
2580 (*func)(workq, witem->func_arg);
2581 workqueue_list_lock();
2582 }
2583 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2584 witem->flags = 0;
2585 free_workitem(witem);
2586 return(1);
2587 }
2588 } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
2589 #if WQ_TRACE
2590 __kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
2591 #endif
2592 if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
2593 return(0);
2594 }
2595 witem->flags |= PTH_WQITEM_APPLIED;
2596 workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
2597 workq->barrier_count = workq->kq_count;
2598 workq->term_callback = witem->func;
2599 workq->term_callarg = witem->func_arg;
2600 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2601 if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
2602 if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
2603 #if WQ_TRACE
2604 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
2605 #endif
2606 }
2607 witem->flags = 0;
2608 free_workitem(witem);
2609 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2610 #if WQ_TRACE
2611 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
2612 #endif
2613 headp = __pthread_wq_head_tbl[workq->queueprio];
2614 if (headp->next_workq == workq) {
2615 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2616 if (headp->next_workq == NULL) {
2617 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2618 if (headp->next_workq == workq)
2619 headp->next_workq = NULL;
2620 }
2621 }
2622 workq->sig = 0;
2623 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2624 if (workq->term_callback != NULL) {
2625 workqueue_list_unlock();
2626 (*workq->term_callback)(workq, workq->term_callarg);
2627 workqueue_list_lock();
2628 }
2629 free_workqueue(workq);
2630 return(1);
2631 } else
2632 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2633 #if WQ_TRACE
2634 __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
2635 #endif
2636 return(1);
2637 } else {
2638 #if WQ_TRACE
2639 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
2640 #endif
2641 TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
2642 TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
2643 if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
2644 workq->kq_count++;
2645 witem->flags |= PTH_WQITEM_KERN_COUNT;
2646 }
2647 OSAtomicIncrement32(&kernel_workq_count);
2648 workqueue_list_unlock();
2649 if ((error = __workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
2650 OSAtomicDecrement32(&kernel_workq_count);
2651 workqueue_list_lock();
2652 #if WQ_TRACE
2653 __kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
2654 #endif
2655 TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
2656 TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
2657 if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
2658 workq->flags |= PTHREAD_WORKQ_REQUEUED;
2659 } else
2660 workqueue_list_lock();
2661 #if WQ_TRACE
2662 __kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
2663 #endif
2664 return(1);
2665 }
2666 /* no one should reach this point */
2667 #if 1
2668 printf("error in logic for next workitem\n");
2669 abort();
2670 #endif
2671 return(0);
2672 }
2673
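/* Entry point for a workqueue thread coming back from the kernel: set up the pthread
 * structure for a newly created thread (or reset TSD when reusing one), run the work
 * item's function, then finish via workqueue_exit(). */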
2674 void
2675 _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
2676 {
2677 int ret;
2678 pthread_attr_t *attrs = &_pthread_attr_default;
2679 pthread_workqueue_t workq;
2680 pthread_t pself;
2681
2682
2683 workq = item->workq;
2684 if (reuse == 0) {
2685 /* reuse == 0: the thread was newly created to run a work item */
2686 _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
2687 self->wqthread = 1;
2688 self->parentcheck = 1;
2689
2690 /* These are not joinable threads */
2691 self->detached &= ~PTHREAD_CREATE_JOINABLE;
2692 self->detached |= PTHREAD_CREATE_DETACHED;
2693 #if defined(__i386__) || defined(__x86_64__)
2694 _pthread_set_self(self);
2695 #endif
2696 #if WQ_TRACE
2697 __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
2698 #endif
2699 self->kernel_thread = kport;
2700 self->fun = item->func;
2701 self->arg = item->func_arg;
2702 /* Add to the pthread list */
2703 LOCK(_pthread_list_lock);
2704 TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
2705 #if WQ_TRACE
2706 __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
2707 #endif
2708 _pthread_count++;
2709 UNLOCK(_pthread_list_lock);
2710 } else {
2711 /* reuse == 1: an existing thread is being reused to run another work item */
2712 #if WQ_TRACE
2713 __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
2714 #endif
2715 /* reset all tsd from 1 to KEYS_MAX */
2716 _pthread_tsd_reinit(self);
2717
2718 self->fun = item->func;
2719 self->arg = item->func_arg;
2720 }
2721
2722 #if WQ_DEBUG
2723 if (reuse == 0) {
2724 pself = pthread_self();
2725 if (self != pself) {
2726 #if WQ_TRACE
2727 __kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
2728 #endif
2729 printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
2730 _pthread_set_self(self);
2731 pself = pthread_self();
2732 if (self != pself)
2733 printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
2734 pself = self;
2735 }
2736 } else {
2737 pself = pthread_self();
2738 if (self != pself) {
2739 printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
2740 abort();
2741 }
2742 }
2743 #endif /* WQ_DEBUG */
2744
2745 self->cur_workq = workq;
2746 self->cur_workitem = item;
2747 OSAtomicDecrement32(&kernel_workq_count);
2748
2749 ret = (*self->fun)(self->arg);
2750
2751 workqueue_exit(self, workq, item);
2752
2753 }
2754
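/* Tear-down after a work item completes: return the item to the pool, finish any
 * pending barrier (running its callback and, if termination was requested, destroying
 * the workqueue), then pick the next workqueue and return the thread to the kernel. */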
2755 static void
2756 workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
2757 {
2758 pthread_attr_t *attrs = &_pthread_attr_default;
2759 pthread_workitem_t baritem;
2760 pthread_workqueue_head_t headp;
2761 void (*func)(pthread_workqueue_t, void *);
2762
2763 workqueue_list_lock();
2764
2765 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
2766 workq->kq_count--;
2767 #if WQ_TRACE
2768 __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
2769 #endif
2770 item->flags = 0;
2771 free_workitem(item);
2772
2773 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
2774 workq->barrier_count--;
2775 #if WQ_TRACE
2776 __kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
2777 #endif
2778 if (workq->barrier_count <= 0 ) {
2779 /* Need to remove barrier item from the list */
2780 baritem = TAILQ_FIRST(&workq->item_listhead);
2781 #if WQ_DEBUG
2782 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
2783 printf("Incorect bar item being removed in barrier processing\n");
2784 #endif /* WQ_DEBUG */
2785 /* if the front item is a barrier and a callback is registered, run it */
2786 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
2787 workqueue_list_unlock();
2788 func = baritem->func;
2789 (*func)(workq, baritem->func_arg);
2790 workqueue_list_lock();
2791 }
2792 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
2793 baritem->flags = 0;
2794 free_workitem(baritem);
2795 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2796 #if WQ_TRACE
2797 __kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
2798 #endif
2799 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
2800 headp = __pthread_wq_head_tbl[workq->queueprio];
2801 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2802 #if WQ_TRACE
2803 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
2804 #endif
2805 if (headp->next_workq == workq) {
2806 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2807 if (headp->next_workq == NULL) {
2808 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2809 if (headp->next_workq == workq)
2810 headp->next_workq = NULL;
2811 }
2812 }
2813 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2814 workq->sig = 0;
2815 if (workq->term_callback != NULL) {
2816 workqueue_list_unlock();
2817 (*workq->term_callback)(workq, workq->term_callarg);
2818 workqueue_list_lock();
2819 }
2820 free_workqueue(workq);
2821 } else {
2822 /* if there are higher priority schedulable items, reset wqreadyprio */
2823 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
2824 wqreadyprio = workq->queueprio;
2825 }
2826 }
2827 }
2828 #if WQ_TRACE
2829 else {
2830 __kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
2831 }
2832
2833 __kdebug_trace(0x900005c, self, item, 0, 0, 0);
2834 #endif
2835 pick_nextworkqueue_droplock();
2836 _pthread_workq_return(self);
2837 }
2838
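/* Return a workqueue thread to the kernel: disable cancellation, block signals,
 * run any remaining cleanup handlers, clean up TSD, notify the kernel via
 * WQOPS_THREAD_RETURN, and terminate the thread. */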
2839 static void
2840 _pthread_workq_return(pthread_t self)
2841 {
2842 struct __darwin_pthread_handler_rec *handler;
2843 int value = 0;
2844 int *value_ptr = &value;
2845
2846 /* set cancel state to disable and type to deferred */
2847 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
2848
2849 /* Prevent this thread from receiving any signals */
2850 __disable_threadsignal(1);
2851
2852 while ((handler = self->__cleanup_stack) != 0)
2853 {
2854 (handler->__routine)(handler->__arg);
2855 self->__cleanup_stack = handler->__next;
2856 }
2857 _pthread_tsd_cleanup(self);
2858
2859 __workq_ops(WQOPS_THREAD_RETURN, NULL, 0);
2860
2861 /* This is the way to terminate the thread */
2862 _pthread_exit(self, NULL);
2863 }
2864
2865
2866 /* returns 0 if it handles it, otherwise 1 */
2867 static int
2868 handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
2869 {
2870 pthread_workitem_t baritem;
2871 pthread_workqueue_head_t headp;
2872 void (*func)(pthread_workqueue_t, void *);
2873
2874 if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
2875 workq->barrier_count--;
2876 if (workq->barrier_count <= 0 ) {
2877 /* Need to remove barrier item from the list */
2878 baritem = TAILQ_FIRST(&workq->item_listhead);
2879 #if WQ_DEBUG
2880 if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
2881 printf("Incorect bar item being removed in barrier processing\n");
2882 #endif /* WQ_DEBUG */
2883 /* if the front item is a barrier and a callback is registered, run it */
2884 if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
2885 && (baritem->func != NULL)) {
2886 workqueue_list_unlock();
2887 func = baritem->func;
2888 (*func)(workq, baritem->func_arg);
2889 workqueue_list_lock();
2890 }
2891 TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
2892 baritem->flags = 0;
2893 free_workitem(baritem);
2894 item->flags = 0;
2895 free_workitem(item);
2896 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
2897 #if WQ_TRACE
2898 __kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
2899 #endif
2900 if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
2901 headp = __pthread_wq_head_tbl[workq->queueprio];
2902 workq->flags |= PTHREAD_WORKQ_DESTROYED;
2903 #if WQ_TRACE
2904 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
2905 #endif
2906 if (headp->next_workq == workq) {
2907 headp->next_workq = TAILQ_NEXT(workq, wq_list);
2908 if (headp->next_workq == NULL) {
2909 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2910 if (headp->next_workq == workq)
2911 headp->next_workq = NULL;
2912 }
2913 }
2914 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
2915 workq->sig = 0;
2916 if (workq->term_callback != NULL) {
2917 workqueue_list_unlock();
2918 (*workq->term_callback)(workq, workq->term_callarg);
2919 workqueue_list_lock();
2920 }
2921 free_workqueue(workq);
2922 pick_nextworkqueue_droplock();
2923 return(0);
2924 } else {
2925 /* if there are higher priority schedulable items, reset wqreadyprio */
2926 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
2927 wqreadyprio = workq->queueprio;
2928 free_workitem(item);
2929 pick_nextworkqueue_droplock();
2930 return(0);
2931 }
2932 }
2933 }
2934 return(1);
2935 }
2936 /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
2937
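/*
 * Example (illustrative sketch): create a workqueue, queue one work item, and
 * request destruction once it drains. 'my_work_function' and 'my_arg' are
 * placeholder names; error handling is omitted for brevity.
 *
 *	pthread_workqueue_attr_t attr;
 *	pthread_workqueue_t wq;
 *	pthread_workitem_handle_t handle;
 *
 *	pthread_workqueue_attr_init_np(&attr);
 *	pthread_workqueue_attr_setqueuepriority_np(&attr, 0);
 *	pthread_workqueue_create_np(&wq, &attr);
 *	pthread_workqueue_additem_np(wq, my_work_function, my_arg, &handle);
 *	pthread_workqueue_destroy_np(wq, NULL, NULL);
 */
/* Create a new workqueue with the given attributes (or defaults when attr is NULL),
 * add it to the priority bucket matching its queue priority, and return it through
 * 'workqp'. */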
2938 int
2939 pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
2940 {
2941 pthread_workqueue_t wq;
2942 pthread_workqueue_head_t headp;
2943
2944 if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
2945 return(EINVAL);
2946 }
2947
2948 if (__is_threaded == 0)
2949 __is_threaded = 1;
2950
2951 workqueue_list_lock();
2952 if (kernel_workq_setup == 0) {
2953 int ret = _pthread_work_internal_init();
2954 if (ret != 0) {
2955 workqueue_list_unlock();
2956 return(ret);
2957 }
2958 }
2959
2960 wq = alloc_workqueue();
2961
2962 _pthread_workq_init(wq, attr);
2963
2964 headp = __pthread_wq_head_tbl[wq->queueprio];
2965 TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
2966 if (headp->next_workq == NULL) {
2967 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
2968 }
2969
2970 workqueue_list_unlock();
2971
2972 *workqp = wq;
2973
2974 return(0);
2975 }
2976
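/* Destroy a workqueue. If nothing is queued or running, it is torn down immediately
 * (running the termination callback, if any); otherwise a destroy work item is queued
 * behind the pending items. Returns EINPROGRESS if termination was already underway. */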
2977 int
2978 pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
2979 {
2980 pthread_workitem_t witem;
2981 pthread_workqueue_head_t headp;
2982
2983 if (valid_workq(workq) == 0) {
2984 return(EINVAL);
2985 }
2986
2987 workqueue_list_lock();
2988
2989 /*
2990 * Allocate the work item here since doing so can drop the lock;
2991 * this also means the workqueue state only has to be evaluated once.
2992 */
2993 witem = alloc_workitem();
2994 witem->item_entry.tqe_next = 0;
2995 witem->item_entry.tqe_prev = 0;
2996 witem->func = callback_func;
2997 witem->func_arg = callback_arg;
2998 witem->flags = PTH_WQITEM_DESTROY;
2999
3000 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
3001 workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
3002 /* If nothing queued or running, destroy now */
3003 if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
3004 workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
3005 headp = __pthread_wq_head_tbl[workq->queueprio];
3006 workq->term_callback = callback_func;
3007 workq->term_callarg = callback_arg;
3008 if (headp->next_workq == workq) {
3009 headp->next_workq = TAILQ_NEXT(workq, wq_list);
3010 if (headp->next_workq == NULL) {
3011 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
3012 if (headp->next_workq == workq)
3013 headp->next_workq = NULL;
3014 }
3015 }
3016 TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
3017 workq->sig = 0;
3018 free_workitem(witem);
3019 if (workq->term_callback != NULL) {
3020 workqueue_list_unlock();
3021 (*workq->term_callback)(workq, workq->term_callarg);
3022 workqueue_list_lock();
3023 }
3024 #if WQ_TRACE
3025 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
3026 #endif
3027 free_workqueue(workq);
3028 workqueue_list_unlock();
3029 return(0);
3030 }
3031 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3032 } else {
3033 free_workitem(witem);
3034 workqueue_list_unlock();
3035 return(EINPROGRESS);
3036 }
3037 workqueue_list_unlock();
3038 return(0);
3039 }
3040
3041
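/* Queue a work item on 'workq' and kick the dispatcher; optionally returns a handle
 * through 'itemhandlep' that can later be passed to pthread_workqueue_removeitem_np().
 * Returns ESRCH if the workqueue is being destroyed. */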
3042 int
3043 pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
3044 {
3045 pthread_workitem_t witem;
3046
3047 if (valid_workq(workq) == 0) {
3048 return(EINVAL);
3049 }
3050
3051 workqueue_list_lock();
3052
3053 /*
3054 * Allocate the work item here since doing so can drop the lock;
3055 * this also means the workqueue state only has to be evaluated once.
3056 */
3057 witem = alloc_workitem();
3058 witem->func = workitem_func;
3059 witem->func_arg = workitem_arg;
3060 witem->flags = 0;
3061 witem->workq = workq;
3062 witem->item_entry.tqe_next = 0;
3063 witem->item_entry.tqe_prev = 0;
3064
3065 /* alloc workitem can drop the lock, check the state */
3066 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3067 free_workitem(witem);
3068 workqueue_list_unlock();
3069 *itemhandlep = 0;
3070 return(ESRCH);
3071 }
3072
3073 if (itemhandlep != NULL)
3074 *itemhandlep = (pthread_workitem_handle_t *)witem;
3075 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3076 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3077 wqreadyprio = workq->queueprio;
3078
3079 pick_nextworkqueue_droplock();
3080
3081 return(0);
3082 }
3083
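/* Remove a previously queued work item, whether it is still on the user-level list
 * or has already been handed to the kernel; returns EBUSY if the kernel removal fails
 * and EINVAL if the handle is not found. */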
3084 int
3085 pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
3086 {
3087 pthread_workitem_t item, baritem;
3088 pthread_workqueue_head_t headp;
3089 int error;
3090
3091 if (valid_workq(workq) == 0) {
3092 return(EINVAL);
3093 }
3094
3095 workqueue_list_lock();
3096 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3097 workqueue_list_unlock();
3098 return(ESRCH);
3099 }
3100
3101 TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
3102 if (item == (pthread_workitem_t)itemhandle) {
3103 TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
3104 if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
3105 workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
3106 workq->barrier_count = 0;
3107 if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
3108 wqreadyprio = workq->queueprio;
3109 }
3110 } else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
3111 workq->kq_count--;
3112 item->flags |= PTH_WQITEM_REMOVED;
3113 if (handle_removeitem(workq, item) == 0)
3114 return(0);
3115 }
3116 item->flags |= PTH_WQITEM_NOTINLIST;
3117 free_workitem(item);
3118 workqueue_list_unlock();
3119 return(0);
3120 }
3121 }
3122
3123 TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
3124 if (item == (pthread_workitem_t)itemhandle) {
3125 workqueue_list_unlock();
3126 if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
3127 workqueue_list_lock();
3128 TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
3129 OSAtomicDecrement32(&kernel_workq_count);
3130 workq->kq_count--;
3131 item->flags |= PTH_WQITEM_REMOVED;
3132 if (handle_removeitem(workq, item) != 0) {
3133 free_workitem(item);
3134 pick_nextworkqueue_droplock();
3135 }
3136 return(0);
3137 } else {
3138 workqueue_list_unlock();
3139 return(EBUSY);
3140 }
3141 }
3142 }
3143 workqueue_list_unlock();
3144 return(EINVAL);
3145 }
3146
3147
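/* Queue a barrier item on 'workq': items queued behind the barrier are not posted to
 * the kernel until everything ahead of it has completed, and the optional callback
 * runs when the barrier is reached. */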
3148 int
3149 pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
3150 {
3151 pthread_workitem_t witem;
3152
3153 if (valid_workq(workq) == 0) {
3154 return(EINVAL);
3155 }
3156
3157 workqueue_list_lock();
3158
3159 /*
3160 * Allocate the work item here since doing so can drop the lock;
3161 * this also means the workqueue state only has to be evaluated once.
3162 */
3163 witem = alloc_workitem();
3164 witem->item_entry.tqe_next = 0;
3165 witem->item_entry.tqe_prev = 0;
3166 witem->func = callback_func;
3167 witem->func_arg = callback_arg;
3168 witem->flags = PTH_WQITEM_BARRIER;
3169
3170 /* alloc workitem can drop the lock, check the state */
3171 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3172 free_workitem(witem);
3173 workqueue_list_unlock();
3174 return(ESRCH);
3175 }
3176
3177 if (itemhandlep != NULL)
3178 *itemhandlep = (pthread_workitem_handle_t *)witem;
3179
3180 TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
3181 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3182 wqreadyprio = workq->queueprio;
3183
3184 pick_nextworkqueue_droplock();
3185
3186 return(0);
3187 }
3188
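/* Suspend a workqueue: no further items are posted to the kernel until a matching
 * number of pthread_workqueue_resume_np() calls is made. */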
3189 int
3190 pthread_workqueue_suspend_np(pthread_workqueue_t workq)
3191 {
3192 if (valid_workq(workq) == 0) {
3193 return(EINVAL);
3194 }
3195 workqueue_list_lock();
3196 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3197 workqueue_list_unlock();
3198 return(ESRCH);
3199 }
3200
3201 workq->flags |= PTHREAD_WORKQ_SUSPEND;
3202 workq->suspend_count++;
3203 workqueue_list_unlock();
3204 return(0);
3205 }
3206
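/* Resume a suspended workqueue; once the suspend count drops to zero the queue
 * becomes schedulable again and the dispatcher is kicked. */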
3207 int
3208 pthread_workqueue_resume_np(pthread_workqueue_t workq)
3209 {
3210 if (valid_workq(workq) == 0) {
3211 return(EINVAL);
3212 }
3213 workqueue_list_lock();
3214 if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
3215 workqueue_list_unlock();
3216 return(ESRCH);
3217 }
3218
3219 workq->suspend_count--;
3220 if (workq->suspend_count <= 0) {
3221 workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
3222 if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
3223 wqreadyprio = workq->queueprio;
3224
3225 pick_nextworkqueue_droplock();
3226 } else
3227 workqueue_list_unlock();
3228
3229
3230 return(0);
3231 }
3232
3233 #else /* !BUILDING_VARIANT ] [ */
3234 extern int __unix_conforming;
3235 extern int _pthread_count;
3236 extern pthread_lock_t _pthread_list_lock;
3237 extern void _pthread_testcancel(pthread_t thread, int isconforming);
3238 extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);
3239
3240 #endif /* !BUILDING_VARIANT ] */
3241
3242 #if __DARWIN_UNIX03
3243
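/* Cleanup handler used around pthread_join() (see pthread_cancelable.c): for old-style
 * threads it reaps the target if it already exited and returns the death semaphore to
 * the pool; for new-style threads it simply clears the joiner field. */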
3244 __private_extern__ void
3245 __posix_join_cleanup(void *arg)
3246 {
3247 pthread_t thread = (pthread_t)arg;
3248 int already_exited, res;
3249 void * dummy;
3250 semaphore_t death;
3251 mach_port_t joinport;
3252 int newstyle = 0;
3253
3254 LOCK(thread->lock);
3255 already_exited = (thread->detached & _PTHREAD_EXITED);
3256
3257 newstyle = thread->newstyle;
3258
3259 #if WQ_TRACE
3260 __kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
3261 #endif
3262 if (newstyle == 0) {
3263 death = thread->death;
3264 if (!already_exited){
3265 thread->joiner = (struct _pthread *)NULL;
3266 UNLOCK(thread->lock);
3267 restore_sem_to_pool(death);
3268 } else {
3269 UNLOCK(thread->lock);
3270 while ((res = _pthread_reap_thread(thread,
3271 thread->kernel_thread,
3272 &dummy, 1)) == EAGAIN)
3273 {
3274 sched_yield();
3275 }
3276 restore_sem_to_pool(death);
3277
3278 }
3279
3280 } else {
3281 /* leave another thread to join */
3282 thread->joiner = (struct _pthread *)NULL;
3283 UNLOCK(thread->lock);
3284 }
3285 }
3286
3287 #endif /* __DARWIN_UNIX03 */
3288
3289
3290 /*
3291 * Wait for a thread to terminate and obtain its exit value.
3292 */
3293 /*
3294 int
3295 pthread_join(pthread_t thread,
3296 void **value_ptr)
3297
3298 moved to pthread_cancelable.c */
3299
3300 /*
3301 * Cancel a thread
3302 */
3303 int
3304 pthread_cancel(pthread_t thread)
3305 {
3306 #if __DARWIN_UNIX03
3307 if (__unix_conforming == 0)
3308 __unix_conforming = 1;
3309 #endif /* __DARWIN_UNIX03 */
3310
3311 if (_pthread_lookup_thread(thread, NULL, 0) != 0)
3312 return(ESRCH);
3313
3314 #if __DARWIN_UNIX03
3315 int state;
3316
3317 LOCK(thread->lock);
3318 state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3319 UNLOCK(thread->lock);
3320 if (state & PTHREAD_CANCEL_ENABLE)
3321 __pthread_markcancel(thread->kernel_thread);
3322 #else /* __DARWIN_UNIX03 */
3323 thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
3324 #endif /* __DARWIN_UNIX03 */
3325 return (0);
3326 }
3327
3328 void
3329 pthread_testcancel(void)
3330 {
3331 pthread_t self = pthread_self();
3332
3333 #if __DARWIN_UNIX03
3334 if (__unix_conforming == 0)
3335 __unix_conforming = 1;
3336 _pthread_testcancel(self, 1);
3337 #else /* __DARWIN_UNIX03 */
3338 _pthread_testcancel(self, 0);
3339 #endif /* __DARWIN_UNIX03 */
3340
3341 }
3342
3343
3344 /*
3345 * Query/update the cancelability 'state' of a thread
3346 */
3347 int
3348 pthread_setcancelstate(int state, int *oldstate)
3349 {
3350 #if __DARWIN_UNIX03
3351 if (__unix_conforming == 0) {
3352 __unix_conforming = 1;
3353 }
3354 return (_pthread_setcancelstate_internal(state, oldstate, 1));
3355 #else /* __DARWIN_UNIX03 */
3356 return (_pthread_setcancelstate_internal(state, oldstate, 0));
3357 #endif /* __DARWIN_UNIX03 */
3358
3359 }
3360
3361
3362
3363 /*
3364 * Query/update the cancelability 'type' of a thread
3365 */
3366 int
3367 pthread_setcanceltype(int type, int *oldtype)
3368 {
3369 pthread_t self = pthread_self();
3370
3371 #if __DARWIN_UNIX03
3372 if (__unix_conforming == 0)
3373 __unix_conforming = 1;
3374 #endif /* __DARWIN_UNIX03 */
3375
3376 if ((type != PTHREAD_CANCEL_DEFERRED) &&
3377 (type != PTHREAD_CANCEL_ASYNCHRONOUS))
3378 return EINVAL;
3379 self = pthread_self();
3380 LOCK(self->lock);
3381 if (oldtype)
3382 *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
3383 self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
3384 self->cancel_state |= type;
3385 UNLOCK(self->lock);
3386 #if !__DARWIN_UNIX03
3387 _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
3388 #endif /* !__DARWIN_UNIX03 */
3389 return (0);
3390 }
3391
3392 int
3393 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
3394 {
3395 #if __DARWIN_UNIX03
3396 int err = 0;
3397
3398 if (__pthread_sigmask(how, set, oset) == -1) {
3399 err = errno;
3400 }
3401 return(err);
3402 #else /* __DARWIN_UNIX03 */
3403 return(__pthread_sigmask(how, set, oset));
3404 #endif /* __DARWIN_UNIX03 */
3405 }
3406
3407 /*
3408 int
3409 sigwait(const sigset_t * set, int * sig)
3410
3411 moved to pthread_cancelable.c */