/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"
#include "pthread_workqueue.h"

#include <assert.h>
#include <stdio.h>	/* For printf(). */
#include <stdlib.h>
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>
#if defined(__ppc__)
#include <libkern/OSCrossEndian.h>
#endif

extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);

#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
static int _new_pthread_create_suspended(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg,
               int create_susp);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);

/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
    mach_msg_header_t header;
    pthread_t thread;
    mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;

/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}

static mach_port_t thread_recycle_port = MACH_PORT_NULL;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;     /* number of outstanding workqueues */
static volatile unsigned int user_workitem_count = 0;  /* number of outstanding workitems */
#define KERNEL_WORKQ_ELEM_MAX 64    /* Max number of elements in the kernel */
static int wqreadyprio = 0;         /* current highest prio queue ready with items */

__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

static struct _pthread_workitem * __workqueue_pool_ptr;
static size_t __workqueue_pool_size = 0;
static int __workqueue_nitems = 0;

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
struct _pthread_workqueue_head __pthread_workq3_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WORKQ_NUM_PRIOQUEUE] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head};

static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static void grow_workitem(void);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
void _pthread_fork_child_postinit();

void pthread_workqueue_atfork_prepare(void);
void pthread_workqueue_atfork_parent(void);
void pthread_workqueue_atfork_child(void);

extern void dispatch_atfork_prepare(void);
extern void dispatch_atfork_parent(void);
extern void dispatch_atfork_child(void);

/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
#define WQOPS_THREAD_SETCONC 8

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 *
 * 31 <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
__private_extern__
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);

__private_extern__
void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);

#define PTHREAD_START_CUSTOM          0x01000000
#define PTHREAD_START_SETSCHED        0x02000000
#define PTHREAD_START_DETACHED        0x04000000
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
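
/*
 * Illustrative sketch (not part of the original source) of how the flags
 * word laid out above is packed by the creator and unpacked again in
 * _pthread_start(); `attrs` and `pflags` name the attribute block and the
 * flags argument used elsewhere in this file:
 *
 *	// pack (see _new_pthread_create_suspended below)
 *	flags |= PTHREAD_START_SETSCHED;
 *	flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *	flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
 *
 *	// unpack (see _pthread_start below)
 *	self->policy = (pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK;
 *	self->param.sched_priority = pflags & PTHREAD_START_IMPORTANCE_MASK;
 */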

static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid( void );
extern int __pthread_canceled(int);
extern void _pthread_keys_init(void);
extern int __pthread_kill(mach_port_t, int);
extern int __pthread_markcancel(int);
extern int __workq_open(void);

#define WORKQUEUE_OVERCOMMIT 0x10000

extern int __workq_kernreturn(int, pthread_workitem_t, int, int);

#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guard pages
 * are set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
    vm_address_t stackaddr;
    size_t guardsize;

    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    guardsize = attrs->guardsize;
    stackaddr = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), &stackaddr,
                attrs->stacksize + guardsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         &stackaddr, attrs->stacksize + guardsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
    *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
    return 0;
}
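
/*
 * Resulting layout (illustrative sketch, not part of the original source):
 *
 *	stackaddr                                  *stack (returned base)
 *	v                                          v
 *	+----------------+-------------------------+
 *	|  guard pages   |  stack (grows downward) |
 *	|  VM_PROT_NONE  |  attrs->stacksize bytes |
 *	+----------------+-------------------------+
 *	low address                      high address
 *
 * A thread that overruns its stack walks down into the protected guard
 * region and faults, instead of silently corrupting adjacent memory.
 */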

static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
    kern_return_t kr;
    pthread_t t;
    vm_address_t stackaddr;
    size_t guardsize, allocsize;

    assert(attrs->stacksize >= PTHREAD_STACK_MIN);

    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        t = (pthread_t)malloc(pthreadsize);
        _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
        t->freeStackOnExit = 0;
        t->freeaddr = 0;
        t->freesize = 0;
        *thread = t;
        return 0;
    }

    guardsize = attrs->guardsize;
    allocsize = attrs->stacksize + guardsize + pthreadsize;
    stackaddr = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), &stackaddr,
                allocsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         &stackaddr, allocsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

    *stack = (void *)(stackaddr + attrs->stacksize + guardsize);

    t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
    _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
    t->kernalloc = 0;
    t->freesize = allocsize;
    t->freeaddr = (void *)stackaddr;
    t->freeStackOnExit = 1;
    *thread = t;

    return 0;
}

static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
    kern_return_t res = 0;
    vm_address_t freeaddr;
    size_t freesize;
    int thread_count;
    mach_port_t kport;
    semaphore_t joinsem = SEMAPHORE_NULL;

#if PTH_TRACE
    __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
#endif
    kport = t->kernel_thread;
    joinsem = t->joiner_notify;

    if (t->freeStackOnExit) {
        freeaddr = (vm_address_t)t->freeaddr;
        if (freestruct)
            freesize = t->stacksize + t->guardsize + pthreadsize;
        else
            freesize = t->stacksize + t->guardsize;
        if (termthread) {
            mig_dealloc_reply_port(MACH_PORT_NULL);
            LOCK(_pthread_list_lock);
            if (freestruct != 0) {
                TAILQ_REMOVE(&__pthread_head, t, plist);
                /* if parent has not returned from create yet keep pthread_t */
#if PTH_LISTTRACE
                __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
#endif
                if (t->parentcheck == 0)
                    freesize -= pthreadsize;
            }
            t->childexit = 1;
            thread_count = --_pthread_count;
            UNLOCK(_pthread_list_lock);

#if PTH_TRACE
            __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
#endif
            if (thread_count <= 0)
                exit(0);
            else
                __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
            LIBC_ABORT("thread %p didn't terminate", t);
        } else {
#if PTH_TRACE
            __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
#endif
            res = vm_deallocate(mach_task_self(), freeaddr, freesize);
        }
    } else {
        if (termthread) {
            mig_dealloc_reply_port(MACH_PORT_NULL);
            LOCK(_pthread_list_lock);
            if (freestruct != 0) {
                TAILQ_REMOVE(&__pthread_head, t, plist);
#if PTH_LISTTRACE
                __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
#endif
            }
            thread_count = --_pthread_count;
            t->childexit = 1;
            UNLOCK(_pthread_list_lock);

            if (freestruct) {
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
#endif
                free(t);
            }

            freeaddr = 0;
            freesize = 0;
#if PTH_TRACE
            __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
#endif

            if (thread_count <= 0)
                exit(0);
            else
                __bsdthread_terminate(NULL, 0, kport, joinsem);
            LIBC_ABORT("thread %p didn't terminate", t);
        } else if (freestruct) {
            t->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
            __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
#endif
            free(t);
        }
    }
    return (res);
}

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        attr->sig = 0;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                 int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
               struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512*1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->freeStackOnExit = 1;
    attr->fastpath = 1;
    attr->schedset = 0;
    attr->guardsize = vm_page_size;
    return (0);
}
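
/*
 * Typical use of the attribute API above (illustrative sketch, not part of
 * the original source; error handling elided, and `worker` is a hypothetical
 * start routine):
 *
 *	pthread_attr_t attr;
 *	pthread_t th;
 *
 *	pthread_attr_init(&attr);                      // defaults: 512K stack, joinable
 *	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *	pthread_attr_setstacksize(&attr, 1024 * 1024); // must be a multiple of vm_page_size
 *	pthread_create(&th, &attr, worker, NULL);
 *	pthread_attr_destroy(&attr);
 */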

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                 int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
               const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        attr->schedset = 1;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            attr->schedset = 1;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
              int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (0);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr,
              int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (0);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = 0;
        attr->fastpath = 0;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = attr->stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* By SUSV spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((uintptr_t)stackaddr % vm_page_size) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        attr->freeStackOnExit = 0;
        attr->fastpath = 0;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
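
/*
 * Illustrative sketch (not part of the original source) of the addressing
 * convention handled above: pthread_attr_setstack() takes the lowest
 * addressable byte of the region, but the attribute internally stores the
 * high end (the base from which the stack grows down), which is what
 * pthread_attr_setstackaddr() takes directly.
 *
 *	char *region = valloc(size);                // page-aligned allocation (hypothetical)
 *	pthread_attr_setstack(&attr, region, size); // stores region + size internally
 *	// roughly equivalent to:
 *	pthread_attr_setstackaddr(&attr, region + size); // base address, high end
 *	pthread_attr_setstacksize(&attr, size);
 */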

/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
              size_t guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            attr->fastpath = 0;
            return (0);
        } else
            return(EINVAL);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
              size_t *guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        return (0);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
    if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
        printf("Failed to set thread_id in _pthread_body\n");
#endif
    _pthread_exit(self, (self->fun)(self->arg));
}

void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
{
#if WQ_DEBUG
    pthread_t pself;
#endif
    pthread_attr_t *attrs = &_pthread_attr_default;
    char * stackaddr;

    if ((pflags & PTHREAD_START_CUSTOM) == 0) {
        stackaddr = (char *)self;
        _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
        _pthread_set_self(self);
#endif
        LOCK(_pthread_list_lock);
        if (pflags & PTHREAD_START_SETSCHED) {
            self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
            self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
        }
        /* These are not joinable threads */
        if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
            self->detached &= ~PTHREAD_CREATE_JOINABLE;
            self->detached |= PTHREAD_CREATE_DETACHED;
        }
    } else {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
        _pthread_set_self(self);
#endif
        LOCK(_pthread_list_lock);
    }
    self->kernel_thread = kport;
    self->fun = fun;
    self->arg = funarg;

    /* Add to the pthread list */
    if (self->parentcheck == 0) {
        TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
        __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
#endif
        _pthread_count++;
    }
    self->childrun = 1;
    UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
    if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
        printf("Failed to set thread_id in pthread_start\n");
#endif

#if WQ_DEBUG
    pself = pthread_self();
    if (self != pself)
        LIBC_ABORT("self %p != pself %p", self, pself);
#endif
#if PTH_TRACE
    __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
#endif

    _pthread_exit(self, (self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
        const pthread_attr_t *attrs,
        void *stack,
        const mach_port_t kernel_thread)
{
    int res;
    res = 0;

    do
    {
        memset(t, 0, sizeof(*t));
        t->newstyle = 0;
        t->schedset = 0;
        t->kernalloc = 0;
        t->tsd[0] = t;
        t->max_tsd_key = 0;
        t->wqthread = 0;
        t->cur_workq = 0;
        t->cur_workitem = 0;
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->guardsize = attrs->guardsize;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->cancel_error = 0;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->plist.tqe_next = (struct _pthread *)0;
        t->plist.tqe_prev = (struct _pthread **)0;
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
    } while (0);
    return (res);
}

void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
{
    mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;

    if (nozero == 0) {
        memset(t, 0, sizeof(*t));
        t->plist.tqe_next = (struct _pthread *)0;
        t->plist.tqe_prev = (struct _pthread **)0;
    }
    t->schedset = attrs->schedset;
    t->tsd[0] = t;
    if (kernalloc != 0) {
        stackaddr = (mach_vm_offset_t)(uintptr_t)t;

        /* if allocated from kernel set values appropriately */
        t->stacksize = stacksize;
        t->stackaddr = (void *)(uintptr_t)stackaddr;
        t->freeStackOnExit = 1;
        t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
        t->freesize = pthreadsize + stacksize + vm_page_size;
    } else {
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
    }
    t->guardsize = attrs->guardsize;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->param = attrs->param;
    t->cancel_error = 0;
    t->sig = _PTHREAD_SIG;
    t->reply_port = MACH_PORT_NULL;
    t->cthread_self = NULL;
    LOCK_INIT(t->lock);
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
    t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
    t->death = SEMAPHORE_NULL;
    t->newstyle = 1;
    t->kernalloc = kernalloc;
    t->wqthread = 0;
    t->cur_workq = 0;
    t->cur_workitem = 0;
    t->max_tsd_key = 0;
}

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * being created at the time of the call; it does not tell whether there is
 * more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    mach_port_t kport = MACH_PORT_NULL;

    if (t == NULL)
        goto out;

    /*
     * If the call is on self, return the kernel port. We cannot
     * add this bypass for main thread as it might have exited,
     * and we should not return stale port info.
     */
    if (t == pthread_self())
    {
        kport = t->kernel_thread;
        goto out;
    }

    if (_pthread_lookup_thread(t, &kport, 0) != 0)
        return((mach_port_t)0);

out:
    return(kport);
}

pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
    struct _pthread * p = NULL;

    /* No need to wait as mach port is already known */
    LOCK(_pthread_list_lock);
    TAILQ_FOREACH(p, &__pthread_head, plist) {
        if (p->kernel_thread == kernel_thread)
            break;
    }
    UNLOCK(_pthread_list_lock);
    return p;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    int ret;
    size_t size = 0;

    if (t == NULL)
        return(ESRCH);

    if (t == pthread_self() || t == &_thread) //since the main thread will not get de-allocated from underneath us
    {
        size = t->stacksize;
        return size;
    }

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(t)) != 0) {
        UNLOCK(_pthread_list_lock);
        return(ret);
    }

    size = t->stacksize;
    UNLOCK(_pthread_list_lock);

    return(size);
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    int ret;
    void * addr = NULL;

    if (t == NULL)
        return((void *)(uintptr_t)ESRCH);

    if (t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
        return t->stackaddr;

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(t)) != 0) {
        UNLOCK(_pthread_list_lock);
        return((void *)(uintptr_t)ret);
    }
    addr = t->stackaddr;
    UNLOCK(_pthread_list_lock);

    return(addr);
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/* if we are passed in a pthread_t that is NULL, then we return
   the current thread's thread_id. So folks don't have to call
   pthread_self, in addition to us doing it, if they just want
   their thread_id.
*/
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
    int rval = 0;
    pthread_t self = pthread_self();

    if (thread_id == NULL) {
        return(EINVAL);
    } else if (thread == NULL || thread == self) {
        *thread_id = self->thread_id;
        return rval;
    }

    LOCK(_pthread_list_lock);
    if ((rval = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return(rval);
    }
    *thread_id = thread->thread_id;
    UNLOCK(_pthread_list_lock);
    return rval;
}
#endif
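
/*
 * Illustrative use (not part of the original source): passing NULL asks for
 * the calling thread's id without an explicit pthread_self() call.
 *
 *	__uint64_t tid;
 *	if (pthread_threadid_np(NULL, &tid) == 0)
 *		printf("current thread id: %llu\n", (unsigned long long)tid);
 */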

int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
    int rval;
    rval = 0;

    if (thread == NULL)
        return(ESRCH);

    LOCK(_pthread_list_lock);
    if ((rval = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return(rval);
    }
    strlcpy(threadname, thread->pthread_name, len);
    UNLOCK(_pthread_list_lock);
    return rval;
}

int
pthread_setname_np(const char *threadname)
{
    int rval;
    int len;

    rval = 0;
    len = strlen(threadname);

    /* prototype is in pthread_internals.h */
    rval = proc_setthreadname((void *)threadname, len);
    if (rval == 0)
    {
        strlcpy((pthread_self())->pthread_name, threadname, MAXTHREADNAMESIZE);
    }
    return rval;
}
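
/*
 * Illustrative use (not part of the original source): the setter only names
 * the calling thread; the getter can be pointed at any registered thread.
 *
 *	char name[64];                  // MAXTHREADNAMESIZE bounds the stored name
 *	pthread_setname_np("worker-1");
 *	pthread_getname_np(pthread_self(), name, sizeof(name));
 */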

static int
_new_pthread_create_suspended(pthread_t *thread,
           const pthread_attr_t *attr,
           void *(*start_routine)(void *),
           void *arg,
           int create_susp)
{
    pthread_attr_t *attrs;
    void *stack;
    int error;
    unsigned int flags;
    pthread_t t, t2;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;
    task_t self = mach_task_self();
    int kernalloc = 0;
    int susp = create_susp;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    error = 0;

    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
        (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
        needresume = 1;
        susp = 1;
    } else
        needresume = 0;

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
     * any change in priority or policy is needed here.
     */
    if ((__oldstyle == 1) || (create_susp != 0)) {
        /* Rosetta or pthread_create_suspended() */
        /* running under rosetta */
        /* Allocate a stack for the thread */
#if PTH_TRACE
        __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
#endif
        if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
            return(error);
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (susp) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                return(EINVAL);
            }
        }
        if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            return(error);
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        t->newstyle = 0;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
        __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
#endif
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, susp, needresume);
        return(0);
    } else {

        flags = 0;
        if (attrs->fastpath == 1)
            kernalloc = 1;

        if (attrs->detached == PTHREAD_CREATE_DETACHED)
            flags |= PTHREAD_START_DETACHED;
        if (attrs->schedset != 0) {
            flags |= PTHREAD_START_SETSCHED;
            flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
            flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
        }

        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        if (kernalloc == 0) {
            /* Allocate a stack for the thread */
            flags |= PTHREAD_START_CUSTOM;
            if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
                return(error);
            }
            /* Send it on its way */
            t->arg = arg;
            t->fun = start_routine;
            t->newstyle = 1;

#if PTH_TRACE
            __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
#endif

            if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
                _pthread_free_pthread_onstack(t, 1, 0);
                return (EAGAIN);
            }
            else t = t2;
            LOCK(_pthread_list_lock);
            t->parentcheck = 1;
            if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                /* detached child exited, mop up */
                UNLOCK(_pthread_list_lock);
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
#endif
                if (t->freeStackOnExit)
                    vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
                else
                    free(t);
            } else if (t->childrun == 0) {
                TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                _pthread_count++;
#if PTH_LISTTRACE
                __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
#endif
                UNLOCK(_pthread_list_lock);
            } else
                UNLOCK(_pthread_list_lock);

            *thread = t;

#if PTH_TRACE
            __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
#endif
            return (0);

        } else {
            /* kernel allocation */
#if PTH_TRACE
            __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
#endif
            if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
                return (EAGAIN);
            /* Now set it up to execute */
            LOCK(_pthread_list_lock);
            t->parentcheck = 1;
            if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                /* detached child exited, mop up */
                UNLOCK(_pthread_list_lock);
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
#endif
                vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
            } else if (t->childrun == 0) {
                TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                _pthread_count++;
#if PTH_LISTTRACE
                __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
#endif
                UNLOCK(_pthread_list_lock);
            } else
                UNLOCK(_pthread_list_lock);

            *thread = t;

#if PTH_TRACE
            __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
#endif
            return(0);
        }
    }
}

static int
_pthread_create_suspended(pthread_t *thread,
              const pthread_attr_t *attr,
              void *(*start_routine)(void *),
              void *arg,
              int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = 0;

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
     * any change in priority or policy is needed here.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
        (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (suspended) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
        __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
#endif
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
           const pthread_attr_t *attr,
           void *(*start_routine)(void *),
           void *arg)
{
    return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                const pthread_attr_t *attr,
                void *(*start_routine)(void *),
                void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}

/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    int newstyle = 0;
    int ret;

    if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
        return (ret); /* Not a valid thread */

    LOCK(thread->lock);
    newstyle = thread->newstyle;
    if (thread->detached & PTHREAD_CREATE_JOINABLE)
    {
        if (thread->detached & _PTHREAD_EXITED) {
            UNLOCK(thread->lock);
            pthread_join(thread, NULL);
            return 0;
        } else {
            if (newstyle == 0) {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
            } else {
                mach_port_t joinport = thread->joiner_notify;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;

                UNLOCK(thread->lock);
                if (joinport) {
                    semaphore_signal(joinport);
                }
            }
            return(0);
        }
    } else {
        UNLOCK(thread->lock);
        return (EINVAL);
    }
}


/*
 * pthread_kill routes to the __pthread_kill system call
 */
int
pthread_kill (
    pthread_t th,
    int sig)
{
    int error = 0;
    mach_port_t kport = MACH_PORT_NULL;

    if ((sig < 0) || (sig > NSIG))
        return(EINVAL);

    if (_pthread_lookup_thread(th, &kport, 0) != 0)
        return (ESRCH); /* Not a valid thread */

    /* if the thread is a workqueue thread, just return error */
    if ((th->wqthread != 0) && (th->wqkillset == 0)) {
        return(ENOTSUP);
    }

    error = __pthread_kill(kport, sig);

    if (error == -1)
        error = errno;
    return(error);
}

int
__pthread_workqueue_setkill(int enable)
{
    pthread_t self = pthread_self();

    LOCK(self->lock);
    if (enable == 0)
        self->wqkillset = 0;
    else
        self->wqkillset = 1;
    UNLOCK(self->lock);

    return(0);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    pthread_reap_msg_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = 0x44454144; /* 'DEAD' */
    msg.thread = thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}

/* Reap the resources for available threads */
__private_extern__
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                "mach_port_deallocate(kernel_thread) failed: %s\n",
                mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                     MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                "mach_port_mod_refs(reply_port) failed: %s\n",
                mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + th->guardsize;

        addr -= size;
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                "vm_deallocate(stack) failed: %s\n",
                mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;
    if (conforming) {
        if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
            *value_ptr = PTHREAD_CANCELED;
        th->sig = _PTHREAD_NO_SIG;
    }

    if (th != &_thread)
        free(th);

    return 0;
}

static
void _pthread_reap_threads(void)
{
    pthread_reap_msg_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
            sizeof msg, thread_recycle_port,
            MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = msg.thread;

        /* deal with race with thread_create_running() */
        if (kernel_thread == MACH_PORT_NULL &&
            kernel_thread != thread->kernel_thread) {
            kernel_thread = thread->kernel_thread;
        }

        if ( kernel_thread == MACH_PORT_NULL ||
             _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }

        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                sizeof msg, thread_recycle_port,
                MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

/* For compatibility... */

pthread_t
_pthread_self() {
    return pthread_self();
}
1678
1679/*
1680 * Terminate a thread.
1681 */
224c7076
A
1682int __disable_threadsignal(int);
1683
3d9156a7
A
1684static void
1685_pthread_exit(pthread_t self, void *value_ptr)
e9ce8d39 1686{
3d9156a7 1687 struct __darwin_pthread_handler_rec *handler;
e9ce8d39 1688 kern_return_t kern_res;
5b2abdfb 1689 int thread_count;
224c7076 1690 int newstyle = self->newstyle;
5b2abdfb
A
1691
1692 /* Make this thread not to receive any signals */
224c7076
A
1693 __disable_threadsignal(1);
1694
34e8f829 1695#if PTH_TRACE
224c7076
A
1696 __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
1697#endif
1698
1699 /* set cancel state to disable and type to deferred */
1700 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
5b2abdfb 1701
3d9156a7 1702 while ((handler = self->__cleanup_stack) != 0)
e9ce8d39 1703 {
3d9156a7
A
1704 (handler->__routine)(handler->__arg);
1705 self->__cleanup_stack = handler->__next;
e9ce8d39
A
1706 }
1707 _pthread_tsd_cleanup(self);
5b2abdfb 1708
224c7076
A
1709 if (newstyle == 0) {
1710 _pthread_reap_threads();
1711
1712 LOCK(self->lock);
1713 self->detached |= _PTHREAD_EXITED;
1714
1715 if (self->detached & PTHREAD_CREATE_JOINABLE) {
1716 mach_port_t death = self->death;
1717 self->exit_value = value_ptr;
1718 UNLOCK(self->lock);
1719 /* the joiner will need a kernel thread reference, leave ours for it */
1720 if (death) {
1721 PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
1722 if (kern_res != KERN_SUCCESS)
1723 fprintf(stderr,
1724 "semaphore_signal(death) failed: %s\n",
1725 mach_error_string(kern_res));
1726 }
1727 LOCK(_pthread_list_lock);
1728 thread_count = --_pthread_count;
1729 UNLOCK(_pthread_list_lock);
1730 } else {
1731 UNLOCK(self->lock);
1732 LOCK(_pthread_list_lock);
1733 TAILQ_REMOVE(&__pthread_head, self, plist);
34e8f829 1734#if PTH_LISTTRACE
224c7076
A
1735 __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
1736#endif
1737 thread_count = --_pthread_count;
1738 UNLOCK(_pthread_list_lock);
1739 /* with no joiner, we let become available consume our cached ref */
1740 _pthread_become_available(self, self->kernel_thread);
1741 }
5b2abdfb 1742
224c7076
A
1743 if (thread_count <= 0)
1744 exit(0);
5b2abdfb 1745
224c7076
A
1746 /* Use a new reference to terminate ourselves. Should never return. */
1747 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
1748 fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
1749 mach_error_string(kern_res));
	} else {
		semaphore_t joinsem = SEMAPHORE_NULL;

		if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == (mach_port_t)0) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
	LIBC_ABORT("thread %p didn't exit", self);
}

void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	/* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}

/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);

	return(0);
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
			       mach_port_t kport,
			       int policy,
			       const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return (EINVAL);
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}

int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error;
	int bypass = 1;

	if (t != pthread_self() && t != &_thread) {	/* since the main thread will not get de-allocated from underneath us */
		bypass = 0;
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return(ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		if (bypass == 0) {
			/* ensure the thread is still valid */
			LOCK(_pthread_list_lock);
			if ((error = _pthread_find_thread(t)) != 0) {
				UNLOCK(_pthread_list_lock);
				return(error);
			}
			t->policy = policy;
			t->param = *param;
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return(error);
}

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}

// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
// then _pthread_set_self won't be bound when secondary threads try to start up.
void __attribute__((noinline))
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	if (p == 0) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}
	p->tsd[0] = p;
	__pthread_set_self(&p->tsd[0]);
}

void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}

/*
 * Cancellation handler for pthread_once: the init routine may itself
 * contain a cancellation point, in which case we need to release the
 * spin lock.
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}


/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0);	/* Spec defines no possible errors! */
}

/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}

int
pthread_getconcurrency(void)
{
	return(pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return EINVAL;
	pthread_concurrency = new_level;
	return(0);
}

/*
 * Perform package initialization - called automatically when application starts
 */
int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	int ncpus = 0;
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof (struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
#endif

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof (stackaddr);
	if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->stacksize = DFLSSIZ;	/* initialize main thread's stacksize based on vmparam.h */
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if ((ncpus = _NumCPUs()) > 1)
		_spin_tries = MP_SPIN_TRIES;

	workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_BG_PRIOQUEUE] = ncpus;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
	IF_ROSETTA() {
		__oldstyle = 1;
	}
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
{
	vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
	kr = vm_map(mach_task_self(),
		    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
		    VM_FLAGS_FIXED | VM_MAKE_TAG(0),	// Which tag to use?
		    MACH_PORT_NULL,
		    (vm_address_t)0, FALSE,
		    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
		    VM_INHERIT_DEFAULT);
	/* We ignore the return result here. The ObjC runtime will just have to deal. */
}
#endif
	/* added so that thread_recycle_port is initialized on new launch */
	_pthread_fork_child_postinit();
	mig_init(1);	/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&thread->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (uintptr_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (uintptr_t)thread);
#endif
	}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_init\n");
#endif
	return 0;
}

int sched_yield(void)
{
	swtch_pri(0);
	return 0;
}

/* This used to be the "magic" that gets the initialization routine called when the application starts */
/*
 * (This has been moved to setenv.c, so we can use it to fix a less-than-10.5
 * crt1.o issue)
 * static int _do_nothing(void) { return 0; }
 * int (*_cthread_init_routine)(void) = _do_nothing;
 */

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}

static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, p, 0, 0, 10, 0);
#endif
	_pthread_count = 1;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((p->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}

void _pthread_fork_child_postinit() {
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &thread_recycle_port);
	if (kr != KERN_SUCCESS) {
		abort();
	}
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		if (conforming)
			__pthread_canceled(1);
		break;
	case PTHREAD_CANCEL_DISABLE:
		if (conforming)
			__pthread_canceled(2);
		break;
	default:
		return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate)
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming)
		_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
	return (0);
}

/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
// 4597450: begin
		self->detached |= _PTHREAD_WASCANCEL;
// 4597450: end
	}
	UNLOCK(self->lock);
}

int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	kern_return_t res;
	int ret;

#if PTH_TRACE
	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
#endif
	/* Handle the scenario where the joiner was waiting for the thread
	 * and a pthread_detach() happened on that thread: the semaphore
	 * will trigger, but by the time the joiner runs the target thread
	 * could already be freed. So make sure the thread is still in the
	 * list and is joinable before continuing with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		/* returns ESRCH */
		return(ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return(ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
#endif
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr, "mach_port_mod_refs(reply_port) failed: %s\n", mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
#endif
		vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
#endif
		free(thread);
	}
	return(0);
}

/* ALWAYS called with list lock and return with list lock */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return(0);
		}
	}
	return(ESRCH);
}

int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return(EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return(0);
}

/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
	attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
	attrp->overcommit = 0;
	return(0);
}

int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
	{
		return (0);
	} else
	{
		return (EINVAL);	/* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*qpriop = attr->queueprio;
		return (0);
	} else {
		return (EINVAL);	/* Not an attribute structure! */
	}
}


int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		switch(qprio) {
		case WORKQ_HIGH_PRIOQUEUE:
		case WORKQ_DEFAULT_PRIOQUEUE:
		case WORKQ_LOW_PRIOQUEUE:
		case WORKQ_BG_PRIOQUEUE:
			attr->queueprio = qprio;
			break;
		default:
			error = EINVAL;
		}
	} else {
		error = EINVAL;
	}
	return (error);
}


int
pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*ocommp = attr->overcommit;
		return (0);
	} else {
		return (EINVAL);	/* Not an attribute structure! */
	}
}


int
pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		attr->overcommit = ocomm;
	} else {
		error = EINVAL;
	}
	return (error);
}
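
/*
 * Usage sketch (illustrative only, non-portable Apple API): configure a
 * workqueue attribute for a low-priority, non-overcommit queue before
 * handing it to pthread_workqueue_create_np(). The name attr_example is
 * hypothetical.
 */
#if 0
#include "pthread_workqueue.h"

static int attr_example(void)
{
	pthread_workqueue_attr_t attr;
	int qprio;

	pthread_workqueue_attr_init_np(&attr);
	pthread_workqueue_attr_setqueuepriority_np(&attr, WORKQ_LOW_PRIOQUEUE);
	pthread_workqueue_attr_setovercommit_np(&attr, 0);
	pthread_workqueue_attr_getqueuepriority_np(&attr, &qprio);	/* qprio == WORKQ_LOW_PRIOQUEUE */
	return pthread_workqueue_attr_destroy_np(&attr);
}
#endif
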
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */

static void
workqueue_list_lock()
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock()
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}

int
pthread_workqueue_init_np()
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return(ret);
}

int
pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
{
	int error = 0;

	if (queue < 0 || queue >= WORKQ_NUM_PRIOQUEUE)
		return(EINVAL);

	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);

	if (error == -1)
		return(errno);
	return(0);
}

void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_prepare();
}

void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_parent();
}

void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	/*
	 * NOTE: workq additions here
	 * are for i386,x86_64 only as
	 * ppc and arm do not support it
	 */
	__workqueue_list_lock = OS_SPINLOCK_INIT;
	if (kernel_workq_setup != 0) {
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}

static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t wq;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();
#endif

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, 0);
#endif

		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		__workqueue_pool_ptr = NULL;
		__workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);

		__workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
				PROT_READ|PROT_WRITE,
				MAP_ANON | MAP_PRIVATE,
				0,
				0);

		if (__workqueue_pool_ptr == MAP_FAILED) {
			/* Not expected to fail; if it does, always malloc the work items */
			__workqueue_nitems = WORKITEM_POOL_SIZE;
			__workqueue_pool_ptr = NULL;
		} else
			__workqueue_nitems = 0;

		/* sets up the workitem pool */
		grow_workitem();

		/* since the size is less than a page, leaving this in malloc pool */
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			if (__workqueue_pool_ptr != NULL) {
				munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
			}
			free(wq);
			return(ENOMEM);
		}
		kernel_workq_setup = 1;
	}
	return(0);
}


/* This routine is called with list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the chunk size is set so some multiple of it is pool size */
		if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
			grow_workitem();
		} else {
			workqueue_list_unlock();
			witem = malloc(sizeof(struct _pthread_workitem));
			workqueue_list_lock();
			witem->fromcache = 0;
			goto out;
		}
	}
	witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
	TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	witem->fromcache = 1;
out:
	witem->flags = 0;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	user_workitem_count++;
	return(witem);
}

/* This routine is called with list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	user_workitem_count--;
	witem->flags = 0;
	if (witem->fromcache != 0)
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
	else
		free(witem);
}

static void
grow_workitem(void)
{
	pthread_workitem_t witemp;
	int i;

	witemp = &__workqueue_pool_ptr[__workqueue_nitems];
	bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE));
	for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) {
		witemp[i].fromcache = 1;
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
	}
	__workqueue_nitems += WORKITEM_CHUNK_SIZE;
}

/* This routine is called with list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	user_workq_count++;
	return(wq);
}

/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	user_workq_count--;
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}

static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	wq->flags = 0;
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
#endif
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}

int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHREAD_WORKQUEUE_SIG)
		return(1);
	else
		return(0);
}

/* called with list lock */
static void
pick_nextworkqueue_droplock()
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

#if WQ_TRACE
	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
#endif
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;	/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#if WQ_TRACE
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
#endif
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to reevaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				if (val != 0)
					nworkq = workq;
				/* if we found nothing to run and are back at the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}

static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
#endif
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
#endif
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
#if WQ_TRACE
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
#endif

		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* A barrier posted when nothing is outstanding also needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				/* since we are going to drop list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
#if WQ_TRACE
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
#endif
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(witem);
#if WQ_TRACE
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		}
#if WQ_TRACE
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
#if 1
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
#endif
	return(0);
}

void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is set to 0 when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif

	} else {
		/* reuse is set to 1 when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
			pself = self;
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
			LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (int)(intptr_t)(*self->fun)(self->arg);

	/* If we reach here without going through the above initialization path then don't go through
	 * with the teardown code path (e.g. setjmp/longjmp). Instead just exit this thread.
	 */
	if (self != pthread_self()) {
		pthread_exit(PTHREAD_CANCELED);
	}

	workqueue_exit(self, workq, item);
}

static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run it */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				workq->sig = 0;
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there are higher prio schedulable items, reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}

	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}

static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}

/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */

int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__ppc__)
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}

int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void (*workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->workq = workq;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		if (itemhandlep != NULL)
			*itemhandlep = 0;
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	if (gencountp != NULL)
		*gencountp = 0;
#if WQ_TRACE
	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
#endif
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif

	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}

int
pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	if (ocommp != NULL)
		*ocommp = workq->overcommit;
	return(0);
}

#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */

#if __DARWIN_UNIX03

__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	int newstyle;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;

#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited) {
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN)
			{
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}

#endif /* __DARWIN_UNIX03 */

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread,
	     void **value_ptr)

moved to pthread_cancelable.c */

/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return an error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}

void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return EINVAL;
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* !__DARWIN_UNIX03 */
	return (0);
}

int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}

/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */