]>
Commit | Line | Data |
---|---|---|
9385eb3d | 1 | /* |
34e8f829 | 2 | * Copyright (c) 2000-2008 Apple Inc. All rights reserved. |
9385eb3d A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
9385eb3d A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
12 | * | |
13 | * The Original Code and all software distributed under the License are | |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
20 | * | |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
e9ce8d39 A |
23 | /* |
24 | * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 | |
25 | * All Rights Reserved | |
26 | * | |
27 | * Permission to use, copy, modify, and distribute this software and | |
28 | * its documentation for any purpose and without fee is hereby granted, | |
29 | * provided that the above copyright notice appears in all copies and | |
30 | * that both the copyright notice and this permission notice appear in | |
31 | * supporting documentation. | |
32 | * | |
33 | * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE | |
34 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
35 | * FOR A PARTICULAR PURPOSE. | |
36 | * | |
37 | * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR | |
38 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM | |
39 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, | |
40 | * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION | |
41 | * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
42 | * | |
43 | */ | |
44 | /* | |
45 | * MkLinux | |
46 | */ | |
47 | ||
48 | /* | |
49 | * POSIX Pthread Library | |
50 | */ | |
51 | ||
9385eb3d | 52 | #include "pthread_internals.h" |
224c7076 | 53 | #include "pthread_workqueue.h" |
9385eb3d | 54 | |
e9ce8d39 A |
55 | #include <assert.h> |
56 | #include <stdio.h> /* For printf(). */ | |
57 | #include <stdlib.h> | |
58 | #include <errno.h> /* For __mach_errno_addr() prototype. */ | |
224c7076 | 59 | #include <signal.h> |
e9ce8d39 A |
60 | #include <sys/time.h> |
61 | #include <sys/resource.h> | |
62 | #include <sys/sysctl.h> | |
9385eb3d | 63 | #include <sys/queue.h> |
1f2f436a | 64 | #include <sys/mman.h> |
e9ce8d39 A |
65 | #include <machine/vmparam.h> |
66 | #include <mach/vm_statistics.h> | |
1f2f436a | 67 | #include <mach/mach_init.h> |
9385eb3d A |
68 | #define __APPLE_API_PRIVATE |
69 | #include <machine/cpu_capabilities.h> | |
224c7076 A |
70 | #include <libkern/OSAtomic.h> |
71 | #if defined(__ppc__) | |
72 | #include <libkern/OSCrossEndian.h> | |
73 | #endif | |
ad3c9f2a | 74 | #include <dispatch/private.h> /* for at_fork handlers */ |
e9ce8d39 | 75 | |
3d9156a7 | 76 | |
34e8f829 A |
77 | extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming); |
78 | extern int __pthread_sigmask(int, const sigset_t *, sigset_t *); | |
79 | ||
3d9156a7 A |
80 | #ifndef BUILDING_VARIANT /* [ */ |
81 | ||
224c7076 A |
82 | __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head); |
83 | ||
84 | ||
e9ce8d39 | 85 | |
34e8f829 A |
86 | int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE]; |
87 | ||
e9ce8d39 A |
88 | /* Per-thread kernel support */ |
89 | extern void _pthread_set_self(pthread_t); | |
90 | extern void mig_init(int); | |
224c7076 A |
91 | static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread); |
92 | static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread); | |
34e8f829 | 93 | static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero); |
224c7076 A |
94 | static int _new_pthread_create_suspended(pthread_t *thread, |
95 | const pthread_attr_t *attr, | |
96 | void *(*start_routine)(void *), | |
97 | void *arg, | |
98 | int create_susp); | |
e9ce8d39 | 99 | |
ad3c9f2a A |
100 | /* the registered libdispatch worker function */ |
101 | static void (*__libdispatch_workerfunction)(int, int, void *) = NULL; | |
102 | ||
9385eb3d A |
103 | /* Get CPU capabilities from the kernel */ |
104 | __private_extern__ void _init_cpu_capabilities(void); | |
105 | ||
e9ce8d39 A |
106 | /* Needed to tell the malloc subsystem we're going multithreaded */ |
107 | extern void set_malloc_singlethreaded(int); | |
108 | ||
109 | /* Used when we need to call into the kernel with no reply port */ | |
110 | extern pthread_lock_t reply_port_lock; | |
224c7076 | 111 | int _pthread_find_thread(pthread_t thread); |
e9ce8d39 | 112 | |
3d9156a7 A |
113 | /* Mach message used to notify that a thread needs to be reaped */ |
114 | ||
115 | typedef struct _pthread_reap_msg_t { | |
116 | mach_msg_header_t header; | |
117 | pthread_t thread; | |
118 | mach_msg_trailer_t trailer; | |
119 | } pthread_reap_msg_t; | |
120 | ||
ad3c9f2a A |
121 | /* Utilities */
122 | ||
123 | __private_extern__ uintptr_t commpage_pfz_base=0; | |
124 | ||
125 | void __pthread_pfz_setup(const char *apple[]) __attribute__ ((visibility ("hidden"))); | |
126 | ||
/*
 * Parse the numeric value out of a "key=value" boot-args style string
 * handed down by the kernel.  Returns 0 when there is no '=' separator
 * or when strtoul reports a range error (ULONG_MAX).
 */
static uintptr_t __pfz_from_kernel(const char *str)
{
	const char *valstr;
	unsigned long parsed;

	/* Skip over the key to the first character of the value */
	valstr = strchr(str, '=');
	if (valstr == NULL)
		return 0;

	parsed = strtoul(valstr + 1, NULL, 0); /* may err by 0 or ULONG_MAX */
	if (parsed == ULONG_MAX)
		parsed = 0;

	return (uintptr_t)parsed;
}
141 | ||
142 | void | |
143 | __pthread_pfz_setup(const char *apple[]) | |
144 | { | |
145 | const char **p; | |
146 | for (p = apple; p && *p; p++) { | |
147 | /* checking if matching apple variable is at begining */ | |
148 | if (strstr(*p, "pfz=") == *p) { | |
149 | commpage_pfz_base = __pfz_from_kernel(*p); | |
150 | bzero(*p,strlen(*p)); | |
151 | break; | |
152 | } | |
153 | } | |
154 | ||
155 | if (commpage_pfz_base == 0) | |
156 | commpage_pfz_base = _COMM_PAGE_TEXT_START; | |
157 | ||
158 | return; | |
159 | } | |
160 | ||
161 | ||
5b2abdfb A |
162 | /* We'll implement this when the main thread is a pthread */ |
163 | /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */ | |
164 | static struct _pthread _thread = {0}; | |
e9ce8d39 | 165 | |
5b2abdfb A |
166 | /* This global should be used (carefully) by anyone needing to know if a |
167 | ** pthread has been created. | |
168 | */ | |
169 | int __is_threaded = 0; | |
9385eb3d | 170 | /* _pthread_count is protected by _pthread_list_lock */ |
5b2abdfb | 171 | static int _pthread_count = 1; |
3d9156a7 | 172 | int __unix_conforming = 0; |
ad3c9f2a A |
173 | static int __workqueue_newspis = 0; |
174 | static int __workqueue_oldspis = 0; | |
224c7076 | 175 | __private_extern__ size_t pthreadsize = 0; |
3d9156a7 | 176 | |
224c7076 A |
177 | /* under rosetta we will use old style creation of threads */ |
178 | static int __oldstyle = 0; | |
5b2abdfb | 179 | |
9385eb3d | 180 | __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER; |
5b2abdfb A |
181 | |
182 | /* Same implementation as LOCK, but without the __is_threaded check */ | |
3b2a1fe8 | 183 | int _spin_tries = 0; |
34e8f829 | 184 | extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t); |
5b2abdfb A |
/*
 * Slow path for a contended spin lock: busy-wait for _spin_tries
 * attempts, then yield via thread_switch (SWITCH_OPTION_DEPRESS,
 * briefly lowering our priority) before re-arming the spin budget.
 * Returns only once _spin_lock_try() wins the lock.
 */
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;
	do {
		if (tries-- > 0)
			continue;
		/* spun out the budget: let another thread run, then retry */
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while(!_spin_lock_try(lock));
}
195 | ||
1f2f436a | 196 | static mach_port_t thread_recycle_port = MACH_PORT_NULL; |
e9ce8d39 A |
197 | |
198 | /* These are used to keep track of a semaphore pool shared by mutexes and condition | |
199 | ** variables. | |
200 | */ | |
201 | ||
202 | static semaphore_t *sem_pool = NULL; | |
203 | static int sem_pool_count = 0; | |
204 | static int sem_pool_current = 0; | |
205 | static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER; | |
206 | ||
207 | static int default_priority; | |
208 | static int max_priority; | |
209 | static int min_priority; | |
5b2abdfb | 210 | static int pthread_concurrency; |
e9ce8d39 | 211 | |
224c7076 A |
212 | static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT; |
213 | ||
1f2f436a | 214 | static void _pthread_exit(pthread_t self, void *value_ptr) __dead2; |
224c7076 A |
215 | static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming); |
216 | static pthread_attr_t _pthread_attr_default = {0}; | |
217 | static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr); | |
224c7076 A |
218 | static int kernel_workq_setup = 0; |
219 | static volatile int32_t kernel_workq_count = 0; | |
1f2f436a A |
220 | static volatile unsigned int user_workq_count = 0; /* number of outstanding workqueues */ |
221 | static volatile unsigned int user_workitem_count = 0; /* number of outstanding workitems */ | |
224c7076 A |
222 | #define KERNEL_WORKQ_ELEM_MAX 64 /* Max number of elements in the kernel */
223 | static int wqreadyprio = 0; /* current highest prio queue ready with items */ | |
224 | ||
225 | __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head); | |
226 | __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head); | |
227 | ||
1f2f436a A |
228 | static struct _pthread_workitem * __workqueue_pool_ptr; |
229 | static size_t __workqueue_pool_size = 0; | |
230 | static int __workqueue_nitems = 0; | |
231 | ||
224c7076 A |
232 | struct _pthread_workqueue_head __pthread_workq0_head; |
233 | struct _pthread_workqueue_head __pthread_workq1_head; | |
234 | struct _pthread_workqueue_head __pthread_workq2_head; | |
1f2f436a A |
235 | struct _pthread_workqueue_head __pthread_workq3_head; |
236 | pthread_workqueue_head_t __pthread_wq_head_tbl[WORKQ_NUM_PRIOQUEUE] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head}; | |
224c7076 A |
237 | |
238 | static void workqueue_list_lock(void); | |
239 | static void workqueue_list_unlock(void); | |
240 | static int valid_workq(pthread_workqueue_t); | |
241 | static void pick_nextworkqueue_droplock(void); | |
242 | static int post_nextworkitem(pthread_workqueue_t workq); | |
243 | static void _pthread_workq_return(pthread_t self); | |
244 | static pthread_workqueue_attr_t _pthread_wq_attr_default = {0}; | |
224c7076 | 245 | extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse); |
34e8f829 | 246 | extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); |
224c7076 A |
247 | static pthread_workitem_t alloc_workitem(void); |
248 | static void free_workitem(pthread_workitem_t); | |
1f2f436a | 249 | static void grow_workitem(void); |
224c7076 A |
250 | static pthread_workqueue_t alloc_workqueue(void); |
251 | static void free_workqueue(pthread_workqueue_t); | |
252 | static int _pthread_work_internal_init(void); | |
253 | static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item); | |
1f2f436a | 254 | void _pthread_fork_child_postinit(); |
224c7076 | 255 | |
34e8f829 A |
256 | void pthread_workqueue_atfork_prepare(void); |
257 | void pthread_workqueue_atfork_parent(void); | |
258 | void pthread_workqueue_atfork_child(void); | |
259 | ||
260 | extern void dispatch_atfork_prepare(void); | |
261 | extern void dispatch_atfork_parent(void); | |
262 | extern void dispatch_atfork_child(void); | |
263 | ||
264 | /* workq_kernreturn commands */ | |
224c7076 A |
265 | #define WQOPS_QUEUE_ADD 1 |
266 | #define WQOPS_QUEUE_REMOVE 2 | |
267 | #define WQOPS_THREAD_RETURN 4 | |
34e8f829 | 268 | #define WQOPS_THREAD_SETCONC 8 |
ad3c9f2a A |
269 | #define WQOPS_QUEUE_NEWSPISUPP 0x10 /* this is to check for newer SPI support */ |
270 | #define WQOPS_QUEUE_REQTHREADS 0x20 /* request number of threads of a prio */ | |
271 | ||
272 | /* flag values for reuse field in the libc side _pthread_wqthread */ | |
273 | #define WQ_FLAG_THREAD_PRIOMASK 0x0000ffff | |
274 | #define WQ_FLAG_THREAD_OVERCOMMIT 0x00010000 /* thread is with overcommit prio */ | |
275 | #define WQ_FLAG_THREAD_REUSE 0x00020000 /* thread is being reused */ | |
276 | #define WQ_FLAG_THREAD_NEWSPI 0x00040000 /* the call is with new SPIs */ | |
277 | ||
278 | ||
279 | #define WORKQUEUE_OVERCOMMIT 0x10000 /* the work_kernreturn() for overcommit in prio field */ | |
3d9156a7 | 280 | |
5b2abdfb | 281 | /* |
224c7076 A |
282 | * Flags field passed to bsdthread_create and back in pthread_start
283 | 31 <---------------------------------> 0 | |
284 | _________________________________________ | |
285 | | flags(8) | policy(8) | importance(16) | | |
286 | ----------------------------------------- | |
287 | */ | |
34e8f829 | 288 | __private_extern__ |
224c7076 | 289 | void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); |
e9ce8d39 | 290 | |
51282358 | 291 | __private_extern__ |
34e8f829 A |
292 | void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse); |
293 | ||
224c7076 A |
294 | #define PTHREAD_START_CUSTOM 0x01000000 |
295 | #define PTHREAD_START_SETSCHED 0x02000000 | |
296 | #define PTHREAD_START_DETACHED 0x04000000 | |
297 | #define PTHREAD_START_POLICY_BITSHIFT 16 | |
298 | #define PTHREAD_START_POLICY_MASK 0xff | |
299 | #define PTHREAD_START_IMPORTANCE_MASK 0xffff | |
e9ce8d39 | 300 | |
b5d655f7 | 301 | static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *); |
34e8f829 | 302 | extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags); |
1f2f436a | 303 | extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t); |
224c7076 | 304 | extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem); |
34e8f829 A |
305 | extern __uint64_t __thread_selfid( void ); |
306 | extern int __pthread_canceled(int); | |
307 | extern void _pthread_keys_init(void); | |
308 | extern int __pthread_kill(mach_port_t, int); | |
309 | extern int __pthread_markcancel(int); | |
310 | extern int __workq_open(void); | |
311 | ||
34e8f829 A |
312 | |
313 | extern int __workq_kernreturn(int, pthread_workitem_t, int, int); | |
e9ce8d39 | 314 | |
59e0d9fe | 315 | #if defined(__ppc__) || defined(__ppc64__) |
5b2abdfb | 316 | static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000; |
8e029c65 | 317 | #elif defined(__i386__) || defined(__x86_64__) |
5b2abdfb | 318 | static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000; |
ad3c9f2a | 319 | #elif defined(__arm__) |
b5d655f7 | 320 | static const vm_address_t PTHREAD_STACK_HINT = 0x30000000; |
5b2abdfb A |
321 | #else |
322 | #error Need to define a stack address hint for this architecture | |
323 | #endif | |
324 | ||
9385eb3d A |
325 | /* Set the base address to use as the stack pointer, before adjusting due to the ABI |
326 | * The guardpages for stackoverflow protection is also allocated here | |
327 | * If the stack was already allocated(stackaddr in attr) then there are no guardpages | |
328 | * set up for the thread | |
329 | */ | |
e9ce8d39 A |
330 | |
331 | static int | |
5b2abdfb | 332 | _pthread_allocate_stack(pthread_attr_t *attrs, void **stack) |
e9ce8d39 A |
333 | { |
334 | kern_return_t kr; | |
3d9156a7 | 335 | vm_address_t stackaddr; |
9385eb3d | 336 | size_t guardsize; |
224c7076 | 337 | |
e9ce8d39 A |
338 | assert(attrs->stacksize >= PTHREAD_STACK_MIN); |
339 | if (attrs->stackaddr != NULL) { | |
9385eb3d | 340 | /* No guard pages setup in this case */ |
3d9156a7 | 341 | assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0); |
5b2abdfb A |
342 | *stack = attrs->stackaddr; |
343 | return 0; | |
e9ce8d39 | 344 | } |
5b2abdfb | 345 | |
3d9156a7 A |
346 | guardsize = attrs->guardsize; |
347 | stackaddr = PTHREAD_STACK_HINT; | |
348 | kr = vm_map(mach_task_self(), &stackaddr, | |
9385eb3d | 349 | attrs->stacksize + guardsize, |
5b2abdfb A |
350 | vm_page_size-1, |
351 | VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL, | |
352 | 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, | |
353 | VM_INHERIT_DEFAULT); | |
354 | if (kr != KERN_SUCCESS) | |
355 | kr = vm_allocate(mach_task_self(), | |
3d9156a7 | 356 | &stackaddr, attrs->stacksize + guardsize, |
5b2abdfb | 357 | VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE); |
e9ce8d39 A |
358 | if (kr != KERN_SUCCESS) { |
359 | return EAGAIN; | |
360 | } | |
5b2abdfb A |
361 | /* The guard page is at the lowest address */ |
362 | /* The stack base is the highest address */ | |
9385eb3d | 363 | if (guardsize) |
3d9156a7 A |
364 | kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE); |
365 | *stack = (void *)(stackaddr + attrs->stacksize + guardsize); | |
224c7076 A |
366 | return 0; |
367 | } | |
e9ce8d39 | 368 | |
224c7076 A |
369 | static int |
370 | _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread) | |
371 | { | |
372 | kern_return_t kr; | |
373 | pthread_t t; | |
374 | vm_address_t stackaddr; | |
375 | size_t guardsize, allocsize; | |
376 | ||
377 | assert(attrs->stacksize >= PTHREAD_STACK_MIN); | |
378 | ||
379 | if (attrs->stackaddr != NULL) { | |
380 | /* No guard pages setup in this case */ | |
381 | assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0); | |
382 | *stack = attrs->stackaddr; | |
383 | t = (pthread_t)malloc(pthreadsize); | |
384 | _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0); | |
385 | t->freeStackOnExit = 0; | |
386 | t->freeaddr = 0; | |
387 | t->freesize = 0; | |
388 | *thread = t; | |
389 | return 0; | |
390 | } | |
391 | ||
392 | guardsize = attrs->guardsize; | |
393 | allocsize = attrs->stacksize + guardsize + pthreadsize; | |
394 | stackaddr = PTHREAD_STACK_HINT; | |
395 | kr = vm_map(mach_task_self(), &stackaddr, | |
396 | allocsize, | |
397 | vm_page_size-1, | |
398 | VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL, | |
399 | 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, | |
400 | VM_INHERIT_DEFAULT); | |
401 | if (kr != KERN_SUCCESS) | |
402 | kr = vm_allocate(mach_task_self(), | |
403 | &stackaddr, allocsize, | |
404 | VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE); | |
405 | if (kr != KERN_SUCCESS) { | |
406 | return EAGAIN; | |
407 | } | |
408 | /* The guard page is at the lowest address */ | |
409 | /* The stack base is the highest address */ | |
410 | if (guardsize) | |
411 | kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE); | |
412 | ||
413 | ||
414 | *stack = (void *)(stackaddr + attrs->stacksize + guardsize); | |
415 | ||
416 | t = (pthread_t)(stackaddr + attrs->stacksize + guardsize); | |
417 | _pthread_struct_init(t, attrs, *stack, 0, 0, 1); | |
418 | t->kernalloc = 0; | |
419 | t->freesize = allocsize; | |
420 | t->freeaddr = (void *)stackaddr; | |
421 | t->freeStackOnExit = 1; | |
422 | *thread = t; | |
423 | ||
424 | return 0; | |
425 | } | |
426 | ||
/*
 * Release the resources owned by thread t.
 *
 *   freestruct: also free the pthread_t itself and remove it from the
 *               global thread list.
 *   termthread: the calling thread IS t and is exiting; after cleanup
 *               the kernel is asked to terminate it, so this function
 *               does not return on that path.
 *
 * When t->freeStackOnExit is set, the stack (and, for freestruct, the
 * pthread struct that shares its vm region) is returned to the kernel;
 * otherwise only the malloc'd struct is freed.
 */
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	int thread_count;
	mach_port_t kport;
	semaphore_t joinsem = SEMAPHORE_NULL;

#if PTH_TRACE
	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
#endif
	/* capture these before the struct may be freed below */
	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		/* stack (and possibly the struct) live in one vm region */
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
#if PTH_LISTTRACE
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
#endif
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			t->childexit = 1;
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

#if PTH_TRACE
			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
#endif
			/* last thread exiting terminates the process */
			if (thread_count <=0)
				exit(0);
			else
				/* kernel frees the region and terminates us */
				__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else {
#if PTH_TRACE
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
#endif
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		/* caller-supplied stack: we only own the malloc'd struct */
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
#if PTH_LISTTRACE
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
#endif
			}
			thread_count = --_pthread_count;
			t->childexit = 1;
			UNLOCK(_pthread_list_lock);

			if (freestruct) {
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
#endif
				free(t);
			}

			freeaddr = 0;
			freesize = 0;
#if PTH_TRACE
			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
#endif

			/* last thread exiting terminates the process */
			if (thread_count <=0)
				exit(0);
			else
				__bsdthread_terminate(NULL, 0, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
#endif
			free(t);
		}
	}
	return(res);
}
521 | ||
e9ce8d39 | 522 | |
5b2abdfb | 523 | |
e9ce8d39 A |
524 | /* |
525 | * Destroy a thread attribute structure | |
526 | */ | |
527 | int | |
528 | pthread_attr_destroy(pthread_attr_t *attr) | |
529 | { | |
530 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
531 | { | |
224c7076 A |
532 | attr->sig = 0; |
533 | return (0); | |
e9ce8d39 A |
534 | } else |
535 | { | |
536 | return (EINVAL); /* Not an attribute structure! */ | |
537 | } | |
538 | } | |
539 | ||
540 | /* | |
541 | * Get the 'detach' state from a thread attribute structure. | |
542 | * Note: written as a helper function for info hiding | |
543 | */ | |
544 | int | |
545 | pthread_attr_getdetachstate(const pthread_attr_t *attr, | |
546 | int *detachstate) | |
547 | { | |
548 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
549 | { | |
550 | *detachstate = attr->detached; | |
224c7076 | 551 | return (0); |
e9ce8d39 A |
552 | } else |
553 | { | |
554 | return (EINVAL); /* Not an attribute structure! */ | |
555 | } | |
556 | } | |
557 | ||
558 | /* | |
559 | * Get the 'inherit scheduling' info from a thread attribute structure. | |
560 | * Note: written as a helper function for info hiding | |
561 | */ | |
562 | int | |
563 | pthread_attr_getinheritsched(const pthread_attr_t *attr, | |
564 | int *inheritsched) | |
565 | { | |
566 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
567 | { | |
568 | *inheritsched = attr->inherit; | |
224c7076 | 569 | return (0); |
e9ce8d39 A |
570 | } else |
571 | { | |
572 | return (EINVAL); /* Not an attribute structure! */ | |
573 | } | |
574 | } | |
575 | ||
576 | /* | |
577 | * Get the scheduling parameters from a thread attribute structure. | |
578 | * Note: written as a helper function for info hiding | |
579 | */ | |
580 | int | |
581 | pthread_attr_getschedparam(const pthread_attr_t *attr, | |
582 | struct sched_param *param) | |
583 | { | |
584 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
585 | { | |
586 | *param = attr->param; | |
224c7076 | 587 | return (0); |
e9ce8d39 A |
588 | } else |
589 | { | |
590 | return (EINVAL); /* Not an attribute structure! */ | |
591 | } | |
592 | } | |
593 | ||
594 | /* | |
595 | * Get the scheduling policy from a thread attribute structure. | |
596 | * Note: written as a helper function for info hiding | |
597 | */ | |
598 | int | |
599 | pthread_attr_getschedpolicy(const pthread_attr_t *attr, | |
600 | int *policy) | |
601 | { | |
602 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
603 | { | |
604 | *policy = attr->policy; | |
224c7076 | 605 | return (0); |
e9ce8d39 A |
606 | } else |
607 | { | |
608 | return (EINVAL); /* Not an attribute structure! */ | |
609 | } | |
610 | } | |
611 | ||
9385eb3d A |
612 | /* Retain the existing stack size of 512K and not depend on Main thread default stack size */ |
613 | static const size_t DEFAULT_STACK_SIZE = (512*1024); | |
e9ce8d39 A |
614 | /* |
615 | * Initialize a thread attribute structure to default values. | |
616 | */ | |
617 | int | |
618 | pthread_attr_init(pthread_attr_t *attr) | |
619 | { | |
224c7076 A |
620 | attr->stacksize = DEFAULT_STACK_SIZE; |
621 | attr->stackaddr = NULL; | |
e9ce8d39 | 622 | attr->sig = _PTHREAD_ATTR_SIG; |
e9ce8d39 A |
623 | attr->param.sched_priority = default_priority; |
624 | attr->param.quantum = 10; /* quantum isn't public yet */ | |
e9ce8d39 | 625 | attr->detached = PTHREAD_CREATE_JOINABLE; |
9385eb3d A |
626 | attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED; |
627 | attr->policy = _PTHREAD_DEFAULT_POLICY; | |
224c7076 A |
628 | attr->freeStackOnExit = 1; |
629 | attr->fastpath = 1; | |
630 | attr->schedset = 0; | |
9385eb3d | 631 | attr->guardsize = vm_page_size; |
224c7076 | 632 | return (0); |
e9ce8d39 A |
633 | } |
634 | ||
635 | /* | |
636 | * Set the 'detach' state in a thread attribute structure. | |
637 | * Note: written as a helper function for info hiding | |
638 | */ | |
639 | int | |
640 | pthread_attr_setdetachstate(pthread_attr_t *attr, | |
641 | int detachstate) | |
642 | { | |
643 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
644 | { | |
645 | if ((detachstate == PTHREAD_CREATE_JOINABLE) || | |
646 | (detachstate == PTHREAD_CREATE_DETACHED)) | |
647 | { | |
648 | attr->detached = detachstate; | |
224c7076 | 649 | return (0); |
e9ce8d39 A |
650 | } else |
651 | { | |
652 | return (EINVAL); | |
653 | } | |
654 | } else | |
655 | { | |
656 | return (EINVAL); /* Not an attribute structure! */ | |
657 | } | |
658 | } | |
659 | ||
660 | /* | |
661 | * Set the 'inherit scheduling' state in a thread attribute structure. | |
662 | * Note: written as a helper function for info hiding | |
663 | */ | |
664 | int | |
665 | pthread_attr_setinheritsched(pthread_attr_t *attr, | |
666 | int inheritsched) | |
667 | { | |
668 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
669 | { | |
670 | if ((inheritsched == PTHREAD_INHERIT_SCHED) || | |
671 | (inheritsched == PTHREAD_EXPLICIT_SCHED)) | |
672 | { | |
673 | attr->inherit = inheritsched; | |
224c7076 | 674 | return (0); |
e9ce8d39 A |
675 | } else |
676 | { | |
677 | return (EINVAL); | |
678 | } | |
679 | } else | |
680 | { | |
681 | return (EINVAL); /* Not an attribute structure! */ | |
682 | } | |
683 | } | |
684 | ||
685 | /* | |
686 | * Set the scheduling paramters in a thread attribute structure. | |
687 | * Note: written as a helper function for info hiding | |
688 | */ | |
689 | int | |
690 | pthread_attr_setschedparam(pthread_attr_t *attr, | |
691 | const struct sched_param *param) | |
692 | { | |
693 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
694 | { | |
695 | /* TODO: Validate sched_param fields */ | |
696 | attr->param = *param; | |
224c7076 A |
697 | attr->schedset = 1; |
698 | return (0); | |
e9ce8d39 A |
699 | } else |
700 | { | |
701 | return (EINVAL); /* Not an attribute structure! */ | |
702 | } | |
703 | } | |
704 | ||
705 | /* | |
706 | * Set the scheduling policy in a thread attribute structure. | |
707 | * Note: written as a helper function for info hiding | |
708 | */ | |
709 | int | |
710 | pthread_attr_setschedpolicy(pthread_attr_t *attr, | |
711 | int policy) | |
712 | { | |
713 | if (attr->sig == _PTHREAD_ATTR_SIG) | |
714 | { | |
715 | if ((policy == SCHED_OTHER) || | |
716 | (policy == SCHED_RR) || | |
717 | (policy == SCHED_FIFO)) | |
718 | { | |
719 | attr->policy = policy; | |
224c7076 A |
720 | attr->schedset = 1; |
721 | return (0); | |
e9ce8d39 A |
722 | } else |
723 | { | |
724 | return (EINVAL); | |
725 | } | |
726 | } else | |
727 | { | |
728 | return (EINVAL); /* Not an attribute structure! */ | |
729 | } | |
730 | } | |
731 | ||
732 | /* | |
733 | * Set the scope for the thread. | |
734 | * We currently only provide PTHREAD_SCOPE_SYSTEM | |
735 | */ | |
736 | int | |
737 | pthread_attr_setscope(pthread_attr_t *attr, | |
738 | int scope) | |
739 | { | |
740 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
741 | if (scope == PTHREAD_SCOPE_SYSTEM) { | |
742 | /* No attribute yet for the scope */ | |
224c7076 | 743 | return (0); |
e9ce8d39 A |
744 | } else if (scope == PTHREAD_SCOPE_PROCESS) { |
745 | return (ENOTSUP); | |
746 | } | |
747 | } | |
748 | return (EINVAL); /* Not an attribute structure! */ | |
749 | } | |
750 | ||
751 | /* | |
752 | * Get the scope for the thread. | |
753 | * We currently only provide PTHREAD_SCOPE_SYSTEM | |
754 | */ | |
755 | int | |
224c7076 | 756 | pthread_attr_getscope(const pthread_attr_t *attr, |
e9ce8d39 A |
757 | int *scope) |
758 | { | |
759 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
760 | *scope = PTHREAD_SCOPE_SYSTEM; | |
224c7076 | 761 | return (0); |
e9ce8d39 A |
762 | } |
763 | return (EINVAL); /* Not an attribute structure! */ | |
764 | } | |
765 | ||
766 | /* Get the base stack address of the given thread */ | |
767 | int | |
768 | pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr) | |
769 | { | |
770 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
771 | *stackaddr = attr->stackaddr; | |
224c7076 | 772 | return (0); |
e9ce8d39 A |
773 | } else { |
774 | return (EINVAL); /* Not an attribute structure! */ | |
775 | } | |
776 | } | |
777 | ||
778 | int | |
779 | pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr) | |
780 | { | |
3d9156a7 | 781 | if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) { |
e9ce8d39 | 782 | attr->stackaddr = stackaddr; |
224c7076 A |
783 | attr->freeStackOnExit = 0; |
784 | attr->fastpath = 0; | |
785 | return (0); | |
e9ce8d39 A |
786 | } else { |
787 | return (EINVAL); /* Not an attribute structure! */ | |
788 | } | |
789 | } | |
790 | ||
791 | int | |
792 | pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize) | |
793 | { | |
794 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
795 | *stacksize = attr->stacksize; | |
224c7076 | 796 | return (0); |
e9ce8d39 A |
797 | } else { |
798 | return (EINVAL); /* Not an attribute structure! */ | |
799 | } | |
800 | } | |
801 | ||
802 | int | |
803 | pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) | |
804 | { | |
805 | if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) { | |
806 | attr->stacksize = stacksize; | |
224c7076 | 807 | return (0); |
e9ce8d39 A |
808 | } else { |
809 | return (EINVAL); /* Not an attribute structure! */ | |
810 | } | |
811 | } | |
812 | ||
5b2abdfb A |
813 | int |
814 | pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize) | |
815 | { | |
816 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
3d9156a7 | 817 | *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize); |
5b2abdfb | 818 | *stacksize = attr->stacksize; |
224c7076 | 819 | return (0); |
5b2abdfb A |
820 | } else { |
821 | return (EINVAL); /* Not an attribute structure! */ | |
822 | } | |
823 | } | |
824 | ||
9385eb3d A |
825 | /* By SUSV spec, the stackaddr is the base address, the lowest addressable |
826 | * byte address. This is not the same as in pthread_attr_setstackaddr. | |
827 | */ | |
5b2abdfb A |
828 | int |
829 | pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize) | |
830 | { | |
831 | if ((attr->sig == _PTHREAD_ATTR_SIG) && | |
3d9156a7 A |
832 | (((uintptr_t)stackaddr % vm_page_size) == 0) && |
833 | ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) { | |
834 | attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize); | |
5b2abdfb | 835 | attr->stacksize = stacksize; |
224c7076 A |
836 | attr->freeStackOnExit = 0; |
837 | attr->fastpath = 0; | |
838 | return (0); | |
5b2abdfb A |
839 | } else { |
840 | return (EINVAL); /* Not an attribute structure! */ | |
841 | } | |
842 | } | |
843 | ||
9385eb3d A |
844 | |
845 | /* | |
846 | * Set the guardsize attribute in the attr. | |
847 | */ | |
848 | int | |
849 | pthread_attr_setguardsize(pthread_attr_t *attr, | |
850 | size_t guardsize) | |
851 | { | |
852 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
853 | /* Guardsize of 0 is valid, ot means no guard */ | |
854 | if ((guardsize % vm_page_size) == 0) { | |
855 | attr->guardsize = guardsize; | |
224c7076 A |
856 | attr->fastpath = 0; |
857 | return (0); | |
9385eb3d A |
858 | } else |
859 | return(EINVAL); | |
860 | } | |
861 | return (EINVAL); /* Not an attribute structure! */ | |
862 | } | |
863 | ||
864 | /* | |
865 | * Get the guardsize attribute in the attr. | |
866 | */ | |
867 | int | |
868 | pthread_attr_getguardsize(const pthread_attr_t *attr, | |
869 | size_t *guardsize) | |
870 | { | |
871 | if (attr->sig == _PTHREAD_ATTR_SIG) { | |
872 | *guardsize = attr->guardsize; | |
224c7076 | 873 | return (0); |
9385eb3d A |
874 | } |
875 | return (EINVAL); /* Not an attribute structure! */ | |
876 | } | |
877 | ||
878 | ||
e9ce8d39 A |
879 | /* |
880 | * Create and start execution of a new thread. | |
881 | */ | |
882 | ||
/*
 * Entry trampoline for threads launched via _pthread_setup(): binds the
 * pthread_t to the running Mach thread, records the 64-bit kernel thread
 * id (on the architectures that provide __thread_selfid), then runs the
 * user start routine and hands its return value to _pthread_exit, which
 * never returns.
 */
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	/* (__uint64_t)-1 is the syscall's failure sentinel; only logged. */
	if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in _pthread_body\n");
#endif
	_pthread_exit(self, (self->fun)(self->arg));
}
893 | ||
224c7076 A |
/*
 * Kernel-facing entry point for threads created through __bsdthread_create.
 * Finishes initialization of the pthread structure (for kernel-allocated
 * threads the pthread_t lives at the top of its own stack), applies any
 * scheduling/detach flags encoded in pflags, links the thread onto the
 * global list if the parent has not already done so, then runs the user
 * function and exits through _pthread_exit.
 */
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
{
#if WQ_DEBUG
	pthread_t pself;
#endif
	pthread_attr_t *attrs = &_pthread_attr_default;
	char * stackaddr;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		/* Kernel-allocated: the pthread_t itself marks the stack top. */
		stackaddr = (char *)self;
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			/* Policy and priority were packed into pflags by the creator. */
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
	}
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list unless the parent already inserted us
	 * (parentcheck is set by the creating thread under the same lock). */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
#endif
		_pthread_count++;
	}
	/* Tell the parent (racing in _new_pthread_create_suspended) we ran. */
	self->childrun = 1;
	UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_start\n");
#endif

#if WQ_DEBUG
	pself = pthread_self();
	if (self != pself)
		LIBC_ABORT("self %p != pself %p", self, pself);
#endif
#if PTH_TRACE
	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
#endif

	_pthread_exit(self, (self->fun)(self->arg));
}
956 | ||
e9ce8d39 A |
/*
 * Initialize an old-style (pre-__bsdthread_create) pthread structure:
 * zero it, copy the creation attributes in, and, when a Mach thread
 * already exists, push the scheduling parameters down to the kernel.
 * Always returns 0 today (res is never set to anything else); the
 * do/while(0) shell remains from an earlier error-breakout structure.
 */
int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	int res;
	res = 0;

	do
	{
		memset(t, 0, sizeof(*t));
		t->newstyle = 0;
		t->schedset = 0;
		t->kernalloc = 0;
		t->tsd[0] = t;	/* TSD slot 0 is the thread's own pointer */
		t->max_tsd_key = 0;
		t->wqthread = 0;
		t->cur_workq = 0;
		t->cur_workitem = 0;
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
		t->guardsize = attrs->guardsize;
		t->kernel_thread = kernel_thread;
		t->detached = attrs->detached;
		t->inherit = attrs->inherit;
		t->policy = attrs->policy;
		t->param = attrs->param;
		t->freeStackOnExit = attrs->freeStackOnExit;
		t->cancel_error = 0;
		t->sig = _PTHREAD_SIG;
		t->reply_port = MACH_PORT_NULL;
		t->cthread_self = NULL;
		LOCK_INIT(t->lock);
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
		t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
		t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
		t->death = SEMAPHORE_NULL;

		/* Apply sched params now if the Mach thread already exists;
		 * failure is deliberately ignored (best-effort). */
		if (kernel_thread != MACH_PORT_NULL)
			(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
	} while (0);
	return (res);
}
1002 | ||
224c7076 A |
/*
 * Initialize a new-style pthread structure from the given attributes.
 *   stack/stacksize - the thread's stack (high address when kernalloc)
 *   kernalloc       - nonzero when the kernel allocated the thread+stack
 *                     as one region; stack base then derives from t itself
 *                     and the whole region is freed on exit
 *   nozero          - nonzero skips the memset (caller already zeroed or
 *                     the structure is live)
 */
void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;

	if (nozero == 0) {
		memset(t, 0, sizeof(*t));
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
	}
	t->schedset = attrs->schedset;
	t->tsd[0] = t;	/* TSD slot 0 is the thread's own pointer */
	if (kernalloc != 0) {
		/* The pthread_t sits at the top of the kernel-allocated
		 * region; the stack grows down from it. */
		stackaddr = (mach_vm_offset_t)(uintptr_t)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = (void *)(uintptr_t)stackaddr;
		t->freeStackOnExit = 1;
		/* freeaddr/freesize cover stack + guard page + pthread struct */
		t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->cancel_error = 0;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	LOCK_INIT(t->lock);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->newstyle = 1;
	t->kernalloc = kernalloc;
	t->wqthread = 0;
	t->cur_workq = 0;
	t->cur_workitem = 0;
	t->max_tsd_key = 0;
}
1048 | ||
5b2abdfb | 1049 | /* Need to deprecate this in future */ |
e9ce8d39 A |
1050 | int |
1051 | _pthread_is_threaded(void) | |
1052 | { | |
1053 | return __is_threaded; | |
1054 | } | |
1055 | ||
5b2abdfb A |
/* Non-portable public API to learn whether this process has (or has had) at
 * least one thread apart from the main thread. There is a race if a thread is
 * in the process of being created at the time of the call. It does not tell
 * whether more than one thread exists at this point in time.
 */
1061 | int | |
1062 | pthread_is_threaded_np(void) | |
1063 | { | |
1064 | return (__is_threaded); | |
1065 | } | |
1066 | ||
e9ce8d39 A |
/*
 * Return the Mach thread port underlying the given pthread, or
 * MACH_PORT_NULL / 0 when the thread is NULL or no longer valid.
 */
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == NULL)
		goto out;

	/*
	 * If the call is on self, return the kernel port. We cannot
	 * add this bypass for main thread as it might have exited,
	 * and we should not return stale port info.
	 */
	if (t == pthread_self())
	{
		kport = t->kernel_thread;
		goto out;
	}

	/* For any other thread, validate it against the global list first. */
	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return((mach_port_t)0);

out:
	return(kport);
}
5b2abdfb | 1092 | |
224c7076 A |
1093 | pthread_t pthread_from_mach_thread_np(mach_port_t kernel_thread) |
1094 | { | |
1095 | struct _pthread * p = NULL; | |
5b2abdfb | 1096 | |
224c7076 A |
1097 | /* No need to wait as mach port is already known */ |
1098 | LOCK(_pthread_list_lock); | |
1099 | TAILQ_FOREACH(p, &__pthread_head, plist) { | |
1100 | if (p->kernel_thread == kernel_thread) | |
1101 | break; | |
1102 | } | |
1103 | UNLOCK(_pthread_list_lock); | |
1104 | return p; | |
e9ce8d39 A |
1105 | } |
1106 | ||
/*
 * Return the stack size of the given thread.
 * NOTE(review): on error this returns an errno value (ESRCH or the
 * _pthread_find_thread result) through the size_t return, which a
 * caller cannot distinguish from a legitimate tiny stack size —
 * long-standing API quirk, kept for compatibility.
 */
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL)
		return(ESRCH);

	if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
	{
		size=t->stacksize;
		return size;
	}


	/* Any other thread must be validated under the list lock. */
	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}

	size=t->stacksize;
	UNLOCK(_pthread_list_lock);

	return(size);
}
1135 | ||
/*
 * Return the stack address of the given thread.
 * NOTE(review): on error this returns an errno value cast to void *
 * (e.g. (void *)ESRCH), which callers must know to check for —
 * long-standing API quirk, kept for compatibility.
 */
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void * addr = NULL;

	if (t == NULL)
		return((void *)(uintptr_t)ESRCH);

	if(t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
		return t->stackaddr;

	/* Any other thread must be validated under the list lock. */
	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return((void *)(uintptr_t)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);

	return(addr);
}
1159 | ||
1160 | mach_port_t | |
1161 | _pthread_reply_port(pthread_t t) | |
1162 | { | |
1163 | return t->reply_port; | |
1164 | } | |
1165 | ||
5b2abdfb A |
1166 | |
1167 | /* returns non-zero if the current thread is the main thread */ | |
1168 | int | |
1169 | pthread_main_np(void) | |
1170 | { | |
1171 | pthread_t self = pthread_self(); | |
1172 | ||
1173 | return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT); | |
1174 | } | |
1175 | ||
34e8f829 | 1176 | |
ad3c9f2a | 1177 | #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) |
34e8f829 A |
1178 | /* if we are passed in a pthread_t that is NULL, then we return |
1179 | the current thread's thread_id. So folks don't have to call | |
1180 | pthread_self, in addition to us doing it, if they just want | |
1181 | their thread_id. | |
1182 | */ | |
/*
 * Store the 64-bit kernel thread id of `thread` into *thread_id.
 * A NULL thread (or self) is answered from the current thread without
 * taking the list lock; any other thread is validated under
 * _pthread_list_lock first.  Returns 0 or an errno value.
 */
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
	int rval=0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return(EINVAL);
	} else if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
		return rval;
	}

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(rval);
	}
	*thread_id = thread->thread_id;
	UNLOCK(_pthread_list_lock);
	return rval;
}
1205 | #endif | |
1206 | ||
/*
 * Copy the given thread's name into threadname (at most len bytes,
 * always NUL-terminated via strlcpy).  Returns 0, ESRCH for a NULL
 * thread, or the _pthread_find_thread error for an invalid one.
 */
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int rval;
	rval = 0;

	if (thread == NULL)
		return(ESRCH);

	/* Validate the thread and read its name under the list lock so the
	 * structure cannot be freed out from under the copy. */
	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(rval);
	}
	strlcpy(threadname, thread->pthread_name, len);
	UNLOCK(_pthread_list_lock);
	return rval;
}
1225 | ||
1226 | int | |
1227 | pthread_setname_np(const char *threadname) | |
1228 | { | |
ad3c9f2a A |
1229 | int rval = 0; |
1230 | int len = 0; | |
1231 | pthread_t current_thread = pthread_self(); | |
34e8f829 | 1232 | |
ad3c9f2a A |
1233 | if (threadname != NULL) |
1234 | len = strlen(threadname); | |
1f2f436a A |
1235 | |
1236 | /* protytype is in pthread_internals.h */ | |
1237 | rval = proc_setthreadname((void *)threadname, len); | |
ad3c9f2a A |
1238 | if (rval == 0) { |
1239 | if (threadname != NULL) { | |
1240 | strlcpy(current_thread->pthread_name, threadname, MAXTHREADNAMESIZE); | |
1241 | } else { | |
1242 | memset(current_thread->pthread_name, 0 , MAXTHREADNAMESIZE); | |
1243 | } | |
1244 | ||
34e8f829 A |
1245 | } |
1246 | return rval; | |
1247 | ||
1248 | } | |
1249 | ||
224c7076 A |
/*
 * Modern thread-creation workhorse behind pthread_create() and friends.
 * Three creation paths:
 *   1. Old style (__oldstyle, e.g. under Rosetta) or explicit suspended
 *      creation: allocate the stack + pthread struct in userland, create
 *      the Mach thread directly, and launch via _pthread_setup.
 *   2. New style, userland allocation (fastpath off): allocate stack and
 *      struct with _pthread_create_pthread_onstack, then hand off to the
 *      kernel with __bsdthread_create(PTHREAD_START_CUSTOM).
 *   3. New style, kernel allocation (fastpath on): let the kernel
 *      allocate everything; only the requested stacksize is passed down.
 * In paths 2 and 3 the parent races the child (_pthread_start) to insert
 * the thread on the global list; parentcheck/childrun/childexit arbitrate
 * under _pthread_list_lock, including mopping up a detached child that
 * has already exited.  Returns 0 or an errno value.
 */
static int
_new_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags;
	pthread_t t,t2;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default paramters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 * Non-default scheduling forces a suspended start so the params can
	 * be applied before the thread runs; needresume resumes it after.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
		(attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		susp = 1;
	} else
		needresume = 0;

	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* Allocate a stack for the thread */
#if PTH_TRACE
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
#endif
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return(error);
		}
		/* NOTE(review): malloc result is not checked before use. */
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (susp) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				return(EINVAL);
			}
		}
		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return(error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		t->newstyle = 0;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return(0);
	} else {

		/* Encode detach/scheduling attributes into the flags word
		 * consumed by the kernel and by _pthread_start. */
		flags = 0;
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Userland allocates the stack and pthread struct. */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return(error);
			}
			/* Send it on its way */
			t->arg = arg;
			t->fun = start_routine;
			t->newstyle = 1;

#if PTH_TRACE
			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);
#endif

			if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			}
			else t=t2;
			/* Race with the child over list insertion / cleanup. */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
#endif
				if(t->freeStackOnExit)
					vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
				else
					free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
#if PTH_LISTTRACE
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
#endif
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;

#if PTH_TRACE
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
#endif
			return (0);

		} else {
			/* kernel allocation: kernel builds stack + pthread
			 * struct; only the desired stacksize is passed. */
#if PTH_TRACE
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
#endif
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
#endif
				vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
#if PTH_LISTTRACE
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
#endif
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;

#if PTH_TRACE
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
#endif
			return(0);
		}
	}
}
1431 | ||
e9ce8d39 A |
/*
 * Old-style creation path used by pthread_create_suspended_np():
 * userland allocates the stack and pthread struct, creates the Mach
 * thread directly when a suspended start is needed, and launches via
 * _pthread_setup.  Non-default scheduling forces a suspended start
 * (needresume makes _pthread_setup resume it afterwards).
 * Returns 0 or an errno value.
 */
static int
_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default paramters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
		(attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	do
	{
		/* Allocate a stack for the thread */
		if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
			break;
		}
		/* NOTE(review): malloc result is not checked before use. */
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (suspended) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
		}
		if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			break;
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	} while (0);
	return (res);
}
1505 | ||
/*
 * Create and start execution of a new thread (POSIX entry point).
 * Delegates to the modern path with create_susp == 0 so the thread
 * begins running immediately.
 */
int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}
1514 | ||
/*
 * Non-portable: create a thread left suspended until explicitly resumed.
 * Uses the old-style creation path, which creates the Mach thread
 * directly so it can be held suspended.
 */
int
pthread_create_suspended_np(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
1523 | ||
1524 | /* | |
1525 | * Make a thread 'undetached' - no longer 'joinable' with other threads. | |
1526 | */ | |
/*
 * Make a thread 'undetached' - no longer 'joinable' with other threads.
 * If the target has already exited, its resources are reclaimed via
 * pthread_join(); otherwise the detach flags are flipped under the
 * thread's lock and any waiter (old-style death semaphore or new-style
 * joiner_notify port) is signalled.  Returns 0, EINVAL if the thread is
 * not joinable, or the lookup error for an invalid thread.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0) {
		return (ret); /* Not a valid thread */
	}

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			/* Already exited: join reclaims it immediately. */
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return 0;
		} else {
			if (newstyle == 0) {
				/* Old-style threads park on a death semaphore. */
				semaphore_t death = thread->death;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;
				UNLOCK(thread->lock);
				if (death)
					(void) semaphore_signal(death);
			} else {
				/* New-style threads wait on joiner_notify. */
				mach_port_t joinport = thread->joiner_notify;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;

				UNLOCK(thread->lock);
				if (joinport) {
					semaphore_signal(joinport);
				}
			}
			return(0);
		}
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}
1572 | ||
e9ce8d39 | 1573 | |
ad3c9f2a | 1574 | /* |
5b2abdfb A |
1575 | * pthread_kill call to system call |
1576 | */ | |
5b2abdfb A |
/*
 * Deliver signal `sig` to thread `th` via the __pthread_kill system call.
 * Returns 0 or an errno value (EINVAL for a bad signal number, ESRCH for
 * an invalid thread, ENOTSUP for a workqueue thread that has not opted
 * in via __pthread_workqueue_setkill).
 */
int
pthread_kill (
        pthread_t th,
        int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return(EINVAL);

	/* Validates th and fetches its Mach port in one step. */
	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	/* if the thread is a workqueue thread, just return error */
	/* NOTE(review): th's fields are read here without the list lock;
	 * presumably safe because lookup just validated it — confirm. */
	if ((th->wqthread != 0) && (th->wqkillset ==0)) {
		return(ENOTSUP);
	}

	error = __pthread_kill(kport, sig);

	if (error == -1)
		error = errno;
	return(error);
}
1602 | ||
34e8f829 A |
1603 | int |
1604 | __pthread_workqueue_setkill(int enable) | |
1605 | { | |
1606 | pthread_t self = pthread_self(); | |
1607 | ||
1608 | LOCK(self->lock); | |
1609 | if (enable == 0) | |
1610 | self->wqkillset = 0; | |
1611 | else | |
1612 | self->wqkillset = 1; | |
1613 | UNLOCK(self->lock); | |
1614 | ||
1615 | return(0); | |
1616 | ||
1617 | } | |
1618 | ||
5b2abdfb A |
1619 | /* Announce that there are pthread resources ready to be reclaimed in a */ |
1620 | /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */ | |
1621 | /* thread underneath is terminated right away. */ | |
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	/* MOVE_SEND transfers our send right on kernel_thread to the
	 * recycle port's queue; the reaper deallocates it later. */
	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144;	/* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}
1637 | ||
5b2abdfb | 1638 | /* Reap the resources for available threads */ |
/*
 * Reap one dead thread's resources: drop the Mach thread port (unless it
 * is still alive, in which case EAGAIN asks the caller to retry), release
 * the MIG reply port, free the stack if we own it, deliver the exit value
 * (honoring a pending cancel in conforming mode), and free the structure
 * itself.  Returns 0 on success or EAGAIN if the thread is not dead yet.
 */
__private_extern__
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		/* stackaddr is the high end; the allocation (stack + guard)
		 * begins `size` bytes below it. */
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}


	if (value_ptr)
		*value_ptr = th->exit_value;
	if (conforming) {
		/* A thread cancelled while cancelable reports PTHREAD_CANCELED. */
		if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
			*value_ptr = PTHREAD_CANCELED;
		th->sig = _PTHREAD_NO_SIG;
	}


	/* The statically allocated main-thread structure is never freed. */
	if (th != &_thread)
		free(th);

	return 0;
}
1701 | ||
/*
 * Drain and reap every dead thread queued on the recycle port.
 * The receive uses MACH_RCV_TIMEOUT with MACH_MSG_TIMEOUT_NONE, making each
 * mach_msg() a non-blocking poll; the loop stops when the port is empty or
 * when a thread turns out not to be dead yet (it is re-queued instead).
 */
static
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			sizeof msg, thread_recycle_port,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		/* deal with race with thread_create_running() */
		if (kernel_thread == MACH_PORT_NULL &&
		    kernel_thread != thread->kernel_thread) {
			kernel_thread = thread->kernel_thread;
		}

		if ( kernel_thread == MACH_PORT_NULL ||
		     _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}

		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
				sizeof msg, thread_recycle_port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}
1734 | ||
/* For compatibility... */

/*
 * Legacy entry point kept for binary compatibility with old clients;
 * it simply forwards to pthread_self().
 */
pthread_t
_pthread_self() {
	pthread_t current = pthread_self();
	return current;
}
1741 | ||
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

/*
 * Common thread-exit path: blocks signals, runs cleanup handlers and TSD
 * destructors, records the exit value, wakes any joiner, and terminates
 * the underlying Mach thread.  Never returns; falls through to LIBC_ABORT
 * only if one of the terminate paths unexpectedly comes back.
 */
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
	int newstyle = self->newstyle;

	/* Make this thread not to receive any signals */
	__disable_threadsignal(1);

#if PTH_TRACE
	__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
#endif

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	/* Pop and run every pthread_cleanup_push() handler, newest first. */
	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	if (newstyle == 0) {
		/* Old-style (Mach-managed) threads: recycle via the reap port. */
		_pthread_reap_threads();

		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			mach_port_t death = self->death;
			self->exit_value = value_ptr;
			UNLOCK(self->lock);
			/* the joiner will need a kernel thread reference, leave ours for it */
			if (death) {
				PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
				if (kern_res != KERN_SUCCESS)
					fprintf(stderr,
						"semaphore_signal(death) failed: %s\n",
						mach_error_string(kern_res));
			}
			LOCK(_pthread_list_lock);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
		} else {
			UNLOCK(self->lock);
			LOCK(_pthread_list_lock);
			TAILQ_REMOVE(&__pthread_head, self, plist);
#if PTH_LISTTRACE
			__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
#endif
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			/* with no joiner, we let become available consume our cached ref */
			_pthread_become_available(self, self->kernel_thread);
		}

		/* Last thread out exits the whole process. */
		if (thread_count <= 0)
			exit(0);

		/* Use a new reference to terminate ourselves. Should never return. */
		PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
		fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
			mach_error_string(kern_res));
	} else {
		/* New-style (bsdthread) threads: signal the joiner through a
		 * pooled semaphore and free the pthread while still on its
		 * own stack via _pthread_free_pthread_onstack(). */
		semaphore_t joinsem = SEMAPHORE_NULL;

		/* Grab the pool semaphore before taking self->lock. */
		if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == (mach_port_t)0) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			/* A joiner raced us to install a semaphore; return ours. */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
	LIBC_ABORT("thread %p didn't exit", self);
}
1841 | ||
3d9156a7 A |
1842 | void |
1843 | pthread_exit(void *value_ptr) | |
e9ce8d39 | 1844 | { |
224c7076 | 1845 | pthread_t self = pthread_self(); |
34e8f829 A |
1846 | /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */ |
1847 | if (self->wqthread == 0) { | |
224c7076 | 1848 | _pthread_exit(self, value_ptr); |
34e8f829 A |
1849 | } else { |
1850 | LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()"); | |
1851 | } | |
e9ce8d39 A |
1852 | } |
1853 | ||
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 * Returns ESRCH when 'thread' is NULL or no longer on the live-thread
 * list; either out-pointer may be NULL to skip that value.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	/* Validate the pointer against the live list before dereferencing. */
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);

	return(0);
}
1881 | ||
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 * Translates the POSIX policy/param pair into the corresponding Mach
 * policy base structure and applies it with thread_policy().
 * Returns 0 on success, EINVAL for an unknown policy or a Mach failure.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
		 mach_port_t kport,
		 int policy,
		 const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
	}
	/* TRUE: also change the thread's limit so the new base sticks. */
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}
1923 | ||
/*
 * Public setter: resolves the target's kernel port (bypassing the lookup
 * for the calling thread and the main thread, which cannot be deallocated
 * underneath us), applies the policy, then re-validates the thread before
 * caching policy/param in the pthread structure.
 */
int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error;
	int bypass = 1;

	if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
		bypass = 0;
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return(ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		if (bypass == 0) {
			/* ensure the thread is still valid */
			LOCK(_pthread_list_lock);
			if ((error = _pthread_find_thread(t)) != 0) {
				UNLOCK(_pthread_list_lock);
				return(error);
			}
			t->policy = policy;
			t->param = *param;
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return(error);
}
1959 | ||
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	/* 'policy' is ignored: the range is a fixed band of 16 below the
	 * host default timeshare priority captured in pthread_init(). */
	return default_priority - 16;
}
1968 | ||
1969 | /* | |
1970 | * Get the maximum priority for the given policy | |
1971 | */ | |
1972 | int | |
1973 | sched_get_priority_max(int policy) | |
1974 | { | |
1975 | return default_priority + 16; | |
1976 | } | |
1977 | ||
/*
 * Determine if two thread identifiers represent the same thread.
 * Per POSIX, returns non-zero when equal and zero otherwise.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return t1 == t2 ? 1 : 0;
}
1987 | ||
// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
// then _pthread_set_self won't be bound when secondary threads try and start up.
void __attribute__((noinline))
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	if (p == 0) {
		/* Bootstrap/reset path: scrub the static main-thread
		 * structure if it was already in use, then adopt it. */
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}
	/* TSD slot 0 always points back at the owning pthread; the kernel
	 * thread-self base is set to the start of the tsd array. */
	p->tsd[0] = p;
	__pthread_set_self(&p->tsd[0]);
}
2004 | ||
e9ce8d39 A |
2005 | void |
2006 | cthread_set_self(void *cself) | |
2007 | { | |
2008 | pthread_t self = pthread_self(); | |
2009 | if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) { | |
2010 | _pthread_set_self(cself); | |
2011 | return; | |
2012 | } | |
2013 | self->cthread_self = cself; | |
2014 | } | |
2015 | ||
2016 | void * | |
2017 | ur_cthread_self(void) { | |
2018 | pthread_t self = pthread_self(); | |
2019 | if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) { | |
2020 | return (void *)self; | |
2021 | } | |
2022 | return self->cthread_self; | |
2023 | } | |
2024 | ||
/*
 * cancellation handler for pthread once as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	/* Release the once lock pthread_once() was holding when the init
	 * routine got cancelled, so other waiters can proceed. */
	_spin_unlock(&once_control->lock);
}
2034 | ||
2035 | ||
/*
 * Execute a function exactly one time in a thread-safe fashion.
 *
 * The spin lock is held across the init routine, so concurrent callers
 * serialize until it finishes; the pushed cleanup handler drops the lock
 * if the init routine is cancelled partway through.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		/* Mark completed only after the routine returned normally. */
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0);  /* Spec defines no possible errors! */
}
2054 | ||
/*
 * Insert a cancellation point in a thread.
 * If cancellation is both enabled and pending, the thread exits here and
 * this function does not return.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		/* Conforming (UNIX2003) callers observe PTHREAD_CANCELED as
		 * the exit value; legacy callers exit with 0. */
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}
2073 | ||
e9ce8d39 | 2074 | |
e9ce8d39 | 2075 | |
5b2abdfb A |
2076 | int |
2077 | pthread_getconcurrency(void) | |
2078 | { | |
2079 | return(pthread_concurrency); | |
2080 | } | |
2081 | ||
2082 | int | |
2083 | pthread_setconcurrency(int new_level) | |
2084 | { | |
224c7076 A |
2085 | if (new_level < 0) |
2086 | return EINVAL; | |
5b2abdfb | 2087 | pthread_concurrency = new_level; |
224c7076 | 2088 | return(0); |
5b2abdfb A |
2089 | } |
2090 | ||
e9ce8d39 A |
/*
 * Perform package initialization - called automatically when application starts
 *
 * Sets up: host priority defaults, the default attr, the live-thread list
 * seeded with the static main-thread structure, TSD, the main thread's
 * stack bounds, CPU-count-based workqueue targets, the recycle port, MIG
 * multithreading, and (new-style) the bsdthread kernel registration.
 */
int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	int ncpus = 0;
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof (struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		/* These feed sched_get_priority_min()/max() later on. */
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
#endif

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	/* Discover the main thread's stack top from the kernel, falling
	 * back to the static USRSTACK if the sysctl fails. */
	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof (stackaddr);
	if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if ((ncpus = _NumCPUs()) > 1)
		_spin_tries = MP_SPIN_TRIES;

	/* Default per-priority workqueue concurrency targets = CPU count. */
	workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_BG_PRIOQUEUE] = ncpus;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
	IF_ROSETTA() {
		__oldstyle = 1;
	}
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			&objcRTPage, vm_page_size * 4, vm_page_size - 1,
			VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
			MACH_PORT_NULL,
			(vm_address_t)0, FALSE,
			(vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif
	//added so that thread_recycle_port is initialized on new launch.
	_pthread_fork_child_postinit();
	mig_init(1); /* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&thread->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (uintptr_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (uintptr_t)thread);
#endif
	}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_init\n");
#endif
	return 0;
}
2194 | ||
int sched_yield(void)
{
	/* swtch_pri(0) is the Mach trap that yields the processor. */
	swtch_pri(0);
	return 0;
}
2200 | ||
/* This used to be the "magic" that gets the initialization routine called when the application starts */
/*
 * (These have been moved to setenv.c, so we can use them to fix a pre-10.5
 * crt1.o issue)
 * static int _do_nothing(void) { return 0; }
 * int (*_cthread_init_routine)(void) = _do_nothing;
 */
e9ce8d39 A |
2208 | |
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		/* Pool exhausted: grow by 16 and create the new semaphores
		 * up front.
		 * NOTE(review): realloc's result is assigned straight back to
		 * sem_pool, so on allocation failure the old pool leaks and
		 * the fill loop dereferences NULL — acceptable only if OOM is
		 * treated as fatal here; a temporary pointer would be safer. */
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}
2228 | ||
/* Put a semaphore back into the pool */
/* The pool behaves as a stack: the slot just below the current cursor
 * receives the returned semaphore. */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}
2235 | ||
/* Forget the entire pool without freeing or destroying anything; called
 * from _pthread_fork_child(), where the child must not touch state that
 * belonged to the parent. */
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}
2243 | ||
/* Rebuild per-process pthread state in the child after fork(); 'p' is the
 * only thread that survives into the child. */
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, p, 0, 0, 10, 0);
#endif
	_pthread_count = 1;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}
2263 | ||
/* (Re)create the receive right used to collect dead-thread notifications;
 * called both at process start (from pthread_init) and in the child after
 * fork.  Failure to allocate the port is unrecoverable. */
void _pthread_fork_child_postinit() {
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &thread_recycle_port);
	if (kr != KERN_SUCCESS) {
		abort();
	}
}
2272 | ||
224c7076 A |
2273 | /* |
2274 | * Query/update the cancelability 'state' of a thread | |
2275 | */ | |
2276 | int | |
2277 | _pthread_setcancelstate_internal(int state, int *oldstate, int conforming) | |
3d9156a7 | 2278 | { |
224c7076 | 2279 | pthread_t self = pthread_self(); |
3d9156a7 | 2280 | |
3d9156a7 | 2281 | |
224c7076 A |
2282 | switch (state) { |
2283 | case PTHREAD_CANCEL_ENABLE: | |
2284 | if (conforming) | |
2285 | __pthread_canceled(1); | |
2286 | break; | |
2287 | case PTHREAD_CANCEL_DISABLE: | |
2288 | if (conforming) | |
2289 | __pthread_canceled(2); | |
2290 | break; | |
2291 | default: | |
2292 | return EINVAL; | |
3d9156a7 | 2293 | } |
3d9156a7 | 2294 | |
224c7076 A |
2295 | self = pthread_self(); |
2296 | LOCK(self->lock); | |
2297 | if (oldstate) | |
2298 | *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK; | |
2299 | self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK; | |
2300 | self->cancel_state |= state; | |
2301 | UNLOCK(self->lock); | |
2302 | if (!conforming) | |
2303 | _pthread_testcancel(self, 0); /* See if we need to 'die' now... */ | |
2304 | return (0); | |
2305 | } | |
3d9156a7 | 2306 | |
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	/* Record that the thread died due to cancellation (rdar://4597450). */
	if ((value_ptr == PTHREAD_CANCELED)) {
// 4597450: begin
		self->detached |= _PTHREAD_WASCANCEL;
// 4597450: end
	}
	UNLOCK(self->lock);
}
3d9156a7 | 2321 | |
/*
 * Final stage of pthread_join(): re-validate the target, unlink it from
 * the live list, publish its exit value, and release its ports and
 * pthread structure.  Returns 0 on success or ESRCH if the thread vanished
 * or was detached while the joiner was waiting.
 */
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	kern_return_t res;
	int ret;

#if PTH_TRACE
	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
#endif
	/* The scenario where the joiner was waiting for the thread and
	 * the pthread detach happened on that thread. Then the semaphore
	 * will trigger but by the time joiner runs, the target thread could be
	 * freed. So we need to make sure that the thread is still in the list
	 * and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		/* returns ESRCH */
		return(ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return(ESRCH);

	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
#endif
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		/* UNIX2003: a cancelled thread reports PTHREAD_CANCELED. */
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	/* Invalidate the signature, then release the structure with whichever
	 * allocator produced it (vm region vs. malloc). */
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
#endif
		vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
#endif
		free(thread);
	}
	return(0);
}
3d9156a7 | 2385 | |
/* ALWAYS called with list lock and return with list lock */
/*
 * Scan the live-thread list for 'thread'.  Returns 0 if found, ESRCH
 * otherwise.  If the entry exists but its kernel thread has not been
 * filled in yet (creation still in flight), the list lock is dropped,
 * the CPU yielded, and the scan restarted until the port is set.
 */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return(0);
		}
	}
	return(ESRCH);
}
2406 | ||
/*
 * Validate 'thread' against the live list and return its kernel port via
 * 'portp'.  With 'only_joinable' set, detached threads are rejected with
 * EINVAL (used by join-type callers).  Returns 0, ESRCH, or EINVAL.
 */
int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return(EINVAL);
	}
	/* Snapshot the port while still holding the list lock. */
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return(0);
}
3d9156a7 | 2432 | |
224c7076 A |
2433 | /* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */ |
2434 | int | |
2435 | pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp) | |
2436 | { | |
34e8f829 A |
2437 | attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE; |
2438 | attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG; | |
2439 | attrp->overcommit = 0; | |
224c7076 A |
2440 | return(0); |
2441 | } | |
2442 | ||
2443 | int | |
2444 | pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr) | |
2445 | { | |
34e8f829 | 2446 | if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) |
3d9156a7 | 2447 | { |
224c7076 | 2448 | return (0); |
3d9156a7 A |
2449 | } else |
2450 | { | |
224c7076 | 2451 | return (EINVAL); /* Not an attribute structure! */ |
3d9156a7 A |
2452 | } |
2453 | } | |
2454 | ||
224c7076 | 2455 | int |
34e8f829 | 2456 | pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop) |
224c7076 | 2457 | { |
34e8f829 A |
2458 | if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) { |
2459 | *qpriop = attr->queueprio; | |
224c7076 A |
2460 | return (0); |
2461 | } else { | |
2462 | return (EINVAL); /* Not an attribute structure! */ | |
2463 | } | |
2464 | } | |
3d9156a7 | 2465 | |
3d9156a7 | 2466 | |
224c7076 | 2467 | int |
34e8f829 | 2468 | pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio) |
224c7076 | 2469 | { |
34e8f829 | 2470 | int error = 0; |
224c7076 | 2471 | |
34e8f829 A |
2472 | if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) { |
2473 | switch(qprio) { | |
2474 | case WORKQ_HIGH_PRIOQUEUE: | |
2475 | case WORKQ_DEFAULT_PRIOQUEUE: | |
2476 | case WORKQ_LOW_PRIOQUEUE: | |
1f2f436a | 2477 | case WORKQ_BG_PRIOQUEUE: |
34e8f829 A |
2478 | attr->queueprio = qprio; |
2479 | break; | |
2480 | default: | |
2481 | error = EINVAL; | |
2482 | } | |
224c7076 | 2483 | } else { |
34e8f829 | 2484 | error = EINVAL; |
224c7076 | 2485 | } |
34e8f829 | 2486 | return (error); |
224c7076 | 2487 | } |
3d9156a7 | 2488 | |
3d9156a7 | 2489 | |
224c7076 | 2490 | int |
34e8f829 | 2491 | pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp) |
224c7076 | 2492 | { |
34e8f829 A |
2493 | if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) { |
2494 | *ocommp = attr->overcommit; | |
224c7076 A |
2495 | return (0); |
2496 | } else { | |
2497 | return (EINVAL); /* Not an attribute structure! */ | |
2498 | } | |
2499 | } | |
2500 | ||
224c7076 A |
2501 | |
2502 | int | |
34e8f829 | 2503 | pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm) |
224c7076 | 2504 | { |
34e8f829 | 2505 | int error = 0; |
224c7076 | 2506 | |
34e8f829 A |
2507 | if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) { |
2508 | attr->overcommit = ocomm; | |
224c7076 | 2509 | } else { |
34e8f829 | 2510 | error = EINVAL; |
224c7076 | 2511 | } |
34e8f829 | 2512 | return (error); |
224c7076 | 2513 | } |
224c7076 A |
2514 | /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */ |
2515 | ||
/* Take the global spinlock that guards the workqueue/workitem lists and pools. */
static void
workqueue_list_lock()
{
	OSSpinLockLock(&__workqueue_list_lock);
}
2521 | ||
/* Release the global spinlock taken by workqueue_list_lock(). */
static void
workqueue_list_unlock()
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}
2527 | ||
2528 | int | |
2529 | pthread_workqueue_init_np() | |
2530 | { | |
2531 | int ret; | |
2532 | ||
ad3c9f2a A |
2533 | if (__workqueue_newspis != 0) |
2534 | return(EPERM); | |
2535 | __workqueue_oldspis = 1; | |
2536 | ||
224c7076 A |
2537 | workqueue_list_lock(); |
2538 | ret =_pthread_work_internal_init(); | |
2539 | workqueue_list_unlock(); | |
2540 | ||
2541 | return(ret); | |
2542 | } | |
2543 | ||
34e8f829 A |
2544 | int |
2545 | pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency) | |
2546 | { | |
2547 | int error = 0; | |
2548 | ||
ad3c9f2a A |
2549 | if (__workqueue_newspis != 0) |
2550 | return(EPERM); | |
2551 | ||
34e8f829 A |
2552 | if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE) |
2553 | return(EINVAL); | |
2554 | ||
2555 | error =__workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue); | |
2556 | ||
2557 | if (error == -1) | |
2558 | return(errno); | |
2559 | return(0); | |
2560 | } | |
2561 | ||
/*
 * fork() "prepare" hook for the workqueue subsystem; currently just
 * forwards to libdispatch so it can quiesce before the fork.
 */
void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_prepare();
}
2571 | ||
/*
 * fork() "parent" hook for the workqueue subsystem; forwards to
 * libdispatch so the parent process can resume after the fork.
 */
void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_parent();
}
2581 | ||
/*
 * fork() "child" hook: re-arm the workqueue subsystem in the child.
 * The child gets a fresh list lock, re-registers with the kernel for
 * whichever SPI (new libdispatch SPI vs. old SPI) was active in the
 * parent, and finally lets libdispatch do its own child-side reset.
 */
void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();

	/* Locks do not survive fork(); start from an unlocked state. */
	__workqueue_list_lock = OS_SPINLOCK_INIT;

	/* already using new spis? */
	if (__workqueue_newspis != 0) {
		/* prepare the kernel for workq action */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL,0);
#endif
		(void)__workq_open();
		kernel_workq_setup = 1;
		return;
	}

	/* not using old spis either? */
	if (__workqueue_oldspis == 0)
		return;

	/*
	 * NOTE: workq additions here
	 * are for i386,x86_64 only as
	 * ppc and arm do not support it
	 */
	/* Old SPI: force a full re-init so queues/pools are rebuilt in the child. */
	if (kernel_workq_setup != 0){
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}
2619 | ||
224c7076 A |
/*
 * One-time setup for the old-SPI workqueue machinery (idempotent via
 * kernel_workq_setup): registers the thread-start entry points with the
 * kernel, initializes the per-priority queue heads, maps a page-rounded
 * pool for work items, pre-populates the workqueue structure pool, and
 * opens the kernel workqueue.  Returns 0 on success or ENOMEM if the
 * kernel open fails.  Caller holds the workqueue list lock.
 */
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t wq;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();
#endif

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		/* Tell the kernel where new (wq)threads should start executing. */
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL,0);
#endif

		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		/* Empty round-robin list head per priority band. */
		for( i = 0; i< WORKQ_NUM_PRIOQUEUE; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		__workqueue_pool_ptr = NULL;
		__workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);

		/* NOTE(review): fd is 0 here; MAP_ANON conventionally takes fd -1 —
		 * presumably tolerated on this platform, confirm. */
		__workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
				PROT_READ|PROT_WRITE,
				MAP_ANON | MAP_PRIVATE,
				0,
				0);

		if (__workqueue_pool_ptr == MAP_FAILED) {
			/* Not expected to fail, if it does, always malloc for work items */
			/* Marking the pool "full" stops alloc_workitem() from growing it. */
			__workqueue_nitems = WORKITEM_POOL_SIZE;
			__workqueue_pool_ptr = NULL;
		} else
			__workqueue_nitems = 0;

		/* sets up the workitem pool */
		/* NOTE(review): on the mmap-failure path above this is reached with a
		 * NULL pool pointer — grow_workitem() would index off NULL; verify. */
		grow_workitem();

		/* since the size is less than a page, leaving this in malloc pool */
		/* NOTE(review): malloc result is not checked before bzero — OOM here
		 * would crash; unexpected during early init, but worth confirming. */
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		/* Open the kernel side; on failure undo all pool setup. */
		if (error = __workq_open()) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			if (__workqueue_pool_ptr != NULL) {
				munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
			}
			free(wq);
			return(ENOMEM);
		}
		kernel_workq_setup = 1;
	}
	return(0);
}
2685 | ||
2686 | ||
/* This routine is called with list lock held */
/*
 * Hand out a work item: preferably from the mmap-backed cache (growing it
 * in chunks until WORKITEM_POOL_SIZE), otherwise from malloc once the
 * cache is exhausted.  The `fromcache` flag records the origin so
 * free_workitem() knows whether to free() or recycle.
 */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the chunk size is set so some multiple of it is pool size */
		if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
			grow_workitem();
		} else {
			/* Drop the list lock around malloc; it may take a while. */
			workqueue_list_unlock();
			/* NOTE(review): malloc result is used unchecked — OOM would
			 * fault on the next line; confirm callers accept that. */
			witem = malloc(sizeof(struct _pthread_workitem));
			workqueue_list_lock();
			witem->fromcache = 0;
			goto out;
		}
	}
	witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
	TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	witem->fromcache = 1;
out:
	witem->flags = 0;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	user_workitem_count++;
	return(witem);
}
2715 | ||
2716 | /* This routine is called with list lock held */ | |
2717 | static void | |
2718 | free_workitem(pthread_workitem_t witem) | |
2719 | { | |
1f2f436a A |
2720 | user_workitem_count--; |
2721 | witem->flags = 0; | |
2722 | if (witem->fromcache != 0) | |
2723 | TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry); | |
2724 | else | |
2725 | free(witem); | |
2726 | } | |
2727 | ||
2728 | static void | |
2729 | grow_workitem(void) | |
2730 | { | |
2731 | pthread_workitem_t witemp; | |
2732 | int i; | |
2733 | ||
2734 | witemp = &__workqueue_pool_ptr[__workqueue_nitems]; | |
2735 | bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE)); | |
2736 | for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) { | |
2737 | witemp[i].fromcache = 1; | |
2738 | TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry); | |
2739 | } | |
2740 | __workqueue_nitems += WORKITEM_CHUNK_SIZE; | |
224c7076 A |
2741 | } |
2742 | ||
/* This routine is called with list lock held */
/*
 * Hand out a workqueue structure from the pre-populated pool, or malloc a
 * fresh one (dropping the list lock around malloc) when the pool is empty.
 */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		/* NOTE(review): malloc result is returned unchecked — callers
		 * immediately bzero it in _pthread_workq_init; confirm OOM policy. */
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	user_workq_count++;
	return(wq);
}
2760 | ||
/* This routine is called with list lock held */
/*
 * Return a workqueue structure to the pool.  Note that even malloc'd
 * overflow queues are cached here rather than freed.
 */
static void
free_workqueue(pthread_workqueue_t wq)
{
	user_workq_count--;
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
2768 | ||
/*
 * Initialize a freshly allocated workqueue structure: zero it, apply the
 * caller's attributes (or the defaults), set up its item lists, stamp the
 * signature, and bind it to the head table entry for its priority band.
 */
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	wq->flags = 0;
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
#endif
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	/* The signature is what valid_workq() later checks. */
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
2792 | ||
2793 | int | |
2794 | valid_workq(pthread_workqueue_t workq) | |
2795 | { | |
34e8f829 | 2796 | if (workq->sig == PTHREAD_WORKQUEUE_SIG) |
224c7076 A |
2797 | return(1); |
2798 | else | |
2799 | return(0); | |
2800 | } | |
2801 | ||
2802 | ||
/* called with list lock */
/*
 * Scheduler core: while there is room in the kernel queue, walk the
 * priority bands from highest to lowest and round-robin the workqueues in
 * each band, posting one item at a time via post_nextworkitem().  Restarts
 * from the top (goto loop) whenever a higher-priority queue becomes ready
 * (wqreadyprio drops below the band being serviced).  Always exits with
 * the list lock released — hence the name.
 */
static void
pick_nextworkqueue_droplock()
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

#if WQ_TRACE
	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
#endif
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++)  {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			/* Resume the round-robin where the last pass left off. */
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;	/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				/* Advance the cursor before posting: workq may be freed. */
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#if WQ_TRACE
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
#endif
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed so reasses */
					/* If kernel queue is full , skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need re evaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				if (val != 0)
					nworkq = workq;
				/* if we found nothing to run and back to workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
2874 | ||
/*
 * Dispatch the head work item of `workq`.  Called with the list lock held
 * (may drop and retake it around user callbacks and the kernel call).
 *
 * Three item kinds are handled:
 *   - BARRIER: if kernel items are in flight, arm the barrier and wait for
 *     them to drain; otherwise run the barrier callback inline and retire it.
 *   - DESTROY: arm barrier+termination; when fully drained, unlink the
 *     queue, run the termination callback, and recycle the structure.
 *   - normal: move the item to the in-kernel list and hand it to the kernel
 *     via WQOPS_QUEUE_ADD, requeueing it at the head on failure.
 *
 * Returns 1 if an item was acted upon (posted/armed/retired), 0 if the
 * queue is suspended, empty, behind a barrier, or already applied.
 */
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
#endif
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	/* A pending barrier blocks everything behind it. */
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
#endif
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
#if WQ_TRACE
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
#endif

		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Also barrier when nothing is there needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			/* Items still in the kernel: arm the barrier and count them down. */
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				/* since we are going to drop list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
#if WQ_TRACE
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
#endif
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(witem);
#if WQ_TRACE
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Destroy acts as a barrier plus a termination request. */
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			/* Fully drained: tear the queue down right now. */
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			/* Unhook the round-robin cursor if it points at this queue. */
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			/* Not drained yet: park the destroy item back at the head. */
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		}
#if WQ_TRACE
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		/* Ordinary item: move it to the in-kernel list and post it. */
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32Barrier(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if (( error =__workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			/* Kernel refused: undo the accounting and requeue at the head. */
			OSAtomicDecrement32Barrier(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* noone should come here */
#if 1
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
#endif
	return(0);
}
3048 | ||
/*
 * Kernel entry point for workqueue threads.  The kernel calls this both
 * when it creates a brand-new thread (thread_reuse == 0: initialize the
 * pthread structure, mark it detached, add it to the global thread list)
 * and when it recycles a parked one.  `reuse` encodes which SPI is in
 * play: WQ_FLAG_THREAD_NEWSPI selects the libdispatch worker-function
 * path, otherwise the old per-item path runs item->func and then tears
 * down via workqueue_exit().
 */
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif
	int thread_reuse = 0;
	int thread_priority = 0;
	int thread_newspi = 0;
	int thread_options = 0;

	/* Decode the kernel-supplied flag word. */
	if (reuse & WQ_FLAG_THREAD_NEWSPI) {
		thread_reuse = reuse & WQ_FLAG_THREAD_REUSE;
		if ((reuse & WQ_FLAG_THREAD_OVERCOMMIT) != 0)
			thread_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT;
		thread_priority = reuse & WQ_FLAG_THREAD_PRIOMASK;
		thread_newspi = 1;
		workq = NULL;
	} else {
		thread_reuse = (reuse == 0)? 0: WQ_FLAG_THREAD_REUSE;
		workq = item->workq;
	}


	if (thread_reuse == 0) {
		/* reuse is set to 0, when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		if (thread_newspi != 0) {
			self->fun = (void *(*)(void *))__libdispatch_workerfunction;
			/* NOTE(review): an int priority is stored in the void* arg slot
			 * here — presumably consumed as an integer below; confirm. */
			self->arg = thread_priority;
		} else {
			self->fun = (void *(*)(void *))item->func;
			self->arg = item->func_arg;
		}
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif

	} else {
		/* reuse is set to 1, when a thread is resued to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		if (thread_newspi != 0) {
			self->fun = (void *(*)(void *))__libdispatch_workerfunction;
			self->arg = NULL;
		} else {
			self->fun = (void *(*)(void *))item->func;
			self->arg = item->func_arg;
		}
	}

#if WQ_DEBUG
	/* Sanity-check that the TSD self pointer matches what the kernel passed. */
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
			pself = self;
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
			LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	if (thread_newspi != 0) {
		/* New SPI: hand control to libdispatch, then park/exit the thread. */
		(*__libdispatch_workerfunction)(thread_priority, thread_options, NULL);
		_pthread_workq_return(self);
	} else {
		/* Old SPI: run exactly one work item, then tear down. */
		self->cur_workq = workq;
		self->cur_workitem = item;
		OSAtomicDecrement32Barrier(&kernel_workq_count);

		ret = (int)(intptr_t)(*self->fun)(self->arg);
		/* If we reach here without going through the above initialization path then don't go through
		 * with the teardown code path ( e.g. setjmp/longjmp ). Instead just exit this thread.
		 */
		if (self != pthread_self()) {
			pthread_exit(PTHREAD_CANCELED);
		}

		workqueue_exit(self, workq, item);
	}
}
3174 | ||
/*
 * Old-SPI per-item teardown, run after a work item's function returns:
 * retire the item, count it against any armed barrier, and when the
 * barrier drains run the barrier (or termination) callback, possibly
 * destroying the whole queue.  Ends by rescheduling via
 * pick_nextworkqueue_droplock() and returning this thread to the kernel.
 */
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0 ) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
				printf("Incorect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and call back is registered, run that */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			/* NOTE(review): `item` was passed to free_workitem() above, so this
			 * trace dereferences a retired item — trace-build only; confirm. */
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				/* Termination was pending behind the barrier: destroy the queue. */
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				/* Unhook the round-robin cursor if it points at this queue. */
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				workq->sig = 0;
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there are higher prio schedulabel item reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}

	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
3258 | ||
/*
 * Hand this workqueue thread back to the kernel.  The kernel call may
 * park/recycle the thread; if it returns, the thread is terminated.
 */
static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
3267 | ||
3268 | ||
224c7076 A |
3269 | /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */ |
3270 | ||
ad3c9f2a A |
/*
 * Register the libdispatch worker function and switch the process to the
 * new workqueue SPI.  Fails with EPERM if the old SPI is already active,
 * EBUSY if a worker function is already registered, and ENOTSUP if the
 * kernel does not understand the new SPI (probed via
 * WQOPS_QUEUE_NEWSPISUPP).
 */
int
pthread_workqueue_setdispatch_np(void (*worker_func)(int, int, void *))
{
	int error = 0;

	if (__workqueue_oldspis != 0)
		return(EPERM);

	/* Claim new-SPI mode up front so old-SPI entry points start failing. */
	__workqueue_newspis = 1;

	if (__libdispatch_workerfunction == NULL) {
		__libdispatch_workerfunction = worker_func;
		/* check whether the kernel supports new SPIs */
		error = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, 0, 0);
		if (error == -1){
			/* Kernel too old: roll back the registration entirely. */
			__libdispatch_workerfunction = NULL;
			error = ENOTSUP;
			__workqueue_newspis = 0;
		} else {
			/* prepare the kernel for workq action */
			(void)__workq_open();
			kernel_workq_setup = 1;
			if (__is_threaded == 0)
				__is_threaded = 1;
			__workqueue_newspis = 1;
		}
	} else {
		error = EBUSY;
	}

	return(error);
}
3303 | ||
3304 | int | |
3305 | pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads) | |
3306 | { | |
3307 | int priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK; | |
3308 | int error = 0; | |
3309 | ||
3310 | /* new spi not inited yet?? */ | |
3311 | if (__workqueue_newspis == 0) | |
3312 | return(EPERM); | |
3313 | ||
3314 | ||
3315 | if ((options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) != 0) | |
3316 | priority |= WORKQUEUE_OVERCOMMIT; | |
3317 | ||
3318 | error = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, priority); | |
3319 | ||
3320 | if (error == -1) | |
3321 | return(errno); | |
3322 | else | |
3323 | return(0); | |
3324 | } | |
3325 | ||
224c7076 A |
/*
 * Create a work queue using the legacy (pre-dispatch) workqueue SPI.
 *
 * On success stores the new queue in *workqp and returns 0. Returns EPERM
 * if the new (dispatch) SPIs are already active, EINVAL for a bad attr
 * signature, ENOTSUP under Rosetta on ppc, or the error from the one-time
 * kernel workqueue initialization.
 */
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__ppc__)
	/* Translated (Rosetta) processes cannot use kernel workqueues. */
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	/* Old and new SPIs are mutually exclusive per process. */
	if (__workqueue_newspis != 0)
		return(EPERM);

	/* Latch the process into legacy-SPI mode. */
	if (__workqueue_oldspis == 0)
		__workqueue_oldspis = 1;

	/* Validate the attribute's magic signature, if one was supplied. */
	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	/* One-time kernel/global setup; holds the list lock across init. */
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	/* Link the queue onto the priority bucket chosen by its attributes. */
	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	/* First queue at this priority becomes the scheduler's next pick. */
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
3375 | ||
3376 | int | |
34e8f829 | 3377 | pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) |
224c7076 A |
3378 | { |
3379 | pthread_workitem_t witem; | |
3380 | ||
ad3c9f2a A |
3381 | if (__workqueue_newspis != 0) |
3382 | return(EPERM); | |
3383 | ||
224c7076 A |
3384 | if (valid_workq(workq) == 0) { |
3385 | return(EINVAL); | |
3386 | } | |
3387 | ||
3388 | workqueue_list_lock(); | |
3389 | ||
3390 | /* | |
3391 | * Allocate the workitem here as it can drop the lock. | |
3392 | * Also we can evaluate the workqueue state only once. | |
3393 | */ | |
3394 | witem = alloc_workitem(); | |
3395 | witem->func = workitem_func; | |
3396 | witem->func_arg = workitem_arg; | |
224c7076 | 3397 | witem->workq = workq; |
224c7076 A |
3398 | |
3399 | /* alloc workitem can drop the lock, check the state */ | |
3400 | if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) { | |
3401 | free_workitem(witem); | |
3402 | workqueue_list_unlock(); | |
3403 | *itemhandlep = 0; | |
3404 | return(ESRCH); | |
3405 | } | |
3406 | ||
3407 | if (itemhandlep != NULL) | |
3408 | *itemhandlep = (pthread_workitem_handle_t *)witem; | |
34e8f829 | 3409 | if (gencountp != NULL) |
1f2f436a | 3410 | *gencountp = 0; |
34e8f829 A |
3411 | #if WQ_TRACE |
3412 | __kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0); | |
3413 | #endif | |
224c7076 | 3414 | TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry); |
34e8f829 A |
3415 | #if WQ_LISTTRACE |
3416 | __kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0); | |
3417 | #endif | |
224c7076 | 3418 | |
224c7076 A |
3419 | if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio)) |
3420 | wqreadyprio = workq->queueprio; | |
3421 | ||
3422 | pick_nextworkqueue_droplock(); | |
3423 | ||
3424 | return(0); | |
3425 | } | |
3426 | ||
34e8f829 A |
3427 | int |
3428 | pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp) | |
224c7076 | 3429 | { |
ad3c9f2a A |
3430 | if (__workqueue_newspis != 0) |
3431 | return(EPERM); | |
3432 | ||
34e8f829 A |
3433 | if (valid_workq(workq) == 0) { |
3434 | return(EINVAL); | |
3435 | } | |
3436 | ||
3437 | if (ocommp != NULL) | |
3438 | *ocommp = workq->overcommit; | |
224c7076 A |
3439 | return(0); |
3440 | } | |
3441 | ||
224c7076 | 3442 | |
224c7076 A |
3443 | #else /* !BUILDING_VARIANT ] [ */ |
3444 | extern int __unix_conforming; | |
3445 | extern int _pthread_count; | |
ad3c9f2a A |
3446 | extern int __workqueue_newspis; |
3447 | extern int __workqueue_oldspis; | |
3448 | ||
224c7076 A |
3449 | extern pthread_lock_t _pthread_list_lock; |
3450 | extern void _pthread_testcancel(pthread_t thread, int isconforming); | |
3451 | extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming); | |
3452 | ||
3453 | #endif /* !BUILDING_VARIANT ] */ | |
3454 | ||
3d9156a7 | 3455 | #if __DARWIN_UNIX03 |
224c7076 A |
3456 | |
/*
 * Cancellation cleanup handler for a conforming pthread_join().
 *
 * Runs when the joiner is cancelled (or unwinds) while waiting: it detaches
 * itself from the target thread and, for old-style threads, returns the
 * join semaphore to the pool — reaping the target first if it has already
 * exited so the semaphore is not recycled while in use.
 */
__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	int newstyle;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;

#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		/* Old-style join: a pooled semaphore signals thread death. */
		death = thread->death;
		if (!already_exited){
			/* Target still running: just stop being its joiner. */
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			/*
			 * Target already exited: reap it (retrying while the
			 * kernel thread is still terminating) before the
			 * semaphore goes back to the pool.
			 */
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN)
			{
				sched_yield();
			}
			restore_sem_to_pool(death);

		}

	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}
3d9156a7 | 3498 | |
224c7076 A |
3499 | #endif /* __DARWIN_UNIX03 */ |
3500 | ||
3501 | ||
3502 | /* | |
3503 | * Wait for a thread to terminate and obtain its exit value. | |
3504 | */ | |
3505 | /* | |
3506 | int | |
3507 | pthread_join(pthread_t thread, | |
3508 | void **value_ptr) | |
3509 | ||
3510 | moved to pthread_cancelable.c */ | |
3511 | ||
3512 | /* | |
3513 | * Cancel a thread | |
3514 | */ | |
/*
 * Request cancellation of a thread.
 *
 * Returns ESRCH if the thread is unknown, ENOTSUP for workqueue worker
 * threads (they cannot be cancelled). In conforming (UNIX03) mode the
 * pending flag is set under the thread's lock and the kernel is notified
 * only if cancellation is currently enabled; legacy mode just sets the flag.
 */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	/* First use of a conforming API latches conforming mode process-wide. */
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	/* Validate that the target is a live, known thread. */
	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	/* Set the pending bit and capture the resulting state atomically. */
	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	/* Only poke the kernel if the target currently has cancel enabled. */
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	/* Legacy mode: unlocked flag set; target polls via testcancel. */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}
3543 | ||
224c7076 A |
3544 | void |
3545 | pthread_testcancel(void) | |
3546 | { | |
3547 | pthread_t self = pthread_self(); | |
3548 | ||
3549 | #if __DARWIN_UNIX03 | |
3550 | if (__unix_conforming == 0) | |
3551 | __unix_conforming = 1; | |
3552 | _pthread_testcancel(self, 1); | |
3553 | #else /* __DARWIN_UNIX03 */ | |
3554 | _pthread_testcancel(self, 0); | |
3555 | #endif /* __DARWIN_UNIX03 */ | |
3556 | ||
3557 | } | |
3558 | ||
3559 | ||
3560 | /* | |
3561 | * Query/update the cancelability 'state' of a thread | |
3562 | */ | |
/*
 * Query/update the cancelability 'state' of the calling thread.
 * Thin wrapper that selects conforming vs. legacy semantics for the
 * shared internal implementation.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	int conforming;

#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	return _pthread_setcancelstate_internal(state, oldstate, conforming);
}
3576 | ||
3577 | ||
3578 | ||
3d9156a7 A |
3579 | /* |
3580 | * Query/update the cancelability 'type' of a thread | |
3581 | */ | |
3582 | int | |
3583 | pthread_setcanceltype(int type, int *oldtype) | |
3584 | { | |
3585 | pthread_t self = pthread_self(); | |
3586 | ||
3587 | #if __DARWIN_UNIX03 | |
3588 | if (__unix_conforming == 0) | |
3589 | __unix_conforming = 1; | |
3590 | #endif /* __DARWIN_UNIX03 */ | |
3591 | ||
3592 | if ((type != PTHREAD_CANCEL_DEFERRED) && | |
3593 | (type != PTHREAD_CANCEL_ASYNCHRONOUS)) | |
3594 | return EINVAL; | |
3595 | self = pthread_self(); | |
3596 | LOCK(self->lock); | |
3597 | if (oldtype) | |
3598 | *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK; | |
3599 | self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK; | |
3600 | self->cancel_state |= type; | |
3601 | UNLOCK(self->lock); | |
3602 | #if !__DARWIN_UNIX03 | |
3603 | _pthread_testcancel(self, 0); /* See if we need to 'die' now... */ | |
3604 | #endif /* __DARWIN_UNIX03 */ | |
3605 | return (0); | |
3606 | } | |
3607 | ||
224c7076 A |
3608 | int |
3609 | pthread_sigmask(int how, const sigset_t * set, sigset_t * oset) | |
3610 | { | |
3611 | #if __DARWIN_UNIX03 | |
3612 | int err = 0; | |
3613 | ||
3614 | if (__pthread_sigmask(how, set, oset) == -1) { | |
3615 | err = errno; | |
3616 | } | |
3617 | return(err); | |
3618 | #else /* __DARWIN_UNIX03 */ | |
3619 | return(__pthread_sigmask(how, set, oset)); | |
3620 | #endif /* __DARWIN_UNIX03 */ | |
3621 | } | |
3622 | ||
3623 | /* | |
3624 | int | |
3625 | sigwait(const sigset_t * set, int * sig) | |
3626 | ||
3627 | moved to pthread_cancelable.c */ |