/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"

#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>

#include <_simple.h>
#include <platform/string.h>
#include <platform/compat.h>

extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
		void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));

static void (*exitf)(int) = __exit;
__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
__private_extern__ void (*_pthread_free)(void *) = NULL;

//
// Global variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;

int __unix_conforming = 0;

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;

#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_version = 1,
	.plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI

//
// Static variables
//

// Mach message notification that a thread needs to be recycled.
typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

#define pthreadsize ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
static pthread_attr_t _pthread_attr_default = {0};

// The main thread's pthread_t
static struct _pthread _thread __attribute__((aligned(4096))) = {0};

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
static int __libdispatch_offset;

// supported feature set
int __pthread_supported_features;

//
// Function prototypes
//

// pthread primitives
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);

static void _pthread_terminate(pthread_t t);

static void _pthread_struct_init(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		size_t stacksize,
		int kernalloc);

extern void _pthread_set_self(pthread_t);

static void _pthread_dealloc_reply_port(pthread_t t);

static inline void __pthread_add_thread(pthread_t t, bool parent);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);

static int _pthread_find_thread(pthread_t thread);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);

static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start

void pthread_workqueue_atfork_child(void);

static bool __workq_newapi;

/* Compatibility: the previous pthread API used WORKQUEUE_OVERCOMMIT to request
 * overcommit threads from the kernel. This definition is kept here, in
 * userspace only, to perform the compatibility shim from old API requests to
 * the new kext conventions.
 */
#define WORKQUEUE_OVERCOMMIT 0x10000

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31 <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */

#define PTHREAD_START_CUSTOM		0x01000000
#define PTHREAD_START_SETSCHED		0x02000000
#define PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_QOSCLASS		0x08000000
#define PTHREAD_START_QOSCLASS_MASK	0xffffff
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff

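/*
 * Illustrative sketch (not part of this file) of how a caller-side flags
 * word for __bsdthread_create is packed per the layout above; this mirrors
 * the packing done in pthread_create() below. The policy/importance values
 * here are made-up examples; only the masks and shifts are real:
 *
 *	unsigned int flags = 0;
 *	int policy = SCHED_RR;	// example value
 *	int importance = 31;	// example value
 *	flags |= PTHREAD_START_SETSCHED;
 *	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
 */
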
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
		void (*)(pthread_t, mach_port_t, void *, void *, int), int,
		void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
extern int __pthread_canceled(int);
extern int __pthread_kill(mach_port_t, int);

extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);

#if defined(__i386__) || defined(__x86_64__)
static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error no PTHREAD_STACK_HINT for this architecture
#endif

#if defined(__i386__) && defined(static_assert)
// Check for regression of <rdar://problem/13249323>
static_assert(offsetof(struct _pthread, err_no) == 68);
#endif

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.

static int
_pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
{
	int res;
	kern_return_t kr;
	pthread_t t = NULL;
	mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
	size_t allocsize = 0;
	size_t guardsize = 0;
	size_t stacksize = 0;

	PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);

	*thread = NULL;
	*stack = NULL;

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		allocsize = pthreadsize;
	} else {
		guardsize = attrs->guardsize;
		stacksize = attrs->stacksize;
		allocsize = stacksize + guardsize + pthreadsize;
	}

	kr = mach_vm_map(mach_task_self(),
			&allocaddr,
			allocsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE,
			MEMORY_OBJECT_NULL,
			0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		kr = mach_vm_allocate(mach_task_self(),
				&allocaddr,
				allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	}

	if (kr == KERN_SUCCESS) {
		// The stack grows down.
		// Set the guard page at the lowest address of the
		// newly allocated stack. Return the highest address
		// of the stack.
		if (guardsize) {
			(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
		}

		// Thread structure resides at the top of the stack.
		t = (void *)(allocaddr + stacksize + guardsize);
		if (stacksize) {
			// Returns the top of the stack.
			*stack = t;
		}
	}

	if (t != NULL) {
		_pthread_struct_init(t, attrs, *stack, attrs->stacksize, 0);
		t->freeaddr = (void *)allocaddr;
		t->freesize = allocsize;
		*thread = t;
		res = 0;
	} else {
		res = EAGAIN;
	}
	return res;
}
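
/*
 * Worked example of the allocation laid out above (illustrative only,
 * assuming a 4 KB page size, the 512 KB default stack and a one-page guard):
 *
 *	allocaddr                                    allocaddr + allocsize
 *	|- guard (4 KB) -|------ stack (512 KB) ------|- struct _pthread -|
 *	                  ^ lowest stack address       ^ *stack and t (stack top)
 *
 * The guard page is made inaccessible with VM_PROT_NONE, the stack grows
 * down from *stack, and the thread structure sits just above the stack top.
 */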

static int
_pthread_deallocate(pthread_t t)
{
	// Don't free the main thread.
	if (t != &_thread) {
		kern_return_t ret;
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		PTHREAD_ASSERT(ret == KERN_SUCCESS);
	}
	return 0;
}

// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN
static void
_pthread_terminate(pthread_t t)
{
	PTHREAD_ASSERT(t == pthread_self());

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;

	mach_port_t kport = _pthread_kernel_thread(t);
	semaphore_t joinsem = t->joiner_notify;

	_pthread_dealloc_reply_port(t);

	// If the pthread_t sticks around after the __bsdthread_terminate, we'll
	// need to free it later

	// After the call to __pthread_remove_thread, it is only safe to
	// dereference the pthread_t structure if EBUSY has been returned.

	bool destroy, should_exit;
	destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);

	if (t == &_thread) {
		// Don't free the main thread.
		freesize = 0;
	} else if (!destroy) {
		// We were told to keep the pthread_t structure around. In the common
		// case, the pthread structure itself is part of the allocation
		// described by freeaddr/freesize, in which case we need to split and
		// only deallocate the area below the pthread structure. In the event
		// of a custom stack, the freeaddr/size will be the pthread structure
		// itself, in which case we shouldn't free anything.
		if ((void *)t > t->freeaddr && (void *)t < t->freeaddr + t->freesize) {
			freesize = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
			t->freeaddr += freesize;
			t->freesize -= freesize;
		} else {
			freesize = 0;
		}
	}
	if (freesize == 0) {
		freeaddr = 0;
	}
	_pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
	if (should_exit) {
		exitf(0);
	}

	__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
	PTHREAD_ABORT("thread %p didn't terminate", t);
}

int
pthread_attr_destroy(pthread_attr_t *attr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*param = attr->param;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		ret = 0;
	}
	return ret;
}

// Default stack size is 512 KB, independent of the main thread's stack size.
static const size_t DEFAULT_STACK_SIZE = 512 * 1024;

int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
	return 0;
}
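
/*
 * Usage sketch for the attribute APIs above (illustrative only; not part of
 * this library). Creates a detached thread with a 1 MB stack; `worker` is a
 * hypothetical start routine:
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	pthread_attr_setstacksize(&attr, 1024 * 1024);	// must be a multiple of vm_page_size
 *	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *	pthread_t th;
 *	int err = pthread_create(&th, &attr, worker, NULL);
 *	pthread_attr_destroy(&attr);
 */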

int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    (detachstate == PTHREAD_CREATE_JOINABLE ||
	     detachstate == PTHREAD_CREATE_DETACHED)) {
		attr->detached = detachstate;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    (inheritsched == PTHREAD_INHERIT_SCHED ||
	     inheritsched == PTHREAD_EXPLICIT_SCHED)) {
		attr->inherit = inheritsched;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    (policy == SCHED_OTHER ||
	     policy == SCHED_RR ||
	     policy == SCHED_FIFO)) {
		attr->policy = policy;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			// No attribute yet for the scope.
			ret = 0;
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			ret = ENOTSUP;
		}
	}
	return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    ((uintptr_t)stackaddr % vm_page_size) == 0) {
		attr->stackaddr = stackaddr;
		attr->fastpath = 0;
		attr->guardsize = 0;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    (stacksize % vm_page_size) == 0 &&
	    stacksize >= PTHREAD_STACK_MIN) {
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		ret = 0;
	}
	return ret;
}

// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
	    ((uintptr_t)stackaddr % vm_page_size) == 0 &&
	    (stacksize % vm_page_size) == 0 &&
	    stacksize >= PTHREAD_STACK_MIN) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->fastpath = 0;
		ret = 0;
	}
	return ret;
}
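
/*
 * Note the two conventions above (sketch, assuming a hypothetical
 * page-aligned buffer `buf` of `size` bytes): pthread_attr_setstackaddr()
 * takes the *top* of the stack, while pthread_attr_setstack() takes the
 * SUSv3 *base* (lowest address) and converts internally:
 *
 *	pthread_attr_setstackaddr(&attr, (char *)buf + size);	// top of stack
 *	pthread_attr_setstack(&attr, buf, size);		// base + size
 *
 * Both end up storing the top-of-stack address in attr->stackaddr.
 */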

int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			attr->fastpath = 0;
			ret = 0;
		}
	}
	return ret;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		ret = 0;
	}
	return ret;
}


/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	__pthread_add_thread(self, false);
	_pthread_exit(self, (self->fun)(self->arg));
}

void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *arg, size_t stacksize, unsigned int pflags)
{
	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		void *stackaddr = self;
		_pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize, 1);

		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}

		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	}

	if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
		/* The QoS class is cached in the TSD of the pthread, so to reflect the
		 * class that the kernel brought us up at, the TSD must be primed from the
		 * flags parameter.
		 */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
	} else {
		/* Give the thread a default QoS tier, of zero. */
		self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	}

	_pthread_set_kernel_thread(self, kport);
	self->fun = fun;
	self->arg = arg;

	_pthread_body(self);
}

static void
_pthread_struct_init(pthread_t t,
		const pthread_attr_t *attrs,
		void *stackaddr,
		size_t stacksize,
		int kernalloc)
{
	t->sig = _PTHREAD_SIG;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
	LOCK_INIT(t->lock);

	t->stacksize = stacksize;
	t->stackaddr = stackaddr;

	t->kernalloc = kernalloc;
	if (kernalloc) {
		/*
		 * The pthread may be offset into a page. In that event, by contract
		 * with the kernel, the allocation will extend pthreadsize from the
		 * start of the next page. There's also one page worth of allocation
		 * below stacksize for the guard page. <rdar://problem/19941744>
		 */
		t->freeaddr = (stackaddr - stacksize) - vm_page_size;
		t->freesize = (round_page((uintptr_t)stackaddr) + pthreadsize) - (uintptr_t)t->freeaddr;
	}

	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->schedset = attrs->schedset;
	t->param = attrs->param;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}
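
/*
 * Worked example for the kernalloc case above (illustrative values only;
 * 4 KB pages, stackaddr = 0x201000, stacksize = 0x80000):
 *
 *	freeaddr = (0x201000 - 0x80000) - 0x1000 = 0x180000
 *	freesize = round_page(0x201000) + pthreadsize - 0x180000
 *
 * so the region handed back to __bsdthread_terminate covers the guard page,
 * the stack and the kernel-placed pthread structure in one allocation.
 */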

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or has had) at
 * least one thread apart from the main thread. There can be a race if a
 * thread is in the process of being created at the time of the call. It does
 * not tell whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == pthread_self()) {
		/*
		 * If the call is on self, return the kernel port. We cannot
		 * add this bypass for main thread as it might have exited,
		 * and we should not return stale port info.
		 */
		kport = _pthread_kernel_thread(t);
	} else {
		(void)_pthread_lookup_thread(t, &kport, 0);
	}

	return kport;
}

pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	UNLOCK(_pthread_list_lock);

	return p;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		return t->stacksize;
	}

	LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(t);
	if (ret == 0) {
		size = t->stacksize;
	} else {
		size = ret; // XXX bug?
	}

	UNLOCK(_pthread_list_lock);

	return size;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		return t->stackaddr;
	}

	LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(t);
	if (ret == 0) {
		addr = t->stackaddr;
	} else {
		addr = (void *)(uintptr_t)ret; // XXX bug?
	}

	UNLOCK(_pthread_list_lock);

	return addr;
}

static mach_port_t
_pthread_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
	void *p = (void *)(uintptr_t)reply_port;
	if (t == NULL) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
	}
}

static void
_pthread_dealloc_reply_port(pthread_t t)
{
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}

pthread_t
pthread_main_thread_np(void)
{
	return &_thread;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}


/* If we are passed a NULL pthread_t, return the current thread's thread_id.
 * That way callers don't have to call pthread_self(), in addition to us
 * doing it, when they just want their own thread_id.
 */
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
	} else {
		LOCK(_pthread_list_lock);
		res = _pthread_find_thread(thread);
		if (res == 0) {
			*thread_id = thread->thread_id;
		}
		UNLOCK(_pthread_list_lock);
	}
	return res;
}
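
/*
 * Usage sketch (illustrative only): fetching the current thread's 64-bit id
 * without an explicit pthread_self() call, per the NULL convention above:
 *
 *	uint64_t tid;
 *	if (pthread_threadid_np(NULL, &tid) == 0) {
 *		// tid now holds the kernel-assigned thread id
 *	}
 */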

int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int res;

	if (thread == NULL) {
		return ESRCH;
	}

	LOCK(_pthread_list_lock);
	res = _pthread_find_thread(thread);
	if (res == 0) {
		strlcpy(threadname, thread->pthread_name, len);
	}
	UNLOCK(_pthread_list_lock);
	return res;
}

int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	/* prototype is in pthread_internals.h */
	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void *)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool parent)
{
	bool should_deallocate = false;
	bool should_add = true;

	LOCK(_pthread_list_lock);

	// The parent and child threads race to add the thread to the list.
	// When called by the parent:
	// - set parentcheck to true
	// - back off if childrun is true
	// When called by the child:
	// - set childrun to true
	// - back off if parentcheck is true
	if (parent) {
		t->parentcheck = 1;
		if (t->childrun) {
			// child got here first, don't add.
			should_add = false;
		}

		// If the child exits before we check in then it has to keep
		// the thread structure memory alive so our dereferences above
		// are valid. If it's a detached thread, then no joiner will
		// deallocate the thread structure itself. So we do it here.
		if (t->childexit) {
			should_add = false;
			should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
		}
	} else {
		t->childrun = 1;
		if (t->parentcheck) {
			// Parent got here first, don't add.
			should_add = false;
		}
		if (t->wqthread) {
			// Work queue threads have no parent. Simulate.
			t->parentcheck = 1;
		}
	}

	if (should_add) {
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		_pthread_count++;
	}

	UNLOCK(_pthread_list_lock);

	if (parent) {
		_pthread_introspection_thread_create(t, should_deallocate);
		if (should_deallocate) {
			_pthread_deallocate(t);
		}
	} else {
		_pthread_introspection_thread_start(t);
	}
}
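
/*
 * Sketch of the parent/child handshake above as a truth table (derived from
 * the code; no new behavior implied):
 *
 *	caller   flag seen         action
 *	parent   childrun == 1     child already added; skip insert
 *	parent   childexit == 1    child already gone; skip insert and
 *	                           deallocate here if detached
 *	child    parentcheck == 1  parent already added; skip insert
 *
 * Whichever side takes _pthread_list_lock first performs the insert; the
 * other side backs off, so the thread is added to the list exactly once.
 */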

// <rdar://problem/12544957> must always inline this function to avoid epilogues
// Returns EBUSY if the thread structure should be kept alive (is joinable).
// Returns ESRCH if the thread structure is no longer valid (was detached).
PTHREAD_ALWAYS_INLINE
static inline int
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
	int ret = 0;

	bool should_remove = true;

	LOCK(_pthread_list_lock);

	// When a thread removes itself:
	// - Set the childexit flag indicating that the thread has exited.
	// - Return false if parentcheck is zero (must keep structure)
	// - If the thread is joinable, keep it on the list so that
	//   the join operation succeeds. Still decrement the running
	//   thread count so that we exit if no threads are running.
	// - Update the running thread count.
	// When another thread removes a joinable thread:
	// - CAREFUL not to dereference the thread before verifying that the
	//   reference is still valid using _pthread_find_thread().
	// - Remove the thread from the list.

	if (child) {
		t->childexit = 1;
		if (t->parentcheck == 0) {
			ret = EBUSY;
		}
		if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
			ret = EBUSY;
			should_remove = false;
		}
		*should_exit = (--_pthread_count <= 0);
	} else {
		ret = _pthread_find_thread(t);
		if (ret == 0) {
			// If we found a thread but it's not joinable, bail.
			if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
				should_remove = false;
				ret = ESRCH;
			}
		}
	}
	if (should_remove) {
		TAILQ_REMOVE(&__pthread_head, t, plist);
	}

	UNLOCK(_pthread_list_lock);

	return ret;
}

int
pthread_create(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg)
{
	pthread_t t = NULL;
	unsigned int flags = 0;

	pthread_attr_t *attrs = (pthread_attr_t *)attr;
	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	if (attrs->detached == PTHREAD_CREATE_DETACHED) {
		flags |= PTHREAD_START_DETACHED;
	}

	if (attrs->schedset != 0) {
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}

	__is_threaded = 1;

	void *stack;

	if (attrs->fastpath) {
		// kernel will allocate thread and stack, pass stacksize.
		stack = (void *)attrs->stacksize;
	} else {
		// allocate the thread and its stack
		flags |= PTHREAD_START_CUSTOM;

		int res;
		res = _pthread_allocate(&t, attrs, &stack);
		if (res) {
			return res;
		}

		t->arg = arg;
		t->fun = start_routine;
	}

	pthread_t t2;
	t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
	if (t2 == (pthread_t)-1) {
		if (flags & PTHREAD_START_CUSTOM) {
			// free the thread and stack if we allocated it
			_pthread_deallocate(t);
		}
		return EAGAIN;
	}
	if (t == NULL) {
		t = t2;
	}

	__pthread_add_thread(t, true);

	// XXX if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}
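
/*
 * Minimal caller-side sketch for pthread_create() (illustrative; `worker` is
 * a hypothetical start routine):
 *
 *	void *worker(void *arg) { return arg; }
 *
 *	pthread_t th;
 *	int err = pthread_create(&th, NULL, worker, NULL);
 *	if (err == 0) {
 *		pthread_join(th, NULL);
 *	} else {
 *		// EAGAIN: kernel/stack allocation failed; EINVAL: bad attributes
 *	}
 */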

int
pthread_create_suspended_np(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg)
{
	int res;
	void *stack;
	mach_port_t kernel_thread = MACH_PORT_NULL;

	const pthread_attr_t *attrs = attr;
	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	pthread_t t;
	res = _pthread_allocate(&t, attrs, &stack);
	if (res) {
		return res;
	}

	*thread = t;

	kern_return_t kr;
	kr = thread_create(mach_task_self(), &kernel_thread);
	if (kr != KERN_SUCCESS) {
		//PTHREAD_ABORT("thread_create() failed: %d", kern_res);
		return EINVAL; /* Need better error here? */
	}

	_pthread_set_kernel_thread(t, kernel_thread);
	(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

	__is_threaded = 1;

	t->arg = arg;
	t->fun = start_routine;

	__pthread_add_thread(t, true);

	// Set up a suspended thread.
	_pthread_setup(t, _pthread_body, stack, 1, 0);
	return res;
}

int
pthread_detach(pthread_t thread)
{
	int res;
	bool join = false;
	semaphore_t sema = SEMAPHORE_NULL;

	res = _pthread_lookup_thread(thread, NULL, 1);
	if (res) {
		return res; // Not a valid thread to detach.
	}

	LOCK(thread->lock);
	if (thread->detached & PTHREAD_CREATE_JOINABLE) {
		if (thread->detached & _PTHREAD_EXITED) {
			// Join the thread if it's already exited.
			join = true;
		} else {
			thread->detached &= ~PTHREAD_CREATE_JOINABLE;
			thread->detached |= PTHREAD_CREATE_DETACHED;
			sema = thread->joiner_notify;
		}
	} else {
		res = EINVAL;
	}
	UNLOCK(thread->lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (sema) {
		semaphore_signal(sema);
	}

	return res;
}

int
pthread_kill(pthread_t th, int sig)
{
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	if (_pthread_lookup_thread(th, &kport, 0) != 0) {
		return ESRCH; // Not a valid thread.
	}

	// Don't signal workqueue threads.
	if (th->wqthread != 0 && th->wqkillset == 0) {
		return ENOTSUP;
	}

	int ret = __pthread_kill(kport, sig);

	if (ret == -1) {
		ret = errno;
	}
	return ret;
}

int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	LOCK(self->lock);
	self->wqkillset = enable ? 1 : 0;
	UNLOCK(self->lock);

	return 0;
}

static void *
__pthread_get_exit_value(pthread_t t, int conforming)
{
	const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
	void *value = t->exit_value;
	if (conforming) {
		if ((t->cancel_state & flags) == flags) {
			value = PTHREAD_CANCELED;
		}
	}
	return value;
}

/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}

/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	LOCK(self->lock);
	self->detached |= _PTHREAD_EXITED;
	self->exit_value = value_ptr;

	if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
	    self->joiner_notify == SEMAPHORE_NULL) {
		self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
	}
	UNLOCK(self->lock);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate(self);
}

void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}

int
pthread_getschedparam(pthread_t thread,
		int *policy,
		struct sched_param *param)
{
	int ret;

	if (thread == NULL) {
		return ESRCH;
	}

	LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(thread);
	if (ret == 0) {
		if (policy) {
			*policy = thread->policy;
		}
		if (param) {
			*param = thread->param;
		}
	}

	UNLOCK(_pthread_list_lock);

	return ret;
}

static int
pthread_setschedparam_internal(pthread_t thread,
		mach_port_t kport,
		int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}

int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int res;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == &_thread) {
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		(void)_pthread_lookup_thread(t, &kport, 0);
	}

	res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res == 0) {
		if (bypass == 0) {
			// Ensure the thread is still valid.
			LOCK(_pthread_list_lock);
			res = _pthread_find_thread(t);
			if (res == 0) {
				t->policy = policy;
				t->param = *param;
			}
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return res;
}
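
/*
 * Usage sketch for pthread_setschedparam() (illustrative only; the priority
 * is an example within the min/max range reported by the functions below):
 *
 *	struct sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) };
 *	int err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
 *	// err is EINVAL if the policy or the thread_policy() call is rejected
 */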

int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

// Force LLVM not to optimize this to a call to __pthread_set_self; if it
// does, _pthread_set_self won't be bound when secondary threads try to
// start up.
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	if (p == NULL) {
		p = &_thread;
	}

	uint64_t tid = __thread_selfid();
	if (tid == -1ull) {
		PTHREAD_ABORT("failed to set thread_id");
	}

	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	p->thread_id = tid;
	__pthread_set_self(&p->tsd[0]);
}

struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void *)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		os_once(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
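
/*
 * Usage sketch for pthread_once() (illustrative; `init_subsystem` is a
 * hypothetical routine run exactly once across all threads):
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init_subsystem(void) { /+ one-time setup +/ }
 *
 *	pthread_once(&once, init_subsystem);
 *
 * The cleanup handler pushed above resets the once control if init_routine
 * exits via cancellation, so a later caller can retry the initialization.
 */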
1498 | ||
1499 | void | |
1500 | _pthread_testcancel(pthread_t thread, int isconforming) | |
1501 | { | |
1502 | const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING); | |
1503 | ||
1504 | LOCK(thread->lock); | |
1505 | bool canceled = ((thread->cancel_state & flags) == flags); | |
1506 | UNLOCK(thread->lock); | |
1507 | ||
1508 | if (canceled) { | |
1509 | pthread_exit(isconforming ? PTHREAD_CANCELED : 0); | |
1510 | } | |
1511 | } | |
1512 | ||
1513 | void | |
1514 | _pthread_exit_if_canceled(int error) | |
1515 | { | |
1516 | if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) { | |
1517 | pthread_t self = pthread_self(); | |
1518 | if (self != NULL) { | |
1519 | self->cancel_error = error; | |
1520 | } | |
1521 | pthread_exit(PTHREAD_CANCELED); | |
1522 | } | |
1523 | } | |
1524 | ||
1525 | int | |
1526 | pthread_getconcurrency(void) | |
1527 | { | |
1528 | return pthread_concurrency; | |
1529 | } | |
1530 | ||
1531 | int | |
1532 | pthread_setconcurrency(int new_level) | |
1533 | { | |
1534 | if (new_level < 0) { | |
1535 | return EINVAL; | |
1536 | } | |
1537 | pthread_concurrency = new_level; | |
1538 | return 0; | |
1539 | } | |
1540 | ||
1541 | void | |
1542 | _pthread_set_pfz(uintptr_t address) | |
1543 | { | |
1544 | } | |
1545 | ||
1546 | #if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD) | |
1547 | void * | |
1548 | malloc(size_t sz) | |
1549 | { | |
1550 | if (_pthread_malloc) { | |
1551 | return _pthread_malloc(sz); | |
1552 | } else { | |
1553 | return NULL; | |
1554 | } | |
1555 | } | |
1556 | ||
1557 | void | |
1558 | free(void *p) | |
1559 | { | |
1560 | if (_pthread_free) { | |
1561 | _pthread_free(p); | |
1562 | } | |
1563 | } | |
1564 | #endif | |
1565 | ||
1566 | /* | |
1567 | * Perform package initialization - called automatically when application starts | |
1568 | */ | |
1569 | struct ProgramVars; /* forward reference */ | |
1570 | ||
1571 | int | |
1572 | __pthread_init(const struct _libpthread_functions *pthread_funcs, const char *envp[] __unused, | |
1573 | const char *apple[] __unused, const struct ProgramVars *vars __unused) | |
1574 | { | |
1575 | // Save our provided pushed-down functions | |
1576 | if (pthread_funcs) { | |
1577 | exitf = pthread_funcs->exit; | |
1578 | ||
1579 | if (pthread_funcs->version >= 2) { | |
1580 | _pthread_malloc = pthread_funcs->malloc; | |
1581 | _pthread_free = pthread_funcs->free; | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | // | |
1586 | // Get host information | |
1587 | // | |
1588 | ||
1589 | kern_return_t kr; | |
1590 | host_flavor_t flavor = HOST_PRIORITY_INFO; | |
1591 | mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT; | |
1592 | host_priority_info_data_t priority_info; | |
1593 | host_t host = mach_host_self(); | |
1594 | kr = host_info(host, flavor, (host_info_t)&priority_info, &count); | |
1595 | if (kr != KERN_SUCCESS) { | |
1596 | PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr)); | |
1597 | } else { | |
1598 | default_priority = priority_info.user_priority; | |
1599 | min_priority = priority_info.minimum_priority; | |
1600 | max_priority = priority_info.maximum_priority; | |
1601 | } | |
1602 | mach_port_deallocate(mach_task_self(), host); | |
1603 | ||
1604 | // | |
1605 | // Set up the main thread structure | |
1606 | // | |
1607 | ||
1608 | void *stackaddr; | |
1609 | size_t stacksize = DFLSSIZ; | |
964d3577 A |
1610 | size_t len = sizeof(stackaddr); |
1611 | int mib[] = { CTL_KERN, KERN_USRSTACK }; | |
1612 | if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) { | |
1613 | stackaddr = (void *)USRSTACK; | |
f1a1da6c A |
1614 | } |
1615 | ||
1616 | pthread_t thread = &_thread; | |
1617 | pthread_attr_init(&_pthread_attr_default); | |
1618 | _pthread_struct_init(thread, &_pthread_attr_default, stackaddr, stacksize, 0); | |
1619 | thread->detached = PTHREAD_CREATE_JOINABLE; | |
1620 | ||
1621 | // Finish initialization with common code that is reinvoked on the | |
1622 | // child side of a fork. | |
1623 | ||
1624 | // Finishes initialization of main thread attributes. | |
1625 | // Initializes the thread list and add the main thread. | |
1626 | // Calls _pthread_set_self() to prepare the main thread for execution. | |
1627 | __pthread_fork_child_internal(thread); | |
1628 | ||
1629 | // Set up kernel entry points with __bsdthread_register. | |
1630 | pthread_workqueue_atfork_child(); | |
1631 | ||
964d3577 A |
1632 | // Have pthread_key do its init envvar checks. |
1633 | _pthread_key_global_init(envp); | |
1634 | ||
f1a1da6c A |
1635 | return 0; |
1636 | } | |
1637 | ||
1638 | int | |
1639 | sched_yield(void) | |
1640 | { | |
1641 | swtch_pri(0); | |
1642 | return 0; | |
1643 | } | |

PTHREAD_NOEXPORT void
__pthread_fork_child_internal(pthread_t p)
{
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);

	// Re-use the main thread's static storage if no thread was provided.
	if (p == NULL) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}

	LOCK_INIT(p->lock);
	_pthread_set_kernel_thread(p, mach_thread_self());
	_pthread_set_reply_port(p, mach_reply_port());
	p->__cleanup_stack = NULL;
	p->joiner_notify = SEMAPHORE_NULL;
	p->joiner = MACH_PORT_NULL;
	p->detached |= _PTHREAD_CREATE_PARENT;
	p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;

	// Initialize the list of threads with the new main thread.
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	_pthread_count = 1;

	_pthread_set_self(p);
	_pthread_introspection_thread_start(p);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
PTHREAD_NOEXPORT int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self;

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming) {
				__pthread_canceled(1);
			}
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming) {
				__pthread_canceled(2);
			}
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate) {
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	}
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming) {
		_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
	}
	return 0;
}
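
/*
 * Caller-side sketch, not part of this file: the public
 * pthread_setcancelstate() wrapper reaches the routine above. A common
 * POSIX pattern is to disable cancellation around non-cancellation-safe
 * work and then restore the caller's previous state. Guarded out of the
 * build with #if 0.
 */
#if 0
#include <pthread.h>

static void
do_uninterruptible_work(void)
{
	int oldstate;
	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... work that must not observe a pending cancel ... */
	pthread_setcancelstate(oldstate, NULL);
}
#endif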

/* When a thread exits, set the cancellation state to DISABLE and DEFERRED. */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		// 4597450: begin
		self->detached |= _PTHREAD_WASCANCEL;
		// 4597450: end
	}
	UNLOCK(self->lock);
}

int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	// Returns ESRCH if the thread was not created joinable.
	int ret = __pthread_remove_thread(thread, false, NULL);
	if (ret != 0) {
		return ret;
	}

	if (value_ptr) {
		*value_ptr = __pthread_get_exit_value(thread, conforming);
	}
	_pthread_introspection_thread_destroy(thread);
	_pthread_deallocate(thread);
	return 0;
}
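
/*
 * Usage sketch (hypothetical caller, not part of this library): the
 * public pthread_join() path lands in _pthread_join_cleanup() above to
 * publish the exit value and release the thread's storage. Guarded out
 * of the build with #if 0.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

static void *
worker(void *arg)
{
	return (void *)(uintptr_t)42;	// exit value retrieved by the joiner
}

static int
join_example(void)
{
	pthread_t t;
	void *value = NULL;
	if (pthread_create(&t, NULL, worker, NULL) != 0) {
		return -1;
	}
	pthread_join(t, &value);	// on success, value == (void *)42
	return (int)(uintptr_t)value;
}
#endif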

/* Always called with the list lock held; returns with it held. */
int
_pthread_find_thread(pthread_t thread)
{
	if (thread != NULL) {
		pthread_t p;
loop:
		TAILQ_FOREACH(p, &__pthread_head, plist) {
			if (p == thread) {
				if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
					UNLOCK(_pthread_list_lock);
					sched_yield();
					LOCK(_pthread_list_lock);
					goto loop;
				}
				return 0;
			}
		}
	}
	return ESRCH;
}

int
_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
{
	mach_port_t kport = MACH_PORT_NULL;
	int ret;

	if (thread == NULL) {
		return ESRCH;
	}

	LOCK(_pthread_list_lock);

	ret = _pthread_find_thread(thread);
	if (ret == 0) {
		// Fail if we only want joinable threads and the thread found
		// is in the detached state.
		if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
			ret = EINVAL;
		} else {
			kport = _pthread_kernel_thread(thread);
		}
	}

	UNLOCK(_pthread_list_lock);

	if (portp != NULL) {
		*portp = kport;
	}

	return ret;
}

void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
	if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
		/* Clear the current thread's TSD; that can be done inline. */
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
	} else {
		pthread_t p;

		LOCK(_pthread_list_lock);

		TAILQ_FOREACH(p, &__pthread_head, plist) {
			mach_port_t kp = _pthread_kernel_thread(p);
			if (thread_port == kp) {
				p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
				break;
			}
		}

		UNLOCK(_pthread_list_lock);
	}
}

/***** pthread workqueue support routines *****/

PTHREAD_NOEXPORT void
pthread_workqueue_atfork_child(void)
{
	struct _pthread_registration_data data = {
		.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *),
	};

	int rv = __bsdthread_register(thread_start,
				      start_wqthread,
				      (int)pthreadsize,
				      (void*)&data,
				      (uintptr_t)sizeof(data),
				      data.dispatch_queue_offset);

	if (rv > 0) {
		__pthread_supported_features = rv;
	}

	if (_pthread_priority_get_qos_newest(data.main_qos) != QOS_CLASS_UNSPECIFIED) {
		_pthread_set_main_qos(data.main_qos);
		_thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = data.main_qos;
	}

	if (__libdispatch_workerfunction != NULL) {
		// Prepare the kernel for workq action.
		(void)__workq_open();
	}
}

// workqueue entry point from kernel
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
	PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);

	int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
	int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
	int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
	int kevent = flags & WQ_FLAG_THREAD_KEVENT;
	PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));

	pthread_priority_t priority = 0;
	unsigned long priority_flags = 0;

	if (overcommit)
		priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
		priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;

	if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
		priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
	} else {
		priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
	}

	if (thread_reuse == 0) {
		// New thread created by kernel, needs initialization.
		size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
		_pthread_struct_init(self, &_pthread_attr_default, (void*)self, stacksize, 1);

		_pthread_set_kernel_thread(self, kport);
		self->wqthread = 1;
		self->wqkillset = 0;

		// Not a joinable thread.
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;

		// Update the running thread count and set childrun bit.
		// XXX this should be consolidated with pthread_body().
		_pthread_set_self(self);
		_pthread_introspection_thread_create(self, false);
		__pthread_add_thread(self, false);
	}

	// If we're running with fine-grained priority, we also need to set
	// this thread to the QoS class provided to us by the kernel.
	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(thread_class, 0, priority_flags));
	}

#if WQ_DEBUG
	PTHREAD_ASSERT(self);
	PTHREAD_ASSERT(self == pthread_self());
#endif // WQ_DEBUG

	if (kevent) {
		self->fun = (void *(*)(void*))__libdispatch_keventfunction;
	} else {
		self->fun = (void *(*)(void *))__libdispatch_workerfunction;
	}
	self->arg = (void *)(uintptr_t)thread_class;

	if (kevent && keventlist) {
kevent_errors_retry:
		(*__libdispatch_keventfunction)(&keventlist, &nkevents);

		int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
		if (errors_out > 0) {
			nkevents = errors_out;
			goto kevent_errors_retry;
		} else if (errors_out < 0) {
			PTHREAD_ABORT("kevent return produced an error: %d", errno);
		}
		_pthread_exit(self, NULL);
	} else if (kevent) {
		(*__libdispatch_keventfunction)(NULL, NULL);

		__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
		_pthread_exit(self, NULL);
	}

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		if (!__workq_newapi) {
			/* Old thread priorities are inverted relative to the new
			 * flexible priority scheme: the highest priority is zero,
			 * through 2, with background at 3.
			 */
			pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;

			int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

			if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
				/* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
				switch (thread_class) {
					case QOS_CLASS_USER_INTERACTIVE:
						thread_class = QOS_CLASS_USER_INITIATED;
						break;
					case QOS_CLASS_USER_INITIATED:
						thread_class = QOS_CLASS_DEFAULT;
						break;
					default:
						break;
				}
			}

			switch (thread_class) {
				/* QOS_CLASS_USER_INTERACTIVE is not currently requested, for old dispatch priority compatibility */
				case QOS_CLASS_USER_INITIATED:
					(*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_DEFAULT:
					/* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
					 * picked up by NSThread (et al) and transported around the system. So change the TSD to
					 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
					 */
					_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
					(*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_UTILITY:
					(*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
					break;

				case QOS_CLASS_BACKGROUND:
					(*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
					break;

				/* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
			}

		} else {
			/* "New" API, where dispatch is expecting to be given the thread priority */
			(*__libdispatch_workerfunction)(priority);
		}
	} else {
		/* We're the new library running on an old kext, so thread_class is really the workq priority. */
		pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
		int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
		(*func)(thread_class, options, NULL);
	}

	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
	_pthread_exit(self, NULL);
}

/***** pthread workqueue API for libdispatch *****/

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
	__libdispatch_offset = offset;
}

int
pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
{
	int res = EBUSY;
	if (__libdispatch_workerfunction == NULL) {
		// Check whether the kernel supports the new SPIs.
		res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
		if (res == -1) {
			res = ENOTSUP;
		} else {
			__libdispatch_workerfunction = queue_func;
			__libdispatch_keventfunction = kevent_func;

			// Prepare the kernel for workq action.
			(void)__workq_open();
			if (__is_threaded == 0) {
				__is_threaded = 1;
			}
		}
	}
	return res;
}

int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
{
	if (flags != 0) {
		return ENOTSUP;
	}

	__workq_newapi = true;
	__libdispatch_offset = offset;

	int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
	return rv;
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
	return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}

int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
	return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
}

int
_pthread_workqueue_supported(void)
{
	return __pthread_supported_features;
}

int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int res = 0;

	// Cannot add threads without a worker function registered.
	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	pthread_priority_t kp = 0;

	if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
		/* The new kernel API takes the new QoS class + relative priority style of
		 * priority. This entry point is here for compatibility with old libdispatch
		 * versions (i.e. the simulator). We request the corresponding new bracket
		 * from the kernel, then on the way out run all dispatch queues that were
		 * requested.
		 */

		int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
		int flags = 0;

		/* To make sure the library does not issue more threads to dispatch than
		 * were requested, the total number of active requests is recorded in
		 * __workq_requests.
		 */
		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
		}

		kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);

	} else {
		/* Running on the old kernel, queue_priority is what we pass directly to
		 * the syscall.
		 */
		kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;

		if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
			kp |= WORKQUEUE_OVERCOMMIT;
		}
	}

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
	if (res == -1) {
		res = errno;
	}
	return res;
}
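
/*
 * Hypothetical libdispatch-style client of the legacy SPI above (a
 * sketch only; current dispatch uses the function2/kevent variants).
 * The worker drains its priority band, and addthreads asks the kernel
 * to spin up workqueue threads for that band. Guarded out of the build
 * with #if 0.
 */
#if 0
static void
example_worker(int queue_priority, int options, void *ctxt)
{
	/* Pop and run work items queued at queue_priority here. */
}

static int
example_setup(void)
{
	int rc = pthread_workqueue_setdispatch_np(example_worker);
	if (rc != 0) {
		// EBUSY if a worker is already registered, ENOTSUP on old kernels.
		return rc;
	}
	// Ask for one non-overcommit thread at the default priority band.
	return pthread_workqueue_addthreads_np(WORKQ_DEFAULT_PRIOQUEUE, 0, 1);
}
#endif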

int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
	int res = 0;

	if (__libdispatch_workerfunction == NULL) {
		return EPERM;
	}

	if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
		return ENOTSUP;
	}

	res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
	if (res == -1) {
		res = errno;
	}
	return res;
}

int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
	int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
	if (res == -1) {
		res = errno;
	}
	return res;
}

/*
 * Introspection SPI for libpthread.
 */

static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
	if (os_slowpath(!hook)) {
		PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
	}
	pthread_introspection_hook_t prev;
	prev = __sync_swap(&_pthread_introspection_hook, hook);
	return prev;
}
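
/*
 * Usage sketch, not part of this library: install a hook that observes
 * thread lifecycle events and chains to any previously installed hook,
 * which the atomic swap above makes possible. Names are hypothetical;
 * guarded out of the build with #if 0.
 */
#if 0
static pthread_introspection_hook_t example_prev_hook;

static void
example_hook(unsigned int event, pthread_t thread, void *addr, size_t size)
{
	/* Observe PTHREAD_INTROSPECTION_THREAD_{CREATE,START,TERMINATE,DESTROY}. */
	if (example_prev_hook != NULL) {
		example_prev_hook(event, thread, addr, size);
	}
}

static void
example_install(void)
{
	example_prev_hook = pthread_introspection_hook_install(example_hook);
}
#endif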

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
			pthreadsize);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_create(pthread_t t, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_create(t, destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
	size_t freesize;
	void *freeaddr;
	if (t == &_thread) {
		freesize = t->stacksize + t->guardsize;
		freeaddr = t->stackaddr - freesize;
	} else {
		freesize = t->freesize - pthreadsize;
		freeaddr = t->freeaddr;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
			freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_start(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_start(t);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t,
		void *freeaddr, size_t freesize, bool destroy)
{
	if (destroy && freesize) {
		freesize -= pthreadsize;
	}
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
			freeaddr, freesize);
	if (!destroy) return;
	_pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
		size_t freesize, bool destroy)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
			destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
	if (t == &_thread) return;
	_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
			pthreadsize);
}

static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
	if (os_fastpath(!_pthread_introspection_hook)) return;
	_pthread_introspection_hook_callout_thread_destroy(t);
}