/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"

#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>

#include <_simple.h>
#include <platform/string.h>
#include <platform/compat.h>

extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
        void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));

static void (*exitf)(int) = __exit;
__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
__private_extern__ void (*_pthread_free)(void *) = NULL;

//
// Global variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;

int __unix_conforming = 0;

// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
__private_extern__ _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;

#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
    .plo_version = 1,
    .plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
    .plo_pthread_tsd_base_address_offset = 0,
    .plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI

//
// Static variables
//

// Mach message notification that a thread needs to be recycled.
typedef struct _pthread_reap_msg_t {
    mach_msg_header_t header;
    pthread_t thread;
    mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
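
/*
 * A worked example of the macros above (illustrative only; the concrete
 * numbers are assumptions, not taken from any particular platform).
 * With 4K pages and a 512K (0x80000) stack whose top is stackaddr = 0x1008c000:
 *
 *   PTHREAD_ALLOCADDR = (0x1008c000 - 0x80000) - 0x1000 = 0x1000b000
 *   PTHREAD_ALLOCSIZE = (round_page(0x1008c000) + PTHREAD_SIZE) - 0x1000b000
 *                     = 0x81000 + PTHREAD_SIZE
 *
 * i.e. the allocation is guard page + stack + the page-rounded struct
 * _pthread sitting directly above the stack top.
 */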

static pthread_attr_t _pthread_attr_default = {0};

// The main thread's pthread_t
static struct _pthread _thread __attribute__((aligned(64))) = {0};

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
static int __libdispatch_offset;

// supported feature set
int __pthread_supported_features;

//
// Function prototypes
//

// pthread primitives
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);

static void _pthread_terminate(pthread_t t);

static void _pthread_struct_init(pthread_t t,
        const pthread_attr_t *attrs,
        void *stack,
        size_t stacksize,
        void *freeaddr,
        size_t freesize);

extern void _pthread_set_self(pthread_t);
static void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);

static void _pthread_dealloc_reply_port(pthread_t t);

static inline void __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);

static int _pthread_find_thread(pthread_t thread);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);

static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start

void pthread_workqueue_atfork_child(void);

static bool __workq_newapi;

/* Compatibility: the previous pthread API used WORKQUEUE_OVERCOMMIT to request
 * overcommit threads from the kernel. This definition is kept here, in userspace
 * only, to perform the compatibility shim from old API requests to the new kext
 * conventions.
 */
#define WORKQUEUE_OVERCOMMIT 0x10000

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 *  31 <---------------------------------> 0
 *  _________________________________________
 *  | flags(8) | policy(8) | importance(16) |
 *  -----------------------------------------
 */

#define PTHREAD_START_CUSTOM 0x01000000
#define PTHREAD_START_SETSCHED 0x02000000
#define PTHREAD_START_DETACHED 0x04000000
#define PTHREAD_START_QOSCLASS 0x08000000
#define PTHREAD_START_TSD_BASE_SET 0x10000000
#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
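
/*
 * Sketch of how _pthread_create() below packs this word for an explicitly
 * scheduled thread (illustrative, not extra behavior): a detached SCHED_FIFO
 * thread at priority 31 travels to the kernel as
 *
 *   flags = PTHREAD_START_DETACHED
 *         | PTHREAD_START_SETSCHED
 *         | ((SCHED_FIFO & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT)
 *         | (31 & PTHREAD_START_IMPORTANCE_MASK);
 *
 * and _pthread_start() unpacks the same fields on the far side.
 */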

static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid( void );
extern int __pthread_canceled(int);
extern int __pthread_kill(mach_port_t, int);

extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);

#if defined(__i386__) || defined(__x86_64__)
static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error no PTHREAD_STACK_HINT for this architecture
#endif

// Check that the offsets of the _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
        == offsetof(struct _pthread, thread_id),
        "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.

static int
_pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
{
    int res;
    kern_return_t kr;
    pthread_t t = NULL;
    mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
    size_t allocsize = 0;
    size_t guardsize = 0;
    size_t stacksize = 0;

    PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);

    *thread = NULL;
    *stack = NULL;

    // Allocate a pthread structure if necessary

    if (attrs->stackaddr != NULL) {
        PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        allocsize = PTHREAD_SIZE;
    } else {
        guardsize = attrs->guardsize;
        stacksize = attrs->stacksize;
        allocsize = stacksize + guardsize + PTHREAD_SIZE;
    }

    kr = mach_vm_map(mach_task_self(),
            &allocaddr,
            allocsize,
            vm_page_size - 1,
            VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE,
            MEMORY_OBJECT_NULL,
            0,
            FALSE,
            VM_PROT_DEFAULT,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

    if (kr != KERN_SUCCESS) {
        kr = mach_vm_allocate(mach_task_self(),
                &allocaddr,
                allocsize,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    }

    if (kr == KERN_SUCCESS) {
        // The stack grows down.
        // Set the guard page at the lowest address of the
        // newly allocated stack. Return the highest address
        // of the stack.
        if (guardsize) {
            (void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
        }

        // Thread structure resides at the top of the stack.
        t = (void *)(allocaddr + stacksize + guardsize);
        if (stacksize) {
            // Returns the top of the stack.
            *stack = t;
        }
    }

    if (t != NULL) {
        _pthread_struct_init(t, attrs,
                *stack, attrs->stacksize,
                allocaddr, allocsize);
        *thread = t;
        res = 0;
    } else {
        res = EAGAIN;
    }
    return res;
}

static int
_pthread_deallocate(pthread_t t)
{
    // Don't free the main thread.
    if (t != &_thread) {
        kern_return_t ret;
        ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
        PTHREAD_ASSERT(ret == KERN_SUCCESS);
    }
    return 0;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void*
_current_stack_address(void)
{
    int a;
    return &a;
}

#pragma clang diagnostic pop

// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE
static void
_pthread_terminate(pthread_t t)
{
    PTHREAD_ASSERT(t == pthread_self());

    uintptr_t freeaddr = (uintptr_t)t->freeaddr;
    size_t freesize = t->freesize;

    // the size of just the stack
    size_t freesize_stack = t->freesize;

    // We usually pass our structure+stack to bsdthread_terminate to free, but
    // if we get told to keep the pthread_t structure around then we need to
    // adjust the free size and addr in the pthread_t to just refer to the
    // structure and not the stack. If we do end up deallocating the
    // structure, this is useless work since no one can read the result, but we
    // can't do it after the call to pthread_remove_thread because it isn't
    // safe to dereference t after that.
    if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize) {
        // Check to ensure the pthread structure itself is part of the
        // allocation described by freeaddr/freesize, in which case we split and
        // only deallocate the area below the pthread structure. In the event of a
        // custom stack, the freeaddr/size will be the pthread structure itself, in
        // which case we shouldn't free anything (the final else case).
        freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

        // describe just the remainder for deallocation when the pthread_t goes away
        t->freeaddr += freesize_stack;
        t->freesize -= freesize_stack;
    } else if (t == &_thread) {
        freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
        uintptr_t stackborder = trunc_page((uintptr_t)_current_stack_address());
        freesize_stack = stackborder - freeaddr;
    } else {
        freesize_stack = 0;
    }

    mach_port_t kport = _pthread_kernel_thread(t);
    semaphore_t joinsem = t->joiner_notify;

    _pthread_dealloc_reply_port(t);

    // After the call to __pthread_remove_thread, it is not safe to
    // dereference the pthread_t structure.

    bool destroy, should_exit;
    destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);

    if (!destroy || t == &_thread) {
        // Use the adjusted freesize of just the stack that we computed above.
        freesize = freesize_stack;
    }

    // Check if there is nothing to free because the thread has a custom
    // stack allocation and is joinable.
    if (freesize == 0) {
        freeaddr = 0;
    }
    _pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
    if (should_exit) {
        exitf(0);
    }

    __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
    PTHREAD_ABORT("thread %p didn't terminate", t);
}

int
pthread_attr_destroy(pthread_attr_t *attr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        attr->sig = 0;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *detachstate = attr->detached;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *inheritsched = attr->inherit;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *param = attr->param;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *policy = attr->policy;
        ret = 0;
    }
    return ret;
}

// Default stack size is 512KB; independent of the main thread's stack size.
static const size_t DEFAULT_STACK_SIZE = 512 * 1024;

int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->fastpath = 1;
    attr->schedset = 0;
    attr->guardsize = vm_page_size;
    attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
    return 0;
}

int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (detachstate == PTHREAD_CREATE_JOINABLE ||
             detachstate == PTHREAD_CREATE_DETACHED)) {
        attr->detached = detachstate;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (inheritsched == PTHREAD_INHERIT_SCHED ||
             inheritsched == PTHREAD_EXPLICIT_SCHED)) {
        attr->inherit = inheritsched;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        attr->schedset = 1;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (policy == SCHED_OTHER ||
             policy == SCHED_RR ||
             policy == SCHED_FIFO)) {
        attr->policy = policy;
        attr->schedset = 1;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            // No attribute yet for the scope.
            ret = 0;
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            ret = ENOTSUP;
        }
    }
    return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0) {
        attr->stackaddr = stackaddr;
        attr->fastpath = 0;
        attr->guardsize = 0;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stacksize = stacksize;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = attr->stacksize;
        ret = 0;
    }
    return ret;
}

// Per SUSv3, the stackaddr here is the base address, i.e. the lowest
// addressable byte of the stack. This is not the same convention as in
// pthread_attr_setstackaddr, which takes the highest address (the stack top).
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0 &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        attr->fastpath = 0;
        ret = 0;
    }
    return ret;
}
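
#if 0
/*
 * Illustrative (hypothetical) caller showing the two stack conventions;
 * not part of the library. pthread_attr_setstack() takes the base (lowest)
 * address per SUSv3, while pthread_attr_setstackaddr() takes the top.
 */
static void
example_custom_stack(void *(*fn)(void *))
{
    size_t size = 16 * vm_page_size;          // page-multiple, >= PTHREAD_STACK_MIN
    void *base = valloc(size);                // page-aligned base address

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setstack(&attr, base, size); // base + size becomes the stack top

    pthread_t th;
    pthread_create(&th, &attr, fn, NULL);
    pthread_join(th, NULL);                   // custom stacks are never freed by the library
    pthread_attr_destroy(&attr);
    free(base);
}
#endif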

int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            attr->fastpath = 0;
            ret = 0;
        }
    }
    return ret;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        ret = 0;
    }
    return ret;
}


/*
 * Create and start execution of a new thread.
 */
PTHREAD_NOINLINE
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
    _pthread_set_self_internal(self, needs_tsd_base_set);
    __pthread_add_thread(self, false, false);
    void *result = (self->fun)(self->arg);

    _pthread_exit(self, result);
}

void
_pthread_start(pthread_t self,
        mach_port_t kport,
        void *(*fun)(void *),
        void *arg,
        size_t stacksize,
        unsigned int pflags)
{
    if ((pflags & PTHREAD_START_CUSTOM) == 0) {
        void *stackaddr = self;
        _pthread_struct_init(self, &_pthread_attr_default,
                stackaddr, stacksize,
                PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));

        if (pflags & PTHREAD_START_SETSCHED) {
            self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
            self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
        }

        if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
            self->detached &= ~PTHREAD_CREATE_JOINABLE;
            self->detached |= PTHREAD_CREATE_DETACHED;
        }
    }

    if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
        /* The QoS class is cached in the TSD of the pthread, so to reflect the
         * class that the kernel brought us up at, the TSD must be primed from the
         * flags parameter.
         */
        self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
    } else {
        /* Give the thread a default QoS tier, of zero. */
        self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
    }

    bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);

    _pthread_set_kernel_thread(self, kport);
    self->fun = fun;
    self->arg = arg;

    _pthread_body(self, !thread_tsd_bsd_set);
}

static void
_pthread_struct_init(pthread_t t,
        const pthread_attr_t *attrs,
        void *stackaddr,
        size_t stacksize,
        void *freeaddr,
        size_t freesize)
{
#if DEBUG
    PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
#endif

    t->sig = _PTHREAD_SIG;
    t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
    t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
    _PTHREAD_LOCK_INIT(t->lock);

    t->stackaddr = stackaddr;
    t->stacksize = stacksize;
    t->freeaddr = freeaddr;
    t->freesize = freesize;

    t->guardsize = attrs->guardsize;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->schedset = attrs->schedset;
    t->param = attrs->param;
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}

/* Need to deprecate this in the future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    mach_port_t kport = MACH_PORT_NULL;

    if (t == pthread_self()) {
        /*
         * If the call is on self, return the kernel port. We cannot
         * add this bypass for the main thread as it might have exited,
         * and we should not return stale port info.
         */
        kport = _pthread_kernel_thread(t);
    } else {
        (void)_pthread_lookup_thread(t, &kport, 0);
    }

    return kport;
}

pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
    struct _pthread *p = NULL;

    /* No need to wait as mach port is already known */
    _PTHREAD_LOCK(_pthread_list_lock);

    TAILQ_FOREACH(p, &__pthread_head, plist) {
        if (_pthread_kernel_thread(p) == kernel_thread) {
            break;
        }
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return p;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    int ret;
    size_t size = 0;

    if (t == NULL) {
        return ESRCH; // XXX bug?
    }

#if !defined(__arm__) && !defined(__arm64__)
    // The default rlimit based allocations will be provided with a stacksize
    // of the current limit and a freesize of the max. However, custom
    // allocations will just have the guard page to free. If we aren't in the
    // latter case, call into rlimit to determine the current stack size. In
    // the event that the current limit == max limit then we'll fall down the
    // fast path, but since it's unlikely that the limit is going to be lowered
    // after it's been changed to the max, we should be fine.
    //
    // Of course, on arm rlim_cur == rlim_max and there's only the one guard
    // page. So, we can skip all this there.
    if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
        // We want to call getrlimit() just once, as it's relatively expensive
        static size_t rlimit_stack;

        if (rlimit_stack == 0) {
            struct rlimit limit;
            int ret = getrlimit(RLIMIT_STACK, &limit);

            if (ret == 0) {
                rlimit_stack = (size_t) limit.rlim_cur;
            }
        }

        if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
            return t->stacksize;
        } else {
            return rlimit_stack;
        }
    }
#endif /* !defined(__arm__) && !defined(__arm64__) */

    if (t == pthread_self() || t == &_thread) {
        return t->stacksize;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    ret = _pthread_find_thread(t);
    if (ret == 0) {
        size = t->stacksize;
    } else {
        size = ret; // XXX bug?
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return size;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    int ret;
    void *addr = NULL;

    if (t == NULL) {
        return (void *)(uintptr_t)ESRCH; // XXX bug?
    }

    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == &_thread) {
        return t->stackaddr;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    ret = _pthread_find_thread(t);
    if (ret == 0) {
        addr = t->stackaddr;
    } else {
        addr = (void *)(uintptr_t)ret; // XXX bug?
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return addr;
}

static mach_port_t
_pthread_reply_port(pthread_t t)
{
    void *p;
    if (t == NULL) {
        p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
    } else {
        p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
    }
    return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
    void *p = (void *)(uintptr_t)reply_port;
    if (t == NULL) {
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
    } else {
        t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
    }
}

static void
_pthread_dealloc_reply_port(pthread_t t)
{
    mach_port_t reply_port = _pthread_reply_port(t);
    if (reply_port != MACH_PORT_NULL) {
        mig_dealloc_reply_port(reply_port);
    }
}

pthread_t
pthread_main_thread_np(void)
{
    return &_thread;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}


/* If we are passed a pthread_t that is NULL, we return the current thread's
 * thread_id, so callers don't have to call pthread_self() themselves just to
 * get their own thread_id.
 */
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
    int res = 0;
    pthread_t self = pthread_self();

    if (thread_id == NULL) {
        return EINVAL;
    }

    if (thread == NULL || thread == self) {
        *thread_id = self->thread_id;
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
        res = _pthread_find_thread(thread);
        if (res == 0) {
            *thread_id = thread->thread_id;
        }
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
    return res;
}
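
/*
 * Sketch of typical usage (illustrative, not part of the library):
 *
 *   uint64_t tid;
 *   pthread_threadid_np(NULL, &tid);   // NULL means "the calling thread"
 *
 * which avoids an explicit pthread_self() in the caller.
 */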

int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
    int res;

    if (thread == NULL) {
        return ESRCH;
    }

    _PTHREAD_LOCK(_pthread_list_lock);
    res = _pthread_find_thread(thread);
    if (res == 0) {
        strlcpy(threadname, thread->pthread_name, len);
    }
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return res;
}

int
pthread_setname_np(const char *name)
{
    int res;
    pthread_t self = pthread_self();

    size_t len = 0;
    if (name != NULL) {
        len = strlen(name);
    }

    /* prototype is in pthread_internals.h */
    res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
    if (res == 0) {
        if (len > 0) {
            strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
        } else {
            bzero(self->pthread_name, MAXTHREADNAMESIZE);
        }
    }
    return res;
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
{
    bool should_deallocate = false;
    bool should_add = true;

    if (from_mach_thread) {
        _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
    }

    // The parent and child threads race to add the thread to the list.
    // When called by the parent:
    // - set parentcheck to true
    // - back off if childrun is true
    // When called by the child:
    // - set childrun to true
    // - back off if parentcheck is true
    if (parent) {
        t->parentcheck = 1;
        if (t->childrun) {
            // child got here first, don't add.
            should_add = false;
        }

        // If the child exits before we check in then it has to keep
        // the thread structure memory alive so our dereferences above
        // are valid. If it's a detached thread, then no joiner will
        // deallocate the thread structure itself. So we do it here.
        if (t->childexit) {
            should_add = false;
            should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
        }
    } else {
        t->childrun = 1;
        if (t->parentcheck) {
            // Parent got here first, don't add.
            should_add = false;
        }
        if (t->wqthread) {
            // Work queue threads have no parent. Simulate.
            t->parentcheck = 1;
        }
    }

    if (should_add) {
        TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
        _pthread_count++;
    }

    if (from_mach_thread) {
        _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }

    if (parent) {
        if (!from_mach_thread) {
            // PR-26275485: Mach threads will likely crash trying to run
            // introspection code. Since the fallout from the introspection
            // code not seeing the injected thread is likely less than crashing
            // in the introspection code, just don't make the call.
            _pthread_introspection_thread_create(t, should_deallocate);
        }
        if (should_deallocate) {
            _pthread_deallocate(t);
        }
    } else {
        _pthread_introspection_thread_start(t);
    }
}
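
/*
 * Illustrative interleavings of the parent/child race above (no extra
 * behavior, just the two possible orderings):
 *
 *   parent first: parentcheck=1 -> parent adds to the list; the child sees
 *                 parentcheck set and backs off.
 *   child first:  childrun=1 -> child adds to the list; the parent sees
 *                 childrun set and backs off. If the child has already
 *                 exited (childexit) and was detached, the parent
 *                 deallocates the structure here.
 */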

// <rdar://problem/12544957> must always inline this function to avoid epilogues
// Returns EBUSY if the thread structure should be kept alive (is joinable).
// Returns ESRCH if the thread structure is no longer valid (was detached).
PTHREAD_ALWAYS_INLINE
static inline int
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
    int ret = 0;

    bool should_remove = true;

    _PTHREAD_LOCK(_pthread_list_lock);

    // When a thread removes itself:
    // - Set the childexit flag indicating that the thread has exited.
    // - Return EBUSY if parentcheck is zero (must keep structure)
    // - If the thread is joinable, keep it on the list so that
    //   the join operation succeeds. Still decrement the running
    //   thread count so that we exit if no threads are running.
    // - Update the running thread count.
    // When another thread removes a joinable thread:
    // - CAREFUL not to dereference the thread before verifying that the
    //   reference is still valid using _pthread_find_thread().
    // - Remove the thread from the list.

    if (child) {
        t->childexit = 1;
        if (t->parentcheck == 0) {
            ret = EBUSY;
        }
        if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
            ret = EBUSY;
            should_remove = false;
        }
        *should_exit = (--_pthread_count <= 0);
    } else {
        ret = _pthread_find_thread(t);
        if (ret == 0) {
            // If we found a thread but it's not joinable, bail.
            if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
                should_remove = false;
                ret = ESRCH;
            }
        }
    }
    if (should_remove) {
        TAILQ_REMOVE(&__pthread_head, t, plist);
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return ret;
}

static int
_pthread_create(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg,
        bool from_mach_thread)
{
    pthread_t t = NULL;
    unsigned int flags = 0;

    pthread_attr_t *attrs = (pthread_attr_t *)attr;
    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    if (attrs->detached == PTHREAD_CREATE_DETACHED) {
        flags |= PTHREAD_START_DETACHED;
    }

    if (attrs->schedset != 0) {
        flags |= PTHREAD_START_SETSCHED;
        flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
        flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
    } else if (attrs->qosclass != 0) {
        flags |= PTHREAD_START_QOSCLASS;
        flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
    }

    __is_threaded = 1;

    void *stack;

    if (attrs->fastpath) {
        // kernel will allocate thread and stack, pass stacksize.
        stack = (void *)attrs->stacksize;
    } else {
        // allocate the thread and its stack
        flags |= PTHREAD_START_CUSTOM;

        int res;
        res = _pthread_allocate(&t, attrs, &stack);
        if (res) {
            return res;
        }

        t->arg = arg;
        t->fun = start_routine;
    }

    pthread_t t2;
    t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
    if (t2 == (pthread_t)-1) {
        if (flags & PTHREAD_START_CUSTOM) {
            // free the thread and stack if we allocated it
            _pthread_deallocate(t);
        }
        return EAGAIN;
    }
    if (t == NULL) {
        t = t2;
    }

    __pthread_add_thread(t, true, from_mach_thread);

    // n.b. if a thread is created detached and exits, t will be invalid
    *thread = t;
    return 0;
}

int
pthread_create(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    return _pthread_create(thread, attr, start_routine, arg, false);
}
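
#if 0
/*
 * Minimal usage sketch of the public entry point above (illustrative only;
 * the worker function is hypothetical, not part of the library).
 */
static void *
example_worker(void *arg)
{
    return arg;
}

static int
example_spawn_and_join(void)
{
    pthread_t th;
    void *result;
    int err = pthread_create(&th, NULL, example_worker, (void *)1);
    if (err != 0) {
        return err;             // EINVAL or EAGAIN per the code above
    }
    return pthread_join(th, &result);
}
#endif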

int
pthread_create_from_mach_thread(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    return _pthread_create(thread, attr, start_routine, arg, true);
}

static void
_pthread_suspended_body(pthread_t self)
{
    _pthread_set_self(self);
    __pthread_add_thread(self, false, false);
    _pthread_exit(self, (self->fun)(self->arg));
}

int
pthread_create_suspended_np(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
        void *arg)
{
    int res;
    void *stack;
    mach_port_t kernel_thread = MACH_PORT_NULL;

    const pthread_attr_t *attrs = attr;
    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    pthread_t t;
    res = _pthread_allocate(&t, attrs, &stack);
    if (res) {
        return res;
    }

    *thread = t;

    kern_return_t kr;
    kr = thread_create(mach_task_self(), &kernel_thread);
    if (kr != KERN_SUCCESS) {
        //PTHREAD_ABORT("thread_create() failed: %d", kern_res);
        return EINVAL; /* Need better error here? */
    }

    _pthread_set_kernel_thread(t, kernel_thread);
    (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

    __is_threaded = 1;

    t->arg = arg;
    t->fun = start_routine;

    __pthread_add_thread(t, true, false);

    // Set up a suspended thread.
    _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
    return res;
}

int
pthread_detach(pthread_t thread)
{
    int res;
    bool join = false;
    semaphore_t sema = SEMAPHORE_NULL;

    res = _pthread_lookup_thread(thread, NULL, 1);
    if (res) {
        return res; // Not a valid thread to detach.
    }

    _PTHREAD_LOCK(thread->lock);
    if (thread->detached & PTHREAD_CREATE_JOINABLE) {
        if (thread->detached & _PTHREAD_EXITED) {
            // Join the thread if it's already exited.
            join = true;
        } else {
            thread->detached &= ~PTHREAD_CREATE_JOINABLE;
            thread->detached |= PTHREAD_CREATE_DETACHED;
            sema = thread->joiner_notify;
        }
    } else {
        res = EINVAL;
    }
    _PTHREAD_UNLOCK(thread->lock);

    if (join) {
        pthread_join(thread, NULL);
    } else if (sema) {
        semaphore_signal(sema);
    }

    return res;
}

int
pthread_kill(pthread_t th, int sig)
{
    if (sig < 0 || sig > NSIG) {
        return EINVAL;
    }

    mach_port_t kport = MACH_PORT_NULL;
    if (_pthread_lookup_thread(th, &kport, 0) != 0) {
        return ESRCH; // Not a valid thread.
    }

    // Don't signal workqueue threads.
    if (th->wqthread != 0 && th->wqkillset == 0) {
        return ENOTSUP;
    }

    int ret = __pthread_kill(kport, sig);

    if (ret == -1) {
        ret = errno;
    }
    return ret;
}

int
__pthread_workqueue_setkill(int enable)
{
    pthread_t self = pthread_self();

    _PTHREAD_LOCK(self->lock);
    self->wqkillset = enable ? 1 : 0;
    _PTHREAD_UNLOCK(self->lock);

    return 0;
}

static void *
__pthread_get_exit_value(pthread_t t, int conforming)
{
    const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
    void *value = t->exit_value;
    if (conforming) {
        if ((t->cancel_state & flags) == flags) {
            value = PTHREAD_CANCELED;
        }
    }
    return value;
}

/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
    struct __darwin_pthread_handler_rec *handler;

    // Disable signal delivery while we clean up
    __disable_threadsignal(1);

    // Set cancel state to disable and type to deferred
    _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

    while ((handler = self->__cleanup_stack) != 0) {
        (handler->__routine)(handler->__arg);
        self->__cleanup_stack = handler->__next;
    }
    _pthread_tsd_cleanup(self);

    _PTHREAD_LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;
    self->exit_value = value_ptr;

    if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
            self->joiner_notify == SEMAPHORE_NULL) {
        self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
    }
    _PTHREAD_UNLOCK(self->lock);

    // Clear per-thread semaphore cache
    os_put_cached_semaphore(SEMAPHORE_NULL);

    _pthread_terminate(self);
}

void
pthread_exit(void *value_ptr)
{
    pthread_t self = pthread_self();
    if (self->wqthread == 0) {
        _pthread_exit(self, value_ptr);
    } else {
        PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
    }
}

int
pthread_getschedparam(pthread_t thread,
        int *policy,
        struct sched_param *param)
{
    int ret;

    if (thread == NULL) {
        return ESRCH;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    ret = _pthread_find_thread(thread);
    if (ret == 0) {
        if (policy) {
            *policy = thread->policy;
        }
        if (param) {
            *param = thread->param;
        }
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return ret;
}

static int
pthread_setschedparam_internal(pthread_t thread,
        mach_port_t kport,
        int policy,
        const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    switch (policy) {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return EINVAL;
    }
    ret = thread_policy(kport, policy, base, count, TRUE);
    return (ret != KERN_SUCCESS) ? EINVAL : 0;
}

int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
    mach_port_t kport = MACH_PORT_NULL;
    int res;
    int bypass = 1;

    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == &_thread) {
        kport = _pthread_kernel_thread(t);
    } else {
        bypass = 0;
        (void)_pthread_lookup_thread(t, &kport, 0);
    }

    res = pthread_setschedparam_internal(t, kport, policy, param);
    if (res == 0) {
        if (bypass == 0) {
            // Ensure the thread is still valid.
            _PTHREAD_LOCK(_pthread_list_lock);
            res = _pthread_find_thread(t);
            if (res == 0) {
                t->policy = policy;
                t->param = *param;
            }
            _PTHREAD_UNLOCK(_pthread_list_lock);
        } else {
            t->policy = policy;
            t->param = *param;
        }
    }
    return res;
}

int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
    return (t1 == t2);
}

/*
 * Force LLVM not to optimize this to a call to __pthread_set_self; if it does,
 * _pthread_set_self won't yet be bound when secondary threads try to start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
    return _pthread_set_self_internal(p, true);
}

void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
    if (p == NULL) {
        p = &_thread;
    }

    uint64_t tid = __thread_selfid();
    if (tid == -1ull) {
        PTHREAD_ABORT("failed to set thread_id");
    }

    p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
    p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
    p->thread_id = tid;

    if (needs_tsd_base_set) {
        _thread_set_tsd_base(&p->tsd[0]);
    }
}

struct _pthread_once_context {
    pthread_once_t *pthread_once;
    void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
    struct _pthread_once_context *ctx = context;
    pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
    ctx->routine();
    pthread_cleanup_pop(0);
    ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    struct _pthread_once_context ctx = { once_control, init_routine };
    do {
        os_once(&once_control->once, &ctx, __pthread_once_handler);
    } while (once_control->sig == _PTHREAD_ONCE_SIG_init);
    return 0;
}
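
#if 0
/*
 * Usage sketch for pthread_once() (illustrative; the init function is
 * hypothetical). The guarantee implemented above: init_routine runs exactly
 * once, and every caller returns only after it has completed.
 */
static pthread_once_t example_once = PTHREAD_ONCE_INIT;

static void
example_init(void)
{
    /* one-time setup goes here */
}

static void
example_entry(void)
{
    pthread_once(&example_once, example_init);
}
#endif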

void
_pthread_testcancel(pthread_t thread, int isconforming)
{
    const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);

    _PTHREAD_LOCK(thread->lock);
    bool canceled = ((thread->cancel_state & flags) == flags);
    _PTHREAD_UNLOCK(thread->lock);

    if (canceled) {
        pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
    }
}

void
_pthread_exit_if_canceled(int error)
{
    if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
        pthread_t self = pthread_self();
        if (self != NULL) {
            self->cancel_error = error;
        }
        pthread_exit(PTHREAD_CANCELED);
    }
}

int
pthread_getconcurrency(void)
{
    return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
    if (new_level < 0) {
        return EINVAL;
    }
    pthread_concurrency = new_level;
    return 0;
}

static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
    uintptr_t val = 0;

    // Expect hex string starting with "0x"
    if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
        p += 2;
        while (1) {
            char c = *p;
            if ('0' <= c && c <= '9') {
                val = (val << 4) + (c - '0');
            } else if ('a' <= c && c <= 'f') {
                val = (val << 4) + (c - 'a' + 10);
            } else if ('A' <= c && c <= 'F') {
                val = (val << 4) + (c - 'A' + 10);
            } else {
                break;
            }
            ++p;
        }
    }

    *endptr = (char *)p;
    return val;
}

static int
parse_main_stack_params(const char *apple[],
        void **stackaddr,
        size_t *stacksize,
        void **allocaddr,
        size_t *allocsize)
{
    const char *p = _simple_getenv(apple, "main_stack");
    if (!p) return 0;

    int ret = 0;
    const char *s = p;

    *stackaddr = _pthread_strtoul(s, &s, 16);
    if (*s != ',') goto out;

    *stacksize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocaddr = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocsize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',' && *s != 0) goto out;

    ret = 1;
out:
    bzero((char *)p, strlen(p));
    return ret;
}
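
/*
 * For reference, the kernel-provided apple[] entry parsed above looks like
 * (the values here are made up, illustrative only):
 *
 *   main_stack=0x7fff5fc00000,0x80000,0x7fff5f400000,0x804000
 *
 * i.e. stack address, stack size, allocation address, allocation size, all
 * hex and comma-separated; the string is bzero'ed once it has been parsed.
 */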

#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
    if (_pthread_malloc) {
        return _pthread_malloc(sz);
    } else {
        return NULL;
    }
}

void
free(void *p)
{
    if (_pthread_free) {
        _pthread_free(p);
    }
}
#endif // VARIANT_STATIC

/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */

int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
        const char *envp[] __unused,
        const char *apple[],
        const struct ProgramVars *vars __unused)
{
    // Save our provided pushed-down functions
    if (pthread_funcs) {
        exitf = pthread_funcs->exit;

        if (pthread_funcs->version >= 2) {
            _pthread_malloc = pthread_funcs->malloc;
            _pthread_free = pthread_funcs->free;
        }
    }

    //
    // Get host information
    //

    kern_return_t kr;
    host_flavor_t flavor = HOST_PRIORITY_INFO;
    mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
    host_priority_info_data_t priority_info;
    host_t host = mach_host_self();
    kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
    if (kr != KERN_SUCCESS) {
        PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr));
    } else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    mach_port_deallocate(mach_task_self(), host);

    //
    // Set up the main thread structure
    //

    // Get the address and size of the main thread's stack from the kernel.
    void *stackaddr = 0;
    size_t stacksize = 0;
    void *allocaddr = 0;
    size_t allocsize = 0;
    if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
            stackaddr == NULL || stacksize == 0) {
        // Fall back to previous behavior.
        size_t len = sizeof(stackaddr);
        int mib[] = { CTL_KERN, KERN_USRSTACK };
        if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
            stackaddr = (void *)USRSTACK64;
#else
            stackaddr = (void *)USRSTACK;
#endif
        }
        stacksize = DFLSSIZ;
        allocaddr = 0;
        allocsize = 0;
    }

    pthread_t thread = &_thread;
    pthread_attr_init(&_pthread_attr_default);
    _pthread_struct_init(thread, &_pthread_attr_default,
            stackaddr, stacksize,
            allocaddr, allocsize);
    thread->detached = PTHREAD_CREATE_JOINABLE;

    // Finish initialization with common code that is reinvoked on the
    // child side of a fork.

    // Finishes initialization of main thread attributes.
    // Initializes the thread list and adds the main thread.
    // Calls _pthread_set_self() to prepare the main thread for execution.
    __pthread_fork_child_internal(thread);

    // Set up kernel entry points with __bsdthread_register.
    pthread_workqueue_atfork_child();

    // Have pthread_key do its init envvar checks.
    _pthread_key_global_init(envp);

    return 0;
}

int
sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

PTHREAD_NOEXPORT void
__pthread_fork_child_internal(pthread_t p)
{
    TAILQ_INIT(&__pthread_head);
    _PTHREAD_LOCK_INIT(_pthread_list_lock);

    // Re-use the main thread's static storage if no thread was provided.
    if (p == NULL) {
        if (_thread.tsd[0] != 0) {
            bzero(&_thread, sizeof(struct _pthread));
        }
        p = &_thread;
    }

    _PTHREAD_LOCK_INIT(p->lock);
    _pthread_set_kernel_thread(p, mach_thread_self());
    _pthread_set_reply_port(p, mach_reply_port());
    p->__cleanup_stack = NULL;
    p->joiner_notify = SEMAPHORE_NULL;
    p->joiner = MACH_PORT_NULL;
    p->detached |= _PTHREAD_CREATE_PARENT;
    p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;

    // Initialize the list of threads with the new main thread.
    TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
    _pthread_count = 1;

    _pthread_set_self(p);
    _pthread_introspection_thread_start(p);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
PTHREAD_NOEXPORT int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
    pthread_t self;

    switch (state) {
        case PTHREAD_CANCEL_ENABLE:
            if (conforming) {
                __pthread_canceled(1);
            }
            break;
        case PTHREAD_CANCEL_DISABLE:
            if (conforming) {
                __pthread_canceled(2);
            }
            break;
        default:
            return EINVAL;
    }

    self = pthread_self();
    _PTHREAD_LOCK(self->lock);
    if (oldstate) {
        *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    }
    self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
    self->cancel_state |= state;
    _PTHREAD_UNLOCK(self->lock);
    if (!conforming) {
        _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
    }
    return 0;
}

/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
    _PTHREAD_LOCK(self->lock);
    self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
    self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
    if (value_ptr == PTHREAD_CANCELED) {
        // 4597450: begin
        self->detached |= _PTHREAD_WASCANCEL;
        // 4597450: end
    }
    _PTHREAD_UNLOCK(self->lock);
}

int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
    // Returns ESRCH if the thread was not created joinable.
    int ret = __pthread_remove_thread(thread, false, NULL);
    if (ret != 0) {
        return ret;
    }

    if (value_ptr) {
        *value_ptr = __pthread_get_exit_value(thread, conforming);
    }
    _pthread_introspection_thread_destroy(thread);
    _pthread_deallocate(thread);
    return 0;
}

/* Always called with the list lock held; returns with it held, though it may
 * drop and retake the lock while waiting for the kernel port to be set. */
int
_pthread_find_thread(pthread_t thread)
{
    if (thread != NULL) {
        pthread_t p;
loop:
        TAILQ_FOREACH(p, &__pthread_head, plist) {
            if (p == thread) {
                if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
                    _PTHREAD_UNLOCK(_pthread_list_lock);
                    sched_yield();
                    _PTHREAD_LOCK(_pthread_list_lock);
                    goto loop;
                }
                return 0;
            }
        }
    }
    return ESRCH;
}

int
_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
{
    mach_port_t kport = MACH_PORT_NULL;
    int ret;

    if (thread == NULL) {
        return ESRCH;
    }

    _PTHREAD_LOCK(_pthread_list_lock);

    ret = _pthread_find_thread(thread);
    if (ret == 0) {
        // Fail if we only want joinable threads and the thread found is
        // in the detached state.
        if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
            ret = EINVAL;
        } else {
            kport = _pthread_kernel_thread(thread);
        }
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    if (portp != NULL) {
        *portp = kport;
    }

    return ret;
}

void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
    if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
        /* Clear the current thread's TSD, that can be done inline. */
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
    } else {
        pthread_t p;

        _PTHREAD_LOCK(_pthread_list_lock);

        TAILQ_FOREACH(p, &__pthread_head, plist) {
            mach_port_t kp = _pthread_kernel_thread(p);
            if (thread_port == kp) {
                p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
                break;
            }
        }

        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
}
2020
2021 /***** pthread workqueue support routines *****/
2022
2023 PTHREAD_NOEXPORT void
2024 pthread_workqueue_atfork_child(void)
2025 {
2026 struct _pthread_registration_data data = {};
2027 data.version = sizeof(struct _pthread_registration_data);
2028 data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
2029 data.tsd_offset = offsetof(struct _pthread, tsd);
2030
2031 int rv = __bsdthread_register(thread_start,
2032 start_wqthread, (int)PTHREAD_SIZE,
2033 (void*)&data, (uintptr_t)sizeof(data),
2034 data.dispatch_queue_offset);
2035
2036 if (rv > 0) {
2037 __pthread_supported_features = rv;
2038 }
2039
2040 pthread_priority_t main_qos = (pthread_priority_t)data.main_qos;
2041
2042 if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
2043 _pthread_set_main_qos(main_qos);
2044 _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
2045 }
2046
2047 if (__libdispatch_workerfunction != NULL) {
2048 // Prepare the kernel for workq action
2049 (void)__workq_open();
2050 }
2051 }
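/*
 * Reader's sketch (not part of the source): pthread_workqueue_atfork_child()
 * runs in the forked child to re-register with the kernel. Application code
 * hangs its own post-fork fixups off the standard pthread_atfork() hooks:
 */
#if 0
#include <pthread.h>

static void child_reinit(void)
{
	/* re-create locks, caches and worker state owned by the app */
}

static void install_fork_handlers(void)
{
	pthread_atfork(NULL /* prepare */, NULL /* parent */, child_reinit);
}
#endif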
2052
2053 // workqueue entry point from kernel
2054 void
2055 _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
2056 {
2057 PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);
2058
2059 int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
2060 int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
2061 int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
2062 int kevent = flags & WQ_FLAG_THREAD_KEVENT;
2063 PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
2064
2065 pthread_priority_t priority = 0;
2066 unsigned long priority_flags = 0;
2067
2068 if (overcommit)
2069 priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2070 if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
2071 priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
2072 if (kevent)
2073 priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2074
2075 if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
2076 priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
2077 } else {
2078 priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
2079 }
2080
2081 if (thread_reuse == 0) {
2082 // New thread created by the kernel; needs initialization.
2083 void *stackaddr = self;
2084 size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
2085
2086 _pthread_struct_init(self, &_pthread_attr_default,
2087 stackaddr, stacksize,
2088 PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
2089
2090 _pthread_set_kernel_thread(self, kport);
2091 self->wqthread = 1;
2092 self->wqkillset = 0;
2093
2094 // Not a joinable thread.
2095 self->detached &= ~PTHREAD_CREATE_JOINABLE;
2096 self->detached |= PTHREAD_CREATE_DETACHED;
2097
2098 // Update the running thread count and set childrun bit.
2099 bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
2100 _pthread_set_self_internal(self, !thread_tsd_base_set);
2101 _pthread_introspection_thread_create(self, false);
2102 __pthread_add_thread(self, false, false);
2103 }
2104
2105 // If we're running with fine-grained priority, we also need to
2106 // set this thread to the QoS class provided to us by the kernel.
2107 if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
2108 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(thread_class, 0, priority_flags));
2109 }
2110
2111 #if WQ_DEBUG
2112 PTHREAD_ASSERT(self);
2113 PTHREAD_ASSERT(self == pthread_self());
2114 #endif // WQ_DEBUG
2115
2116 if (kevent) {
2117 self->fun = (void *(*)(void*))__libdispatch_keventfunction;
2118 } else {
2119 self->fun = (void *(*)(void *))__libdispatch_workerfunction;
2120 }
2121 self->arg = (void *)(uintptr_t)thread_class;
2122
2123 if (kevent && keventlist && nkevents > 0) {
2124 kevent_errors_retry:
2125 (*__libdispatch_keventfunction)(&keventlist, &nkevents);
2126
2127 int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
2128 if (errors_out > 0) {
2129 nkevents = errors_out;
2130 goto kevent_errors_retry;
2131 } else if (errors_out < 0) {
2132 PTHREAD_ABORT("kevent return produced an error: %d", errno);
2133 }
2134 goto thexit;
2135 } else if (kevent) {
2136 (*__libdispatch_keventfunction)(NULL, NULL);
2137
2138 __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
2139 goto thexit;
2140 }
2141
2142 if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
2143 if (!__workq_newapi) {
2144 /* Old thread priorities are inverted relative to the new flexible
2145 * priority scheme: the highest priority is zero, lower priorities
2146 * count up to 2, and background is 3.
2147 */
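/*
 * Concretely (values from the legacy workqueue SPI, noted here for the
 * reader): WORKQ_HIGH_PRIOQUEUE is 0, WORKQ_DEFAULT_PRIOQUEUE is 1,
 * WORKQ_LOW_PRIOQUEUE is 2 and WORKQ_BG_PRIOQUEUE is 3; the switch below
 * maps each QoS class onto one of these bands for the legacy worker.
 */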
2148 pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
2149
2150 int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
2151
2152 if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
2153 /* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
2154 switch (thread_class) {
2155 case QOS_CLASS_USER_INTERACTIVE:
2156 thread_class = QOS_CLASS_USER_INITIATED;
2157 break;
2158 case QOS_CLASS_USER_INITIATED:
2159 thread_class = QOS_CLASS_DEFAULT;
2160 break;
2161 default:
2162 break;
2163 }
2164 }
2165
2166 switch (thread_class) {
2167 /* QOS_CLASS_USER_INTERACTIVE is not currently requested by dispatch, for old dispatch priority compatibility */
2168 case QOS_CLASS_USER_INITIATED:
2169 (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
2170 break;
2171
2172 case QOS_CLASS_DEFAULT:
2173 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2174 * picked up by NSThread (et al.) and transported around the system. So change the TSD to
2175 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2176 */
2177 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
2178 (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
2179 break;
2180
2181 case QOS_CLASS_UTILITY:
2182 (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
2183 break;
2184
2185 case QOS_CLASS_BACKGROUND:
2186 (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
2187 break;
2188
2189 /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
2190 }
2191
2192 } else {
2193 /* "New" API, where dispatch is expecting to be given the thread priority */
2194 (*__libdispatch_workerfunction)(priority);
2195 }
2196 } else {
2197 /* We're the new library running on an old kext, so thread_class is really the workq priority. */
2198 pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
2199 int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
2200 (*func)(thread_class, options, NULL);
2201 }
2202
2203 __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
2204
2205 thexit:
2206 {
2207 pthread_priority_t current_priority = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
2208 if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
2209 (_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
2210 // Reset QoS to something low for the cleanup process
2211 pthread_priority_t priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
2212 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
2213 }
2214 }
2215
2216 _pthread_exit(self, NULL);
2217 }
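/*
 * Reader's sketch (not part of the source): the worker and kevent function
 * pointers invoked above are registered by libdispatch. A minimal client of
 * the same SPI (header path assumed to be the installed SPI header) would
 * look like:
 */
#if 0
#include <pthread/workqueue_private.h>

static void worker(pthread_priority_t priority)
{
	/* Drain work queued at 'priority'; returning lets _pthread_wqthread()
	 * hand the thread back via WQOPS_THREAD_RETURN and then exit it. */
}

static int setup(void)
{
	/* offset 0: no dispatch queue TSD offset; flags must be 0 */
	return _pthread_workqueue_init(worker, 0, 0);
}
#endif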
2218
2219 /***** pthread workqueue API for libdispatch *****/
2220
2221 void
2222 pthread_workqueue_setdispatchoffset_np(int offset)
2223 {
2224 __libdispatch_offset = offset;
2225 }
2226
2227 int
2228 pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
2229 {
2230 int res = EBUSY;
2231 if (__libdispatch_workerfunction == NULL) {
2232 // Check whether the kernel supports new SPIs
2233 res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
2234 if (res == -1) {
2235 res = ENOTSUP;
2236 } else {
2237 __libdispatch_workerfunction = queue_func;
2238 __libdispatch_keventfunction = kevent_func;
2239
2240 // Prepare the kernel for workq action
2241 (void)__workq_open();
2242 if (__is_threaded == 0) {
2243 __is_threaded = 1;
2244 }
2245 }
2246 }
2247 return res;
2248 }
2249
2250 int
2251 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
2252 {
2253 if (flags != 0) {
2254 return ENOTSUP;
2255 }
2256
2257 __workq_newapi = true;
2258 __libdispatch_offset = offset;
2259
2260 int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
2261 return rv;
2262 }
2263
2264 int
2265 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
2266 {
2267 return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
2268 }
2269
2270 int
2271 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
2272 {
2273 return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
2274 }
2275
2276 int
2277 _pthread_workqueue_supported(void)
2278 {
2279 return __pthread_supported_features;
2280 }
2281
2282 int
2283 pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
2284 {
2285 int res = 0;
2286
2287 // Cannot add threads without a worker function registered.
2288 if (__libdispatch_workerfunction == NULL) {
2289 return EPERM;
2290 }
2291
2292 pthread_priority_t kp = 0;
2293
2294 if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
2295 /* The new kernel API takes the new QoS class + relative priority style of
2296 * priority. This entry point is here for compatibility with old libdispatch
2297 * versions (i.e., the simulator). We request the corresponding new bracket
2298 * from the kernel, then on the way out run all dispatch queues that were
2299 * requested.
2300 */
2301
2302 int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
2303 int flags = 0;
2304
2305 /* To make sure the library does not issue more threads to dispatch than
2306 * were requested, the total number of active requests is recorded in
2307 * __workq_requests.
2308 */
2309 if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
2310 flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2311 }
2312
2313 kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
2314
2315 } else {
2316 /* Running on the old kernel, queue_priority is what we pass directly to
2317 * the syscall.
2318 */
2319 kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
2320
2321 if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
2322 kp |= WORKQUEUE_OVERCOMMIT;
2323 }
2324 }
2325
2326 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
2327 if (res == -1) {
2328 res = errno;
2329 }
2330 return res;
2331 }
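/*
 * Reader's sketch (not part of the source): a legacy client requests
 * worker threads for a priority band; each granted thread enters through
 * _pthread_wqthread() above.
 */
#if 0
	/* two overcommit threads at the default priority band */
	int err = pthread_workqueue_addthreads_np(WORKQ_DEFAULT_PRIOQUEUE,
			WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, 2);
	if (err == EPERM) {
		/* no worker function has been registered yet */
	}
#endif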
2332
2333 int
2334 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
2335 {
2336 int res = 0;
2337
2338 if (__libdispatch_workerfunction == NULL) {
2339 return EPERM;
2340 }
2341
2342 if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
2343 return ENOTSUP;
2344 }
2345
2346 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
2347 if (res == -1) {
2348 res = errno;
2349 }
2350 return res;
2351 }
2352
2353 int
2354 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
2355 {
2356 int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
2357 if (res == -1) {
2358 res = errno;
2359 }
2360 return res;
2361 }
2362
2363 /*
2364 * Introspection SPI for libpthread.
2365 */
2366
2367 static pthread_introspection_hook_t _pthread_introspection_hook;
2368
2369 pthread_introspection_hook_t
2370 pthread_introspection_hook_install(pthread_introspection_hook_t hook)
2371 {
2372 if (os_slowpath(!hook)) {
2373 PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
2374 }
2375 pthread_introspection_hook_t prev;
2376 prev = __sync_swap(&_pthread_introspection_hook, hook);
2377 return prev;
2378 }
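/*
 * Reader's sketch (not part of the source): hooks chain, so an installer
 * should save the previous hook and forward events to it (header path
 * assumed to be the installed SPI header):
 */
#if 0
#include <pthread/introspection_private.h>

static pthread_introspection_hook_t prev_hook;

static void my_hook(unsigned int event, pthread_t thread, void *addr,
		size_t size)
{
	/* observe PTHREAD_INTROSPECTION_THREAD_{CREATE,START,TERMINATE,DESTROY} */
	if (prev_hook) prev_hook(event, thread, addr, size);
}

static void install(void)
{
	prev_hook = pthread_introspection_hook_install(my_hook);
}
#endif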
2379
2380 PTHREAD_NOINLINE
2381 static void
2382 _pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
2383 {
2384 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
2385 PTHREAD_SIZE);
2386 if (!destroy) return;
2387 _pthread_introspection_thread_destroy(t);
2388 }
2389
2390 static inline void
2391 _pthread_introspection_thread_create(pthread_t t, bool destroy)
2392 {
2393 if (os_fastpath(!_pthread_introspection_hook)) return;
2394 _pthread_introspection_hook_callout_thread_create(t, destroy);
2395 }
2396
2397 PTHREAD_NOINLINE
2398 static void
2399 _pthread_introspection_hook_callout_thread_start(pthread_t t)
2400 {
2401 size_t freesize;
2402 void *freeaddr;
2403 if (t == &_thread) {
2404 freesize = t->stacksize + t->guardsize;
2405 freeaddr = t->stackaddr - freesize;
2406 } else {
2407 freesize = t->freesize - PTHREAD_SIZE;
2408 freeaddr = t->freeaddr;
2409 }
2410 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
2411 freeaddr, freesize);
2412 }
2413
2414 static inline void
2415 _pthread_introspection_thread_start(pthread_t t)
2416 {
2417 if (os_fastpath(!_pthread_introspection_hook)) return;
2418 _pthread_introspection_hook_callout_thread_start(t);
2419 }
2420
2421 PTHREAD_NOINLINE
2422 static void
2423 _pthread_introspection_hook_callout_thread_terminate(pthread_t t,
2424 void *freeaddr, size_t freesize, bool destroy)
2425 {
2426 if (destroy && freesize) {
2427 freesize -= PTHREAD_SIZE;
2428 }
2429 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
2430 freeaddr, freesize);
2431 if (!destroy) return;
2432 _pthread_introspection_thread_destroy(t);
2433 }
2434
2435 static inline void
2436 _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
2437 size_t freesize, bool destroy)
2438 {
2439 if (os_fastpath(!_pthread_introspection_hook)) return;
2440 _pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
2441 destroy);
2442 }
2443
2444 PTHREAD_NOINLINE
2445 static void
2446 _pthread_introspection_hook_callout_thread_destroy(pthread_t t)
2447 {
2448 if (t == &_thread) return;
2449 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
2450 PTHREAD_SIZE);
2451 }
2452
2453 static inline void
2454 _pthread_introspection_thread_destroy(pthread_t t)
2455 {
2456 if (os_fastpath(!_pthread_introspection_hook)) return;
2457 _pthread_introspection_hook_callout_thread_destroy(t);
2458 }
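/*
 * Reader's note (not part of the source): each wrapper above keeps only the
 * hook NULL-check inline, the common case when no introspection hook is
 * installed, and moves the actual call-out into a separate PTHREAD_NOINLINE
 * function, so hot thread-lifecycle paths pay a single predictable branch.
 * The shape of the pattern, generically:
 */
#if 0
static void (*hook)(int);

__attribute__((noinline))
static void event_callout(int e)
{
	hook(e);			/* cold path, kept out of line */
}

static inline void event(int e)
{
	if (!hook) return;		/* cheap inline test, usual case */
	event_callout(e);
}
#endif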
2459