1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 *
43 */
44 /*
45 * MkLinux
46 */
47
48 /*
49 * POSIX Pthread Library
50 */
51
52 #include "resolver.h"
53 #include "internal.h"
54 #include "private.h"
55 #include "workqueue_private.h"
56 #include "introspection_private.h"
57 #include "qos_private.h"
58 #include "tsd_private.h"
59
60 #include <stdlib.h>
61 #include <errno.h>
62 #include <signal.h>
63 #include <unistd.h>
64 #include <mach/mach_init.h>
65 #include <mach/mach_vm.h>
66 #include <sys/time.h>
67 #include <sys/resource.h>
68 #include <sys/sysctl.h>
69 #include <sys/queue.h>
70 #include <sys/mman.h>
71 #include <machine/vmparam.h>
72 #define __APPLE_API_PRIVATE
73 #include <machine/cpu_capabilities.h>
74
75 #include <_simple.h>
76 #include <platform/string.h>
77 #include <platform/compat.h>
78
79 extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
80 void *newp, size_t newlen);
81 extern void __exit(int) __attribute__((noreturn));
82 extern int __pthread_kill(mach_port_t, int);
83
84 extern struct _pthread _thread;
85 extern int default_priority;
86
87
88 //
89 // Global variables
90 //
91
92 static void (*exitf)(int) = __exit;
93 PTHREAD_NOEXPORT void* (*_pthread_malloc)(size_t) = NULL;
94 PTHREAD_NOEXPORT void (*_pthread_free)(void *) = NULL;
95
96 #if PTHREAD_DEBUG_LOG
97 #include <fcntl.h>
98 int _pthread_debuglog;
99 uint64_t _pthread_debugstart;
100 #endif
101
102 // This global should be used (carefully) by anyone needing to know if a
103 // pthread (other than the main thread) has been created.
104 int __is_threaded = 0;
105
106 int __unix_conforming = 0;
107
108 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
109 // list, and the parentcheck, childrun and childexit flags of the pthread
110 // structure. Externally imported by pthread_cancelable.c.
111 PTHREAD_NOEXPORT _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
112 PTHREAD_NOEXPORT struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
113 static int _pthread_count = 1;
114
115 #if PTHREAD_LAYOUT_SPI
116
117 const struct pthread_layout_offsets_s pthread_layout_offsets = {
118 .plo_version = 1,
119 .plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
120 .plo_pthread_tsd_base_address_offset = 0,
121 .plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
122 };
123
124 #endif // PTHREAD_LAYOUT_SPI
125
126 //
127 // Static variables
128 //
129
130 // Mach message notification that a thread needs to be recycled.
131 typedef struct _pthread_reap_msg_t {
132 mach_msg_header_t header;
133 pthread_t thread;
134 mach_msg_trailer_t trailer;
135 } pthread_reap_msg_t;
136
137 /*
138 * The pthread may be offset into a page. In that event, by contract
139 * with the kernel, the allocation will extend PTHREAD_SIZE from the
140 * start of the next page. There's also one page worth of allocation
141 * below stacksize for the guard page. <rdar://problem/19941744>
142 */
143 #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
144 #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
145 #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
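/*
 * Editorial sketch, assuming 4 KB pages: for stackaddr = 0x701000 (stack
 * top) and stacksize = 0x100000, the macros above describe the full mapping:
 *
 *   PTHREAD_ALLOCADDR(...) = (0x701000 - 0x100000) - 0x1000 = 0x600000
 *   PTHREAD_ALLOCSIZE(...) = (round_page(0x701000) + PTHREAD_SIZE) - 0x600000
 *                          = 0x101000 + PTHREAD_SIZE
 *
 * i.e. one guard page below the stack, the stack itself, and the page-rounded
 * struct _pthread above the stack top.
 */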
146
147 static pthread_attr_t _pthread_attr_default = { };
148
149 // The main thread's pthread_t
150 PTHREAD_NOEXPORT struct _pthread _thread __attribute__((aligned(64))) = { };
151
152 PTHREAD_NOEXPORT int default_priority;
153 static int max_priority;
154 static int min_priority;
155 static int pthread_concurrency;
156
157 // work queue support data
158 static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
159 static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
160 static void (*__libdispatch_workloopfunction)(uint64_t *workloop_id, void **events, int *nevents) = NULL;
161 static int __libdispatch_offset;
162
163 // supported feature set
164 int __pthread_supported_features;
165 static bool __workq_newapi;
166
167 //
168 // Function prototypes
169 //
170
171 // pthread primitives
172 static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
173 static int _pthread_deallocate(pthread_t t);
174
175 static void _pthread_terminate_invoke(pthread_t t);
176
177 static inline void _pthread_struct_init(pthread_t t,
178 const pthread_attr_t *attrs,
179 void *stack,
180 size_t stacksize,
181 void *freeaddr,
182 size_t freesize);
183
184 static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
185
186 static void _pthread_dealloc_reply_port(pthread_t t);
187 static void _pthread_dealloc_special_reply_port(pthread_t t);
188
189 static inline void __pthread_add_thread(pthread_t t, const pthread_attr_t *attr, bool parent, bool from_mach_thread);
190 static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
191
192 static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
193
194 static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
195 static inline void _pthread_introspection_thread_start(pthread_t t);
196 static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
197 static inline void _pthread_introspection_thread_destroy(pthread_t t);
198
199 extern void _pthread_set_self(pthread_t);
200 extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
201 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
202
203 /* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
204 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
205 * from old API requests to the new kext conventions.
206 */
207 #define WORKQUEUE_OVERCOMMIT 0x10000
208
209 /*
210 * Flags field passed to bsdthread_create and back into pthread_start
211 31 <---------------------------------> 0
212 _________________________________________
213 | flags(8) | policy(8) | importance(16) |
214 -----------------------------------------
215 */
216
217 #define PTHREAD_START_CUSTOM 0x01000000
218 #define PTHREAD_START_SETSCHED 0x02000000
219 #define PTHREAD_START_DETACHED 0x04000000
220 #define PTHREAD_START_QOSCLASS 0x08000000
221 #define PTHREAD_START_TSD_BASE_SET 0x10000000
222 #define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
223 #define PTHREAD_START_POLICY_BITSHIFT 16
224 #define PTHREAD_START_POLICY_MASK 0xff
225 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
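/*
 * Editorial sketch: _pthread_create() below packs a SCHED_RR request at
 * priority 31 into the flags word like so, and _pthread_start() unpacks the
 * same fields with the masks above:
 *
 *   unsigned int flags = PTHREAD_START_SETSCHED
 *       | ((SCHED_RR & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT)
 *       | (31 & PTHREAD_START_IMPORTANCE_MASK);
 */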
226
227 static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
228 extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
229 extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
230 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
231 extern __uint64_t __thread_selfid( void );
232
233 extern int __workq_open(void);
234 extern int __workq_kernreturn(int, void *, int, int);
235
236 #if defined(__i386__) || defined(__x86_64__)
237 static const mach_vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
238 #else
239 #error no PTHREAD_STACK_HINT for this architecture
240 #endif
241
242 // Check that offsets of _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
243 _Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
244 == offsetof(struct _pthread, thread_id),
245 "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
246
247 // Allocate a thread structure, stack and guard page.
248 //
249 // The thread structure may optionally be placed in the same allocation as the
250 // stack, residing above the top of the stack. This cannot be done if a
251 // custom stack address is provided.
252 //
253 // Similarly the guard page cannot be allocated if a custom stack address is
254 // provided.
255 //
256 // The allocated thread structure is initialized with values that indicate how
257 // it should be freed.
258
259 static int
260 _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
261 {
262 int res;
263 kern_return_t kr;
264 pthread_t t = NULL;
265 mach_vm_address_t allocaddr = PTHREAD_STACK_HINT;
266 size_t allocsize = 0;
267 size_t guardsize = 0;
268 size_t stacksize = 0;
269
270 PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);
271
272 *thread = NULL;
273 *stack = NULL;
274
275 // Allocate a pthread structure if necessary
276
277 if (attrs->stackaddr != NULL) {
278 PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
279 *stack = attrs->stackaddr;
280 allocsize = PTHREAD_SIZE;
281 } else {
282 guardsize = attrs->guardsize;
283 stacksize = attrs->stacksize;
284 allocsize = stacksize + guardsize + PTHREAD_SIZE;
285 }
286
287 kr = mach_vm_map(mach_task_self(),
288 &allocaddr,
289 allocsize,
290 vm_page_size - 1,
291 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE,
292 MEMORY_OBJECT_NULL,
293 0,
294 FALSE,
295 VM_PROT_DEFAULT,
296 VM_PROT_ALL,
297 VM_INHERIT_DEFAULT);
298
299 if (kr != KERN_SUCCESS) {
300 kr = mach_vm_allocate(mach_task_self(),
301 &allocaddr,
302 allocsize,
303 VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
304 }
305
306 if (kr == KERN_SUCCESS) {
307 // The stack grows down.
308 // Set the guard page at the lowest address of the
309 // newly allocated stack. Return the highest address
310 // of the stack.
311 if (guardsize) {
312 (void)mach_vm_protect(mach_task_self(), allocaddr, guardsize, FALSE, VM_PROT_NONE);
313 }
314
315 // Thread structure resides at the top of the stack.
316 t = (void *)(allocaddr + stacksize + guardsize);
317 if (stacksize) {
318 // Returns the top of the stack.
319 *stack = t;
320 }
321 }
322
323 if (t != NULL) {
324 _pthread_struct_init(t, attrs,
325 *stack, attrs->stacksize,
326 allocaddr, allocsize);
327 *thread = t;
328 res = 0;
329 } else {
330 res = EAGAIN;
331 }
332 return res;
333 }
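/*
 * Editorial sketch of the layout produced above when the library allocates
 * the stack (custom-stack callers get only the PTHREAD_SIZE mapping for the
 * struct itself):
 *
 *   allocaddr                                        allocaddr + allocsize
 *   | guard page(s) |   stack, grows down   | struct _pthread |
 *   VM_PROT_NONE                            ^-- t == *stack (stack top)
 */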
334
335 static int
336 _pthread_deallocate(pthread_t t)
337 {
338 // Don't free the main thread.
339 if (t != &_thread) {
340 kern_return_t ret;
341 ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
342 PTHREAD_ASSERT(ret == KERN_SUCCESS);
343 }
344 return 0;
345 }
346
347 #pragma clang diagnostic push
348 #pragma clang diagnostic ignored "-Wreturn-stack-address"
349
350 PTHREAD_NOINLINE
351 static void*
352 _pthread_current_stack_address(void)
353 {
354 int a;
355 return &a;
356 }
357
358 #pragma clang diagnostic pop
359
360 // Terminates the thread if called from the currently running thread.
361 PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
362 static void
363 _pthread_terminate(pthread_t t)
364 {
365 PTHREAD_ASSERT(t == pthread_self());
366
367 uintptr_t freeaddr = (uintptr_t)t->freeaddr;
368 size_t freesize = t->freesize;
369
370 // the size of just the stack
371 size_t freesize_stack = t->freesize;
372
373 // We usually pass our structure+stack to bsdthread_terminate to free, but
374 // if we get told to keep the pthread_t structure around then we need to
375 // adjust the free size and addr in the pthread_t to just refer to the
376 // structure and not the stack. If we do end up deallocating the
377 // structure, this is useless work since no one can read the result, but we
378 // can't do it after the call to pthread_remove_thread because it isn't
379 // safe to dereference t after that.
380 if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
381 // Check to ensure the pthread structure itself is part of the
382 // allocation described by freeaddr/freesize, in which case we split and
383 // only deallocate the area below the pthread structure. In the event of a
384 // custom stack, the freeaddr/size will be the pthread structure itself, in
385 // which case we shouldn't free anything (the final else case).
386 freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
387
388 // describe just the remainder for deallocation when the pthread_t goes away
389 t->freeaddr += freesize_stack;
390 t->freesize -= freesize_stack;
391 } else if (t == &_thread){
392 freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
393 uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
394 freesize_stack = stackborder - freeaddr;
395 } else {
396 freesize_stack = 0;
397 }
398
399 mach_port_t kport = _pthread_kernel_thread(t);
400 semaphore_t joinsem = t->joiner_notify;
401
402 _pthread_dealloc_special_reply_port(t);
403 _pthread_dealloc_reply_port(t);
404
405 // After the call to __pthread_remove_thread, it is not safe to
406 // dereference the pthread_t structure.
407
408 bool destroy, should_exit;
409 destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);
410
411 if (!destroy || t == &_thread) {
412 // Use the adjusted freesize of just the stack that we computed above.
413 freesize = freesize_stack;
414 }
415
416 // Check if there is nothing to free because the thread has a custom
417 // stack allocation and is joinable.
418 if (freesize == 0) {
419 freeaddr = 0;
420 }
421 _pthread_introspection_thread_terminate(t, freeaddr, freesize, destroy);
422 if (should_exit) {
423 exitf(0);
424 }
425
426 __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
427 PTHREAD_ABORT("thread %p didn't terminate", t);
428 }
429
430 PTHREAD_NORETURN
431 static void
432 _pthread_terminate_invoke(pthread_t t)
433 {
434 _pthread_terminate(t);
435 }
436
437 int
438 pthread_attr_destroy(pthread_attr_t *attr)
439 {
440 int ret = EINVAL;
441 if (attr->sig == _PTHREAD_ATTR_SIG) {
442 attr->sig = 0;
443 ret = 0;
444 }
445 return ret;
446 }
447
448 int
449 pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
450 {
451 int ret = EINVAL;
452 if (attr->sig == _PTHREAD_ATTR_SIG) {
453 *detachstate = attr->detached;
454 ret = 0;
455 }
456 return ret;
457 }
458
459 int
460 pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
461 {
462 int ret = EINVAL;
463 if (attr->sig == _PTHREAD_ATTR_SIG) {
464 *inheritsched = attr->inherit;
465 ret = 0;
466 }
467 return ret;
468 }
469
470 int
471 pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
472 {
473 int ret = EINVAL;
474 if (attr->sig == _PTHREAD_ATTR_SIG) {
475 *param = attr->param;
476 ret = 0;
477 }
478 return ret;
479 }
480
481 int
482 pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
483 {
484 int ret = EINVAL;
485 if (attr->sig == _PTHREAD_ATTR_SIG) {
486 *policy = attr->policy;
487 ret = 0;
488 }
489 return ret;
490 }
491
492 // Default stack size is 512KB, independent of the main thread's stack size.
493 static const size_t DEFAULT_STACK_SIZE = 512 * 1024;
494
495 int
496 pthread_attr_init(pthread_attr_t *attr)
497 {
498 attr->stacksize = DEFAULT_STACK_SIZE;
499 attr->stackaddr = NULL;
500 attr->sig = _PTHREAD_ATTR_SIG;
501 attr->param.sched_priority = default_priority;
502 attr->param.quantum = 10; /* quantum isn't public yet */
503 attr->detached = PTHREAD_CREATE_JOINABLE;
504 attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
505 attr->policy = _PTHREAD_DEFAULT_POLICY;
506 attr->fastpath = 1;
507 attr->schedset = 0;
508 attr->guardsize = vm_page_size;
509 attr->qosclass = _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
510 return 0;
511 }
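/*
 * Editorial sketch (not part of the library) of typical caller-side use of
 * the attribute API initialized above; `t' and `worker' are hypothetical:
 *
 *   pthread_t t;
 *   pthread_attr_t attr;
 *   pthread_attr_init(&attr);                      // sets sig and defaults
 *   pthread_attr_setstacksize(&attr, 1024 * 1024); // page-multiple, >= PTHREAD_STACK_MIN
 *   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *   pthread_create(&t, &attr, worker, NULL);
 *   pthread_attr_destroy(&attr);                   // clears sig
 */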
512
513 int
514 pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
515 {
516 int ret = EINVAL;
517 if (attr->sig == _PTHREAD_ATTR_SIG &&
518 (detachstate == PTHREAD_CREATE_JOINABLE ||
519 detachstate == PTHREAD_CREATE_DETACHED)) {
520 attr->detached = detachstate;
521 ret = 0;
522 }
523 return ret;
524 }
525
526 int
527 pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
528 {
529 int ret = EINVAL;
530 if (attr->sig == _PTHREAD_ATTR_SIG &&
531 (inheritsched == PTHREAD_INHERIT_SCHED ||
532 inheritsched == PTHREAD_EXPLICIT_SCHED)) {
533 attr->inherit = inheritsched;
534 ret = 0;
535 }
536 return ret;
537 }
538
539 int
540 pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
541 {
542 int ret = EINVAL;
543 if (attr->sig == _PTHREAD_ATTR_SIG) {
544 /* TODO: Validate sched_param fields */
545 attr->param = *param;
546 attr->schedset = 1;
547 ret = 0;
548 }
549 return ret;
550 }
551
552 int
553 pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
554 {
555 int ret = EINVAL;
556 if (attr->sig == _PTHREAD_ATTR_SIG &&
557 (policy == SCHED_OTHER ||
558 policy == SCHED_RR ||
559 policy == SCHED_FIFO)) {
560 attr->policy = policy;
561 attr->schedset = 1;
562 ret = 0;
563 }
564 return ret;
565 }
566
567 int
568 pthread_attr_setscope(pthread_attr_t *attr, int scope)
569 {
570 int ret = EINVAL;
571 if (attr->sig == _PTHREAD_ATTR_SIG) {
572 if (scope == PTHREAD_SCOPE_SYSTEM) {
573 // No attribute yet for the scope.
574 ret = 0;
575 } else if (scope == PTHREAD_SCOPE_PROCESS) {
576 ret = ENOTSUP;
577 }
578 }
579 return ret;
580 }
581
582 int
583 pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
584 {
585 int ret = EINVAL;
586 if (attr->sig == _PTHREAD_ATTR_SIG) {
587 *scope = PTHREAD_SCOPE_SYSTEM;
588 ret = 0;
589 }
590 return ret;
591 }
592
593 int
594 pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
595 {
596 int ret = EINVAL;
597 if (attr->sig == _PTHREAD_ATTR_SIG) {
598 *stackaddr = attr->stackaddr;
599 ret = 0;
600 }
601 return ret;
602 }
603
604 int
605 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
606 {
607 int ret = EINVAL;
608 if (attr->sig == _PTHREAD_ATTR_SIG &&
609 ((uintptr_t)stackaddr % vm_page_size) == 0) {
610 attr->stackaddr = stackaddr;
611 attr->fastpath = 0;
612 attr->guardsize = 0;
613 ret = 0;
614 }
615 return ret;
616 }
617
618 int
619 pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
620 {
621 int ret = EINVAL;
622 if (attr->sig == _PTHREAD_ATTR_SIG) {
623 *stacksize = attr->stacksize;
624 ret = 0;
625 }
626 return ret;
627 }
628
629 int
630 pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
631 {
632 int ret = EINVAL;
633 if (attr->sig == _PTHREAD_ATTR_SIG &&
634 (stacksize % vm_page_size) == 0 &&
635 stacksize >= PTHREAD_STACK_MIN) {
636 attr->stacksize = stacksize;
637 ret = 0;
638 }
639 return ret;
640 }
641
642 int
643 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
644 {
645 int ret = EINVAL;
646 if (attr->sig == _PTHREAD_ATTR_SIG) {
647 *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
648 *stacksize = attr->stacksize;
649 ret = 0;
650 }
651 return ret;
652 }
653
654 // Per SUSv3, the stackaddr is the base address, i.e. the lowest addressable
655 // byte of the stack. This is not the same convention as in pthread_attr_setstackaddr.
656 int
657 pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
658 {
659 int ret = EINVAL;
660 if (attr->sig == _PTHREAD_ATTR_SIG &&
661 ((uintptr_t)stackaddr % vm_page_size) == 0 &&
662 (stacksize % vm_page_size) == 0 &&
663 stacksize >= PTHREAD_STACK_MIN) {
664 attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
665 attr->stacksize = stacksize;
666 attr->fastpath = 0;
667 ret = 0;
668 }
669 return ret;
670 }
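/*
 * Editorial sketch: for a caller-owned region [base, base + size), the call
 *
 *   pthread_attr_setstack(&attr, base, size);
 *
 * stores attr->stackaddr = base + size (the stack top, matching the library's
 * internal convention), whereas pthread_attr_setstackaddr() above is handed
 * that top-of-stack address directly.
 */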
671
672 int
673 pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
674 {
675 int ret = EINVAL;
676 if (attr->sig == _PTHREAD_ATTR_SIG) {
677 /* Guardsize of 0 is valid; it means no guard */
678 if ((guardsize % vm_page_size) == 0) {
679 attr->guardsize = guardsize;
680 attr->fastpath = 0;
681 ret = 0;
682 }
683 }
684 return ret;
685 }
686
687 int
688 pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
689 {
690 int ret = EINVAL;
691 if (attr->sig == _PTHREAD_ATTR_SIG) {
692 *guardsize = attr->guardsize;
693 ret = 0;
694 }
695 return ret;
696 }
697
698
699 /*
700 * Create and start execution of a new thread.
701 */
702 PTHREAD_NOINLINE PTHREAD_NORETURN
703 static void
704 _pthread_body(pthread_t self, bool needs_tsd_base_set)
705 {
706 _pthread_set_self_internal(self, needs_tsd_base_set);
707 __pthread_add_thread(self, NULL, false, false);
708 void *result = (self->fun)(self->arg);
709
710 _pthread_exit(self, result);
711 }
712
713 PTHREAD_NORETURN
714 void
715 _pthread_start(pthread_t self,
716 mach_port_t kport,
717 void *(*fun)(void *),
718 void *arg,
719 size_t stacksize,
720 unsigned int pflags)
721 {
722 if ((pflags & PTHREAD_START_CUSTOM) == 0) {
723 void *stackaddr = self;
724 _pthread_struct_init(self, &_pthread_attr_default,
725 stackaddr, stacksize,
726 PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
727
728 if (pflags & PTHREAD_START_SETSCHED) {
729 self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
730 self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
731 }
732
733 if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
734 self->detached &= ~PTHREAD_CREATE_JOINABLE;
735 self->detached |= PTHREAD_CREATE_DETACHED;
736 }
737 }
738
739 if ((pflags & PTHREAD_START_QOSCLASS) != 0) {
740 /* The QoS class is cached in the TSD of the pthread, so to reflect the
741 * class that the kernel brought us up at, the TSD must be primed from the
742 * flags parameter.
743 */
744 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (pflags & PTHREAD_START_QOSCLASS_MASK);
745 } else {
746 /* Give the thread a default QoS tier, of zero. */
747 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
748 }
749
750 bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
751
752 #if DEBUG
753 PTHREAD_ASSERT(MACH_PORT_VALID(kport));
754 PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
755 #endif
756 // will mark the thread initialized
757 _pthread_markcancel_if_canceled(self, kport);
758
759 self->fun = fun;
760 self->arg = arg;
761
762 _pthread_body(self, !thread_tsd_bsd_set);
763 }
764
765 PTHREAD_ALWAYS_INLINE
766 static inline void
767 _pthread_struct_init(pthread_t t,
768 const pthread_attr_t *attrs,
769 void *stackaddr,
770 size_t stacksize,
771 void *freeaddr,
772 size_t freesize)
773 {
774 #if DEBUG
775 PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
776 #endif
777
778 t->sig = _PTHREAD_SIG;
779 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
780 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
781 _PTHREAD_LOCK_INIT(t->lock);
782
783 t->stackaddr = stackaddr;
784 t->stacksize = stacksize;
785 t->freeaddr = freeaddr;
786 t->freesize = freesize;
787
788 t->guardsize = attrs->guardsize;
789 t->detached = attrs->detached;
790 t->inherit = attrs->inherit;
791 t->policy = attrs->policy;
792 t->schedset = attrs->schedset;
793 t->param = attrs->param;
794 t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
795 }
796
797 /* Need to deprecate this in future */
798 int
799 _pthread_is_threaded(void)
800 {
801 return __is_threaded;
802 }
803
804 /* Non-portable public API to learn whether this process has (or has had) at least
805 * one thread apart from the main thread. There can be a race if a thread is in the
806 * process of being created at the time of the call. It does not tell whether there
807 * is more than one thread at this point in time.
808 */
809 int
810 pthread_is_threaded_np(void)
811 {
812 return __is_threaded;
813 }
814
815
816 PTHREAD_NOEXPORT_VARIANT
817 mach_port_t
818 pthread_mach_thread_np(pthread_t t)
819 {
820 mach_port_t kport = MACH_PORT_NULL;
821 (void)_pthread_is_valid(t, 0, &kport);
822 return kport;
823 }
824
825 PTHREAD_NOEXPORT_VARIANT
826 pthread_t
827 pthread_from_mach_thread_np(mach_port_t kernel_thread)
828 {
829 struct _pthread *p = NULL;
830
831 /* No need to wait as mach port is already known */
832 _PTHREAD_LOCK(_pthread_list_lock);
833
834 TAILQ_FOREACH(p, &__pthread_head, plist) {
835 if (_pthread_kernel_thread(p) == kernel_thread) {
836 break;
837 }
838 }
839
840 _PTHREAD_UNLOCK(_pthread_list_lock);
841
842 return p;
843 }
844
845 PTHREAD_NOEXPORT_VARIANT
846 size_t
847 pthread_get_stacksize_np(pthread_t t)
848 {
849 size_t size = 0;
850
851 if (t == NULL) {
852 return ESRCH; // XXX bug?
853 }
854
855 #if !defined(__arm__) && !defined(__arm64__)
856 // The default rlimit based allocations will be provided with a stacksize
857 // of the current limit and a freesize of the max. However, custom
858 // allocations will just have the guard page to free. If we aren't in the
859 // latter case, call into rlimit to determine the current stack size. In
860 // the event that the current limit == max limit, we'll fall down the
861 // fast path, but since it's unlikely that the limit is going to be lowered
862 // after it's been changed to the max, we should be fine.
863 //
864 // Of course, on arm rlim_cur == rlim_max and there's only the one guard
865 // page. So, we can skip all this there.
866 if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
867 // We want to call getrlimit() just once, as it's relatively expensive
868 static size_t rlimit_stack;
869
870 if (rlimit_stack == 0) {
871 struct rlimit limit;
872 int ret = getrlimit(RLIMIT_STACK, &limit);
873
874 if (ret == 0) {
875 rlimit_stack = (size_t) limit.rlim_cur;
876 }
877 }
878
879 if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
880 return t->stacksize;
881 } else {
882 return rlimit_stack;
883 }
884 }
885 #endif /* !defined(__arm__) && !defined(__arm64__) */
886
887 if (t == pthread_self() || t == &_thread) {
888 return t->stacksize;
889 }
890
891 _PTHREAD_LOCK(_pthread_list_lock);
892
893 if (_pthread_is_valid_locked(t)) {
894 size = t->stacksize;
895 } else {
896 size = ESRCH; // XXX bug?
897 }
898
899 _PTHREAD_UNLOCK(_pthread_list_lock);
900
901 return size;
902 }
903
904 PTHREAD_NOEXPORT_VARIANT
905 void *
906 pthread_get_stackaddr_np(pthread_t t)
907 {
908 void *addr = NULL;
909
910 if (t == NULL) {
911 return (void *)(uintptr_t)ESRCH; // XXX bug?
912 }
913
914 // since the main thread will not get de-allocated from underneath us
915 if (t == pthread_self() || t == &_thread) {
916 return t->stackaddr;
917 }
918
919 _PTHREAD_LOCK(_pthread_list_lock);
920
921 if (_pthread_is_valid_locked(t)) {
922 addr = t->stackaddr;
923 } else {
924 addr = (void *)(uintptr_t)ESRCH; // XXX bug?
925 }
926
927 _PTHREAD_UNLOCK(_pthread_list_lock);
928
929 return addr;
930 }
931
932
933 static mach_port_t
934 _pthread_reply_port(pthread_t t)
935 {
936 void *p;
937 if (t == NULL) {
938 p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
939 } else {
940 p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
941 }
942 return (mach_port_t)(uintptr_t)p;
943 }
944
945 static void
946 _pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
947 {
948 void *p = (void *)(uintptr_t)reply_port;
949 if (t == NULL) {
950 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
951 } else {
952 t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
953 }
954 }
955
956 static void
957 _pthread_dealloc_reply_port(pthread_t t)
958 {
959 mach_port_t reply_port = _pthread_reply_port(t);
960 if (reply_port != MACH_PORT_NULL) {
961 mig_dealloc_reply_port(reply_port);
962 }
963 }
964
965 static mach_port_t
966 _pthread_special_reply_port(pthread_t t)
967 {
968 void *p;
969 if (t == NULL) {
970 p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
971 } else {
972 p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
973 }
974 return (mach_port_t)(uintptr_t)p;
975 }
976
977 static void
978 _pthread_dealloc_special_reply_port(pthread_t t)
979 {
980 mach_port_t special_reply_port = _pthread_special_reply_port(t);
981 if (special_reply_port != MACH_PORT_NULL) {
982 mach_port_mod_refs(mach_task_self(), special_reply_port,
983 MACH_PORT_RIGHT_RECEIVE, -1);
984 }
985 }
986
987 pthread_t
988 pthread_main_thread_np(void)
989 {
990 return &_thread;
991 }
992
993 /* returns non-zero if the current thread is the main thread */
994 int
995 pthread_main_np(void)
996 {
997 pthread_t self = pthread_self();
998
999 return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
1000 }
1001
1002
1003 /* If we are passed a pthread_t that is NULL, then we return
1004 the current thread's thread_id, so folks don't have to call
1005 pthread_self (in addition to us doing it) if they just want
1006 their thread_id.
1007 */
1008 PTHREAD_NOEXPORT_VARIANT
1009 int
1010 pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
1011 {
1012 int res = 0;
1013 pthread_t self = pthread_self();
1014
1015 if (thread_id == NULL) {
1016 return EINVAL;
1017 }
1018
1019 if (thread == NULL || thread == self) {
1020 *thread_id = self->thread_id;
1021 } else {
1022 _PTHREAD_LOCK(_pthread_list_lock);
1023 if (!_pthread_is_valid_locked(thread)) {
1024 res = ESRCH;
1025 } else if (thread->thread_id == 0) {
1026 res = EINVAL;
1027 } else {
1028 *thread_id = thread->thread_id;
1029 }
1030 _PTHREAD_UNLOCK(_pthread_list_lock);
1031 }
1032 return res;
1033 }
1034
1035 PTHREAD_NOEXPORT_VARIANT
1036 int
1037 pthread_getname_np(pthread_t thread, char *threadname, size_t len)
1038 {
1039 int res = 0;
1040
1041 if (thread == NULL) {
1042 return ESRCH;
1043 }
1044
1045 _PTHREAD_LOCK(_pthread_list_lock);
1046 if (_pthread_is_valid_locked(thread)) {
1047 strlcpy(threadname, thread->pthread_name, len);
1048 } else {
1049 res = ESRCH;
1050 }
1051 _PTHREAD_UNLOCK(_pthread_list_lock);
1052 return res;
1053 }
1054
1055
1056 int
1057 pthread_setname_np(const char *name)
1058 {
1059 int res;
1060 pthread_t self = pthread_self();
1061
1062 size_t len = 0;
1063 if (name != NULL) {
1064 len = strlen(name);
1065 }
1066
1067 /* prototype is in pthread_internals.h */
1068 res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
1069 if (res == 0) {
1070 if (len > 0) {
1071 strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
1072 } else {
1073 bzero(self->pthread_name, MAXTHREADNAMESIZE);
1074 }
1075 }
1076 return res;
1077
1078 }
1079
1080 PTHREAD_ALWAYS_INLINE
1081 static inline void
1082 __pthread_add_thread(pthread_t t, const pthread_attr_t *attrs,
1083 bool parent, bool from_mach_thread)
1084 {
1085 bool should_deallocate = false;
1086 bool should_add = true;
1087
1088 mach_port_t kport = _pthread_kernel_thread(t);
1089 if (os_slowpath(!MACH_PORT_VALID(kport))) {
1090 PTHREAD_CLIENT_CRASH(kport,
1091 "Unable to allocate thread port, possible port leak");
1092 }
1093
1094 if (from_mach_thread) {
1095 _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
1096 } else {
1097 _PTHREAD_LOCK(_pthread_list_lock);
1098 }
1099
1100 // The parent and child threads race to add the thread to the list.
1101 // When called by the parent:
1102 // - set parentcheck to true
1103 // - back off if childrun is true
1104 // When called by the child:
1105 // - set childrun to true
1106 // - back off if parentcheck is true
1107 if (parent) {
1108 t->parentcheck = 1;
1109 if (t->childrun) {
1110 // child got here first, don't add.
1111 should_add = false;
1112 }
1113
1114 // If the child exits before we check in then it has to keep
1115 // the thread structure memory alive so our dereferences above
1116 // are valid. If it's a detached thread, then no joiner will
1117 // deallocate the thread structure itself. So we do it here.
1118 if (t->childexit) {
1119 should_add = false;
1120 should_deallocate = ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED);
1121 }
1122 } else {
1123 t->childrun = 1;
1124 if (t->parentcheck) {
1125 // Parent got here first, don't add.
1126 should_add = false;
1127 }
1128 if (t->wqthread) {
1129 // Work queue threads have no parent. Simulate.
1130 t->parentcheck = 1;
1131 }
1132 }
1133
1134 if (should_add) {
1135 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
1136 _pthread_count++;
1137
1138 /*
1139 * Set some initial values that we already know in the pthread structure,
1140 * in case folks try to read them before the thread can set them itself.
1141 */
1142 if (parent && attrs && attrs->schedset == 0) {
1143 t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
1144 }
1145 }
1146
1147 if (from_mach_thread){
1148 _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
1149 } else {
1150 _PTHREAD_UNLOCK(_pthread_list_lock);
1151 }
1152
1153 if (parent) {
1154 if (!from_mach_thread) {
1155 // PR-26275485: Mach threads will likely crash trying to run
1156 // introspection code. Since the fall out from the introspection
1157 // code not seeing the injected thread is likely less than crashing
1158 // in the introspection code, just don't make the call.
1159 _pthread_introspection_thread_create(t, should_deallocate);
1160 }
1161 if (should_deallocate) {
1162 _pthread_deallocate(t);
1163 }
1164 } else {
1165 _pthread_introspection_thread_start(t);
1166 }
1167 }
1168
1169 // <rdar://problem/12544957> must always inline this function to avoid epilogues
1170 // Returns EBUSY if the thread structure should be kept alive (is joinable).
1171 // Returns ESRCH if the thread structure is no longer valid (was detached).
1172 PTHREAD_ALWAYS_INLINE
1173 static inline int
1174 __pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
1175 {
1176 int ret = 0;
1177
1178 bool should_remove = true;
1179
1180 _PTHREAD_LOCK(_pthread_list_lock);
1181
1182 // When a thread removes itself:
1183 // - Set the childexit flag indicating that the thread has exited.
1184 // - Return EBUSY if parentcheck is zero (must keep structure)
1185 // - If the thread is joinable, keep it on the list so that
1186 // the join operation succeeds. Still decrement the running
1187 // thread count so that we exit if no threads are running.
1188 // - Update the running thread count.
1189 // When another thread removes a joinable thread:
1190 // - CAREFUL not to dereference the thread before verifying that the
1191 // reference is still valid using _pthread_is_valid_locked().
1192 // - Remove the thread from the list.
1193
1194 if (child) {
1195 t->childexit = 1;
1196 if (t->parentcheck == 0) {
1197 ret = EBUSY;
1198 }
1199 if ((t->detached & PTHREAD_CREATE_JOINABLE) != 0) {
1200 ret = EBUSY;
1201 should_remove = false;
1202 }
1203 *should_exit = (--_pthread_count <= 0);
1204 } else if (!_pthread_is_valid_locked(t)) {
1205 ret = ESRCH;
1206 should_remove = false;
1207 } else if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
1208 // If we found a thread but it's not joinable, bail.
1209 ret = ESRCH;
1210 should_remove = false;
1211 } else if (t->parentcheck == 0) {
1212 // If we're not the child thread *and* the parent has not finished
1213 // creating the thread yet, then we are another thread that's joining
1214 // and we cannot deallocate the pthread.
1215 ret = EBUSY;
1216 }
1217 if (should_remove) {
1218 TAILQ_REMOVE(&__pthread_head, t, plist);
1219 }
1220
1221 _PTHREAD_UNLOCK(_pthread_list_lock);
1222
1223 return ret;
1224 }
1225
1226 static int
1227 _pthread_create(pthread_t *thread,
1228 const pthread_attr_t *attr,
1229 void *(*start_routine)(void *),
1230 void *arg,
1231 bool from_mach_thread)
1232 {
1233 pthread_t t = NULL;
1234 unsigned int flags = 0;
1235
1236 pthread_attr_t *attrs = (pthread_attr_t *)attr;
1237 if (attrs == NULL) {
1238 attrs = &_pthread_attr_default;
1239 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1240 return EINVAL;
1241 }
1242
1243 if (attrs->detached == PTHREAD_CREATE_DETACHED) {
1244 flags |= PTHREAD_START_DETACHED;
1245 }
1246
1247 if (attrs->schedset != 0) {
1248 flags |= PTHREAD_START_SETSCHED;
1249 flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
1250 flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
1251 } else if (attrs->qosclass != 0) {
1252 flags |= PTHREAD_START_QOSCLASS;
1253 flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
1254 }
1255
1256 __is_threaded = 1;
1257
1258 void *stack;
1259
1260 if (attrs->fastpath) {
1261 // kernel will allocate thread and stack, pass stacksize.
1262 stack = (void *)attrs->stacksize;
1263 } else {
1264 // allocate the thread and its stack
1265 flags |= PTHREAD_START_CUSTOM;
1266
1267 int res;
1268 res = _pthread_allocate(&t, attrs, &stack);
1269 if (res) {
1270 return res;
1271 }
1272
1273 t->arg = arg;
1274 t->fun = start_routine;
1275 }
1276
1277 pthread_t t2;
1278 t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
1279 if (t2 == (pthread_t)-1) {
1280 if (errno == EMFILE) {
1281 PTHREAD_CLIENT_CRASH(0,
1282 "Unable to allocate thread port, possible port leak");
1283 }
1284 if (flags & PTHREAD_START_CUSTOM) {
1285 // free the thread and stack if we allocated it
1286 _pthread_deallocate(t);
1287 }
1288 return EAGAIN;
1289 }
1290 if (t == NULL) {
1291 t = t2;
1292 }
1293
1294 __pthread_add_thread(t, attrs, true, from_mach_thread);
1295
1296 // n.b. if a thread is created detached and exits, t will be invalid
1297 *thread = t;
1298 return 0;
1299 }
1300
1301 int
1302 pthread_create(pthread_t *thread,
1303 const pthread_attr_t *attr,
1304 void *(*start_routine)(void *),
1305 void *arg)
1306 {
1307 return _pthread_create(thread, attr, start_routine, arg, false);
1308 }
1309
1310 int
1311 pthread_create_from_mach_thread(pthread_t *thread,
1312 const pthread_attr_t *attr,
1313 void *(*start_routine)(void *),
1314 void *arg)
1315 {
1316 return _pthread_create(thread, attr, start_routine, arg, true);
1317 }
1318
1319 PTHREAD_NORETURN
1320 static void
1321 _pthread_suspended_body(pthread_t self)
1322 {
1323 _pthread_set_self(self);
1324 __pthread_add_thread(self, NULL, false, false);
1325 _pthread_exit(self, (self->fun)(self->arg));
1326 }
1327
1328 int
1329 pthread_create_suspended_np(pthread_t *thread,
1330 const pthread_attr_t *attr,
1331 void *(*start_routine)(void *),
1332 void *arg)
1333 {
1334 int res;
1335 void *stack;
1336 mach_port_t kernel_thread = MACH_PORT_NULL;
1337
1338 const pthread_attr_t *attrs = attr;
1339 if (attrs == NULL) {
1340 attrs = &_pthread_attr_default;
1341 } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
1342 return EINVAL;
1343 }
1344
1345 pthread_t t;
1346 res = _pthread_allocate(&t, attrs, &stack);
1347 if (res) {
1348 return res;
1349 }
1350
1351 *thread = t;
1352
1353 kern_return_t kr;
1354 kr = thread_create(mach_task_self(), &kernel_thread);
1355 if (kr != KERN_SUCCESS) {
1356 //PTHREAD_ABORT("thread_create() failed: %d", kr);
1357 return EINVAL; /* Need better error here? */
1358 }
1359
1360 _pthread_set_kernel_thread(t, kernel_thread);
1361 (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
1362
1363 __is_threaded = 1;
1364
1365 t->arg = arg;
1366 t->fun = start_routine;
1367
1368 t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
1369 __pthread_add_thread(t, NULL, true, false);
1370
1371 // Set up a suspended thread.
1372 _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
1373 return res;
1374 }
1375
1376
1377 PTHREAD_NOEXPORT_VARIANT
1378 int
1379 pthread_detach(pthread_t thread)
1380 {
1381 int res = 0;
1382 bool join = false;
1383 semaphore_t sema = SEMAPHORE_NULL;
1384
1385 if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
1386 return ESRCH; // Not a valid thread to detach.
1387 }
1388
1389 if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
1390 !(thread->detached & PTHREAD_CREATE_JOINABLE)) {
1391 res = EINVAL;
1392 } else if (thread->detached & _PTHREAD_EXITED) {
1393 // Join the thread if it's already exited.
1394 join = true;
1395 } else {
1396 thread->detached &= ~PTHREAD_CREATE_JOINABLE;
1397 thread->detached |= PTHREAD_CREATE_DETACHED;
1398 sema = thread->joiner_notify;
1399 }
1400
1401 _PTHREAD_UNLOCK(thread->lock);
1402
1403 if (join) {
1404 pthread_join(thread, NULL);
1405 } else if (sema) {
1406 semaphore_signal(sema);
1407 }
1408
1409 return res;
1410 }
1411
1412 PTHREAD_NOEXPORT_VARIANT
1413 int
1414 pthread_kill(pthread_t th, int sig)
1415 {
1416 if (sig < 0 || sig > NSIG) {
1417 return EINVAL;
1418 }
1419
1420 mach_port_t kport = MACH_PORT_NULL;
1421 if (!_pthread_is_valid(th, 0, &kport)) {
1422 return ESRCH; // Not a valid thread.
1423 }
1424
1425 // Don't signal workqueue threads.
1426 if (th->wqthread != 0 && th->wqkillset == 0) {
1427 return ENOTSUP;
1428 }
1429
1430 int ret = __pthread_kill(kport, sig);
1431
1432 if (ret == -1) {
1433 ret = errno;
1434 }
1435 return ret;
1436 }
1437
1438 PTHREAD_NOEXPORT_VARIANT
1439 int
1440 __pthread_workqueue_setkill(int enable)
1441 {
1442 pthread_t self = pthread_self();
1443
1444 _PTHREAD_LOCK(self->lock);
1445 self->wqkillset = enable ? 1 : 0;
1446 _PTHREAD_UNLOCK(self->lock);
1447
1448 return 0;
1449 }
1450
1451
1452 /* For compatibility... */
1453
1454 pthread_t
1455 _pthread_self(void) {
1456 return pthread_self();
1457 }
1458
1459 /*
1460 * Terminate a thread.
1461 */
1462 int __disable_threadsignal(int);
1463
1464 PTHREAD_NORETURN
1465 static void
1466 _pthread_exit(pthread_t self, void *value_ptr)
1467 {
1468 struct __darwin_pthread_handler_rec *handler;
1469
1470 // Disable signal delivery while we clean up
1471 __disable_threadsignal(1);
1472
1473 // Set cancel state to disable and type to deferred
1474 _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
1475
1476 while ((handler = self->__cleanup_stack) != 0) {
1477 (handler->__routine)(handler->__arg);
1478 self->__cleanup_stack = handler->__next;
1479 }
1480 _pthread_tsd_cleanup(self);
1481
1482 _PTHREAD_LOCK(self->lock);
1483 self->detached |= _PTHREAD_EXITED;
1484 self->exit_value = value_ptr;
1485
1486 if ((self->detached & PTHREAD_CREATE_JOINABLE) &&
1487 self->joiner_notify == SEMAPHORE_NULL) {
1488 self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
1489 }
1490 _PTHREAD_UNLOCK(self->lock);
1491
1492 // Clear per-thread semaphore cache
1493 os_put_cached_semaphore(SEMAPHORE_NULL);
1494
1495 _pthread_terminate_invoke(self);
1496 }
1497
1498 void
1499 pthread_exit(void *value_ptr)
1500 {
1501 pthread_t self = pthread_self();
1502 if (self->wqthread == 0) {
1503 _pthread_exit(self, value_ptr);
1504 } else {
1505 PTHREAD_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
1506 }
1507 }
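/*
 * Editorial sketch of the cleanup stack drained by _pthread_exit() above;
 * handlers run in LIFO order. `unlock_handler' and `do_work' are hypothetical:
 *
 *   void *worker(void *arg)
 *   {
 *       pthread_cleanup_push(unlock_handler, arg);
 *       do_work(arg);              // may call pthread_exit() safely
 *       pthread_cleanup_pop(1);    // pop and run handler on the normal path
 *       return NULL;
 *   }
 */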
1508
1509
1510 PTHREAD_NOEXPORT_VARIANT
1511 int
1512 pthread_getschedparam(pthread_t thread,
1513 int *policy,
1514 struct sched_param *param)
1515 {
1516 int ret = 0;
1517
1518 if (thread == NULL) {
1519 return ESRCH;
1520 }
1521
1522 _PTHREAD_LOCK(_pthread_list_lock);
1523
1524 if (_pthread_is_valid_locked(thread)) {
1525 if (policy) {
1526 *policy = thread->policy;
1527 }
1528 if (param) {
1529 *param = thread->param;
1530 }
1531 } else {
1532 ret = ESRCH;
1533 }
1534
1535 _PTHREAD_UNLOCK(_pthread_list_lock);
1536
1537 return ret;
1538 }
1539
1540
1541 PTHREAD_ALWAYS_INLINE
1542 static inline int
1543 pthread_setschedparam_internal(pthread_t thread,
1544 mach_port_t kport,
1545 int policy,
1546 const struct sched_param *param)
1547 {
1548 policy_base_data_t bases;
1549 policy_base_t base;
1550 mach_msg_type_number_t count;
1551 kern_return_t ret;
1552
1553 switch (policy) {
1554 case SCHED_OTHER:
1555 bases.ts.base_priority = param->sched_priority;
1556 base = (policy_base_t)&bases.ts;
1557 count = POLICY_TIMESHARE_BASE_COUNT;
1558 break;
1559 case SCHED_FIFO:
1560 bases.fifo.base_priority = param->sched_priority;
1561 base = (policy_base_t)&bases.fifo;
1562 count = POLICY_FIFO_BASE_COUNT;
1563 break;
1564 case SCHED_RR:
1565 bases.rr.base_priority = param->sched_priority;
1566 /* quantum isn't public yet */
1567 bases.rr.quantum = param->quantum;
1568 base = (policy_base_t)&bases.rr;
1569 count = POLICY_RR_BASE_COUNT;
1570 break;
1571 default:
1572 return EINVAL;
1573 }
1574 ret = thread_policy(kport, policy, base, count, TRUE);
1575 return (ret != KERN_SUCCESS) ? EINVAL : 0;
1576 }
1577
1578
1579 PTHREAD_NOEXPORT_VARIANT
1580 int
1581 pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
1582 {
1583 mach_port_t kport = MACH_PORT_NULL;
1584 int res;
1585 int bypass = 1;
1586
1587 // since the main thread will not get de-allocated from underneath us
1588 if (t == pthread_self() || t == &_thread) {
1589 kport = _pthread_kernel_thread(t);
1590 } else {
1591 bypass = 0;
1592 (void)_pthread_is_valid(t, 0, &kport);
1593 }
1594
1595 res = pthread_setschedparam_internal(t, kport, policy, param);
1596 if (res == 0) {
1597 if (bypass == 0) {
1598 // Ensure the thread is still valid.
1599 _PTHREAD_LOCK(_pthread_list_lock);
1600 if (_pthread_is_valid_locked(t)) {
1601 t->policy = policy;
1602 t->param = *param;
1603 } else {
1604 res = ESRCH;
1605 }
1606 _PTHREAD_UNLOCK(_pthread_list_lock);
1607 } else {
1608 t->policy = policy;
1609 t->param = *param;
1610 }
1611 }
1612 return res;
1613 }
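/*
 * Editorial sketch: changing the current thread's scheduling via the call
 * above, using the priority range exported just below:
 *
 *   struct sched_param sp = { .sched_priority = sched_get_priority_max(SCHED_OTHER) };
 *   int err = pthread_setschedparam(pthread_self(), SCHED_OTHER, &sp);
 *   // err == 0 on success, EINVAL if thread_policy() rejected the request
 */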
1614
1615
1616 int
1617 sched_get_priority_min(int policy)
1618 {
1619 return default_priority - 16;
1620 }
1621
1622 int
1623 sched_get_priority_max(int policy)
1624 {
1625 return default_priority + 16;
1626 }
1627
1628 int
1629 pthread_equal(pthread_t t1, pthread_t t2)
1630 {
1631 return (t1 == t2);
1632 }
1633
1634 /*
1635 * Force LLVM not to optimise this into a call to __pthread_set_self; if it does,
1636 * then _pthread_set_self won't be bound when secondary threads try to start up.
1637 */
1638 PTHREAD_NOINLINE
1639 void
1640 _pthread_set_self(pthread_t p)
1641 {
1642 return _pthread_set_self_internal(p, true);
1643 }
1644
1645 PTHREAD_ALWAYS_INLINE
1646 static inline void
1647 _pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
1648 {
1649 if (p == NULL) {
1650 p = &_thread;
1651 }
1652
1653 uint64_t tid = __thread_selfid();
1654 if (tid == -1ull) {
1655 PTHREAD_ABORT("failed to set thread_id");
1656 }
1657
1658 p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
1659 p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
1660 p->thread_id = tid;
1661
1662 if (needs_tsd_base_set) {
1663 _thread_set_tsd_base(&p->tsd[0]);
1664 }
1665 }
1666
1667
1668 // <rdar://problem/28984807> pthread_once should have an acquire barrier
1669 PTHREAD_ALWAYS_INLINE
1670 static inline void
1671 _os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
1672 {
1673 if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
1674 _os_once(predicate, context, function);
1675 OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
1676 }
1677 }
1678
1679 struct _pthread_once_context {
1680 pthread_once_t *pthread_once;
1681 void (*routine)(void);
1682 };
1683
1684 static void
1685 __pthread_once_handler(void *context)
1686 {
1687 struct _pthread_once_context *ctx = context;
1688 pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
1689 ctx->routine();
1690 pthread_cleanup_pop(0);
1691 ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
1692 }
1693
1694 PTHREAD_NOEXPORT_VARIANT
1695 int
1696 pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
1697 {
1698 struct _pthread_once_context ctx = { once_control, init_routine };
1699 do {
1700 _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
1701 } while (once_control->sig == _PTHREAD_ONCE_SIG_init);
1702 return 0;
1703 }
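/*
 * Editorial sketch of the standard pattern served by pthread_once() above.
 * The acquire barrier (rdar://28984807) guarantees that a caller returning
 * from pthread_once() observes everything the init routine wrote. `compute'
 * is hypothetical:
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *   static int global;
 *   static void init_once(void) { global = compute(); }
 *
 *   pthread_once(&once, init_once);  // global is now safe to read
 */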
1704
1705
1706 int
1707 pthread_getconcurrency(void)
1708 {
1709 return pthread_concurrency;
1710 }
1711
1712 int
1713 pthread_setconcurrency(int new_level)
1714 {
1715 if (new_level < 0) {
1716 return EINVAL;
1717 }
1718 pthread_concurrency = new_level;
1719 return 0;
1720 }
1721
1722 static unsigned long
1723 _pthread_strtoul(const char *p, const char **endptr, int base)
1724 {
1725 uintptr_t val = 0;
1726
1727 // Expect hex string starting with "0x"
1728 if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
1729 p += 2;
1730 while (1) {
1731 char c = *p;
1732 if ('0' <= c && c <= '9') {
1733 val = (val << 4) + (c - '0');
1734 } else if ('a' <= c && c <= 'f') {
1735 val = (val << 4) + (c - 'a' + 10);
1736 } else if ('A' <= c && c <= 'F') {
1737 val = (val << 4) + (c - 'A' + 10);
1738 } else {
1739 break;
1740 }
1741 ++p;
1742 }
1743 }
1744
1745 *endptr = (char *)p;
1746 return val;
1747 }
1748
1749 static int
1750 parse_main_stack_params(const char *apple[],
1751 void **stackaddr,
1752 size_t *stacksize,
1753 void **allocaddr,
1754 size_t *allocsize)
1755 {
1756 const char *p = _simple_getenv(apple, "main_stack");
1757 if (!p) return 0;
1758
1759 int ret = 0;
1760 const char *s = p;
1761
1762 *stackaddr = _pthread_strtoul(s, &s, 16);
1763 if (*s != ',') goto out;
1764
1765 *stacksize = _pthread_strtoul(s + 1, &s, 16);
1766 if (*s != ',') goto out;
1767
1768 *allocaddr = _pthread_strtoul(s + 1, &s, 16);
1769 if (*s != ',') goto out;
1770
1771 *allocsize = _pthread_strtoul(s + 1, &s, 16);
1772 if (*s != ',' && *s != 0) goto out;
1773
1774 ret = 1;
1775 out:
1776 bzero((char *)p, strlen(p));
1777 return ret;
1778 }
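/*
 * Editorial sketch: the parser above expects a kernel-provided apple[] entry
 * of the form (hex values made up for illustration)
 *
 *   main_stack=0x7ffee4000000,0x800000,0x7ffedc000000,0x801000
 *
 * i.e. stack address, stack size, allocation address, allocation size; the
 * string is then scrubbed with bzero().
 */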
1779
1780 #if !defined(VARIANT_STATIC)
1781 void *
1782 malloc(size_t sz)
1783 {
1784 if (_pthread_malloc) {
1785 return _pthread_malloc(sz);
1786 } else {
1787 return NULL;
1788 }
1789 }
1790
1791 void
1792 free(void *p)
1793 {
1794 if (_pthread_free) {
1795 _pthread_free(p);
1796 }
1797 }
1798 #endif // VARIANT_STATIC
1799
1800 /*
1801 * Perform package initialization - called automatically when application starts
1802 */
1803 struct ProgramVars; /* forward reference */
1804
1805 int
1806 __pthread_init(const struct _libpthread_functions *pthread_funcs,
1807 const char *envp[] __unused,
1808 const char *apple[],
1809 const struct ProgramVars *vars __unused)
1810 {
1811 // Save our provided pushed-down functions
1812 if (pthread_funcs) {
1813 exitf = pthread_funcs->exit;
1814
1815 if (pthread_funcs->version >= 2) {
1816 _pthread_malloc = pthread_funcs->malloc;
1817 _pthread_free = pthread_funcs->free;
1818 }
1819 }
1820
1821 //
1822 // Get host information
1823 //
1824
1825 kern_return_t kr;
1826 host_flavor_t flavor = HOST_PRIORITY_INFO;
1827 mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
1828 host_priority_info_data_t priority_info;
1829 host_t host = mach_host_self();
1830 kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
1831 if (kr != KERN_SUCCESS) {
1832 PTHREAD_ABORT("host_info(mach_host_self(), ...) failed: %s", mach_error_string(kr));
1833 } else {
1834 default_priority = priority_info.user_priority;
1835 min_priority = priority_info.minimum_priority;
1836 max_priority = priority_info.maximum_priority;
1837 }
1838 mach_port_deallocate(mach_task_self(), host);
1839
1840 //
1841 // Set up the main thread structure
1842 //
1843
1844 // Get the address and size of the main thread's stack from the kernel.
1845 void *stackaddr = 0;
1846 size_t stacksize = 0;
1847 void *allocaddr = 0;
1848 size_t allocsize = 0;
1849 if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
1850 stackaddr == NULL || stacksize == 0) {
1851 // Fall back to the previous behavior.
1852 size_t len = sizeof(stackaddr);
1853 int mib[] = { CTL_KERN, KERN_USRSTACK };
1854 if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
1855 #if defined(__LP64__)
1856 stackaddr = (void *)USRSTACK64;
1857 #else
1858 stackaddr = (void *)USRSTACK;
1859 #endif
1860 }
1861 stacksize = DFLSSIZ;
1862 allocaddr = 0;
1863 allocsize = 0;
1864 }
1865
1866 pthread_t thread = &_thread;
1867 pthread_attr_init(&_pthread_attr_default);
1868 _pthread_struct_init(thread, &_pthread_attr_default,
1869 stackaddr, stacksize,
1870 allocaddr, allocsize);
1871 thread->detached = PTHREAD_CREATE_JOINABLE;
1872
1873 // Finish initialization with common code that is reinvoked on the
1874 // child side of a fork.
1875
1876 // Finishes initialization of main thread attributes.
1877 // Initializes the thread list and add the main thread.
1878 // Calls _pthread_set_self() to prepare the main thread for execution.
1879 _pthread_main_thread_init(thread);
1880
1881 struct _pthread_registration_data registration_data;
1882 // Set up kernel entry points with __bsdthread_register.
1883 _pthread_bsdthread_init(&registration_data);
1884
1885 // Have pthread_key and pthread_mutex do their init envvar checks.
1886 _pthread_key_global_init(envp);
1887 _pthread_mutex_global_init(envp, &registration_data);
1888
1889 #if PTHREAD_DEBUG_LOG
1890 _SIMPLE_STRING path = _simple_salloc();
1891 _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
1892 _pthread_debuglog = open(_simple_string(path),
1893 O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
1894 _simple_sfree(path);
1895 _pthread_debugstart = mach_absolute_time();
1896 #endif
1897
1898 return 0;
1899 }
1900
1901 PTHREAD_NOEXPORT void
1902 _pthread_main_thread_init(pthread_t p)
1903 {
1904 TAILQ_INIT(&__pthread_head);
1905 _PTHREAD_LOCK_INIT(_pthread_list_lock);
1906
1907 // Re-use the main thread's static storage if no thread was provided.
1908 if (p == NULL) {
1909 if (_thread.tsd[0] != 0) {
1910 bzero(&_thread, sizeof(struct _pthread));
1911 }
1912 p = &_thread;
1913 }
1914
1915 _PTHREAD_LOCK_INIT(p->lock);
1916 _pthread_set_kernel_thread(p, mach_thread_self());
1917 _pthread_set_reply_port(p, mach_reply_port());
1918 p->__cleanup_stack = NULL;
1919 p->joiner_notify = SEMAPHORE_NULL;
1920 p->joiner = MACH_PORT_NULL;
1921 p->detached |= _PTHREAD_CREATE_PARENT;
1922 p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
1923 p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
1924
1925 // Initialize the list of threads with the new main thread.
1926 TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
1927 _pthread_count = 1;
1928
1929 _pthread_set_self(p);
1930 _pthread_introspection_thread_start(p);
1931 }
1932
1933 int
1934 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
1935 {
1936 int ret = __pthread_remove_thread(thread, false, NULL);
1937 if (ret != 0 && ret != EBUSY) {
1938 // Returns ESRCH if the thread was not created joinable.
1939 return ret;
1940 }
1941
1942 if (value_ptr) {
1943 *value_ptr = _pthread_get_exit_value(thread, conforming);
1944 }
1945 _pthread_introspection_thread_destroy(thread);
1946 if (ret != EBUSY) {
1947 // __pthread_remove_thread returns EBUSY if the parent has not
1948 // finished creating the thread (and is still expecting the pthread_t
1949 // to be alive).
1950 _pthread_deallocate(thread);
1951 }
1952 return 0;
1953 }
1954
1955 int
1956 sched_yield(void)
1957 {
1958 swtch_pri(0);
1959 return 0;
1960 }
1961
1962 // XXX remove
1963 void
1964 cthread_yield(void)
1965 {
1966 sched_yield();
1967 }
1968
1969 void
1970 pthread_yield_np(void)
1971 {
1972 sched_yield();
1973 }

PTHREAD_NOEXPORT_VARIANT
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
    if (thread_port == MACH_PORT_NULL ||
            (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
        /* Clear the current thread's TSD; that can be done inline. */
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
                _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0));
    } else {
        pthread_t p;

        _PTHREAD_LOCK(_pthread_list_lock);

        TAILQ_FOREACH(p, &__pthread_head, plist) {
            mach_port_t kp = _pthread_kernel_thread(p);
            if (thread_port == kp) {
                p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
                        _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
                break;
            }
        }

        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
}
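
/*
 * Illustrative sketch, not part of this file: the slot cleared above is the
 * same per-thread QoS value that the public QoS API traffics in. A minimal
 * round trip, assuming a QoS-aware kernel; that pthread_get_qos_class_np()
 * reads back through this TSD slot is an assumption of this sketch.
 */
#if 0
#include <pthread.h>
#include <pthread/qos.h>

static void
example_qos_round_trip(void)
{
    qos_class_t qc;
    int relpri;

    // Request a QoS class (relative priority 0) for the current thread.
    pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);

    // Read the stored class back.
    if (pthread_get_qos_class_np(pthread_self(), &qc, &relpri) == 0) {
        // Expect qc == QOS_CLASS_UTILITY and relpri == 0 here.
    }
}
#endif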


/***** pthread workqueue support routines *****/

PTHREAD_NOEXPORT void
_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
    bzero(data, sizeof(*data));
    data->version = sizeof(struct _pthread_registration_data);
    data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
    data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
    data->tsd_offset = offsetof(struct _pthread, tsd);
    data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);

    int rv = __bsdthread_register(thread_start,
            start_wqthread, (int)PTHREAD_SIZE,
            (void*)data, (uintptr_t)sizeof(*data),
            data->dispatch_queue_offset);

    if (rv > 0) {
        if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
            PTHREAD_INTERNAL_CRASH(rv,
                    "Missing required support for QOS_CLASS_DEFAULT");
        }
        if ((rv & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
            PTHREAD_INTERNAL_CRASH(rv,
                    "Missing required support for QOS_CLASS_MAINTENANCE");
        }
        __pthread_supported_features = rv;
    }

    /*
     * TODO: differentiate between (-1, EINVAL) after fork (which has the side
     * effect of resetting the child's stack_addr_hint before bailing out) and
     * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
     * the latter as fatal.
     *
     * <rdar://problem/36451838>
     */

    pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;

    if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
        _pthread_set_main_qos(main_qos);
        _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
    }

    if (__libdispatch_workerfunction != NULL) {
        // Prepare the kernel for workq action.
        (void)__workq_open();
    }
}
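
/*
 * Illustrative sketch, not part of this file: the feature bits stashed in
 * __pthread_supported_features above are published through
 * _pthread_workqueue_supported(), and callers test individual
 * PTHREAD_FEATURE_* bits before relying on a capability. The helper name is
 * hypothetical.
 */
#if 0
static bool
example_has_fine_grained_priority(void)
{
    // PTHREAD_FEATURE_FINEPRIO means the kernel understands the
    // QoS-class-based pthread_priority_t encoding.
    return (_pthread_workqueue_supported() & PTHREAD_FEATURE_FINEPRIO) != 0;
}
#endif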

// workqueue entry point from kernel
PTHREAD_NORETURN
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
        void *keventlist, int flags, int nkevents)
{
    PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);

    bool thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
    bool overcommit = flags & WQ_FLAG_THREAD_OVERCOMMIT;
    bool kevent = flags & WQ_FLAG_THREAD_KEVENT;
    bool workloop = (flags & WQ_FLAG_THREAD_WORKLOOP) &&
            __libdispatch_workloopfunction != NULL;
    PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
    PTHREAD_ASSERT(!workloop || kevent);

    pthread_priority_t priority = 0;
    unsigned long priority_flags = 0;

    if (overcommit)
        priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
    if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
        priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
    if (kevent)
        priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;

    int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
    priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);

    if (!thread_reuse) {
        // New thread created by kernel, needs initialization.
        void *stackaddr = self;
        size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;

        _pthread_struct_init(self, &_pthread_attr_default,
                stackaddr, stacksize,
                PTHREAD_ALLOCADDR(stackaddr, stacksize),
                PTHREAD_ALLOCSIZE(stackaddr, stacksize));

        _pthread_set_kernel_thread(self, kport);
        self->wqthread = 1;
        self->wqkillset = 0;
        self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;

        // Not a joinable thread.
        self->detached &= ~PTHREAD_CREATE_JOINABLE;
        self->detached |= PTHREAD_CREATE_DETACHED;

        // Update the running thread count and set childrun bit.
        bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
        _pthread_set_self_internal(self, !thread_tsd_base_set);
        _pthread_introspection_thread_create(self, false);
        __pthread_add_thread(self, NULL, false, false);
    }

    // If we're running with fine-grained priority, we also need to set this
    // thread to the QoS class provided to us by the kernel.
    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
                _pthread_priority_make_newest(thread_class, 0, priority_flags));
    }

#if WQ_DEBUG
    PTHREAD_ASSERT(self);
    PTHREAD_ASSERT(self == pthread_self());
#endif // WQ_DEBUG

    if (workloop) {
        self->fun = (void *(*)(void *))__libdispatch_workloopfunction;
    } else if (kevent) {
        self->fun = (void *(*)(void *))__libdispatch_keventfunction;
    } else {
        self->fun = (void *(*)(void *))__libdispatch_workerfunction;
    }
    self->arg = (void *)(uintptr_t)thread_class;

    if (kevent && keventlist && nkevents > 0) {
        int errors_out;
kevent_errors_retry:

        if (workloop) {
            kqueue_id_t kevent_id =
                    *(kqueue_id_t *)((char *)keventlist - sizeof(kqueue_id_t));
            kqueue_id_t kevent_id_in = kevent_id;
            (__libdispatch_workloopfunction)(&kevent_id, &keventlist, &nkevents);
            PTHREAD_ASSERT(kevent_id == kevent_id_in || nkevents == 0);
            errors_out = __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN,
                    keventlist, nkevents, 0);
        } else {
            (__libdispatch_keventfunction)(&keventlist, &nkevents);
            errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN,
                    keventlist, nkevents, 0);
        }

        if (errors_out > 0) {
            nkevents = errors_out;
            goto kevent_errors_retry;
        } else if (errors_out < 0) {
            PTHREAD_ABORT("kevent return produced an error: %d", errno);
        }
        goto thexit;
    } else if (kevent) {
        if (workloop) {
            (__libdispatch_workloopfunction)(0, NULL, NULL);
            __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, NULL, 0, -1);
        } else {
            (__libdispatch_keventfunction)(NULL, NULL);
            __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
        }

        goto thexit;
    }

    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        if (!__workq_newapi) {
            /* Old thread priorities are inverted relative to the new
             * flexible priority scheme: zero is the highest priority,
             * the normal bands run down to 2, and background is 3.
             */
            pthread_workqueue_function_t func =
                    (pthread_workqueue_function_t)__libdispatch_workerfunction;

            int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;

            if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
                /* Dirty hack to support kernels that don't have QOS_CLASS_DEFAULT. */
                switch (thread_class) {
                case QOS_CLASS_USER_INTERACTIVE:
                    thread_class = QOS_CLASS_USER_INITIATED;
                    break;
                case QOS_CLASS_USER_INITIATED:
                    thread_class = QOS_CLASS_DEFAULT;
                    break;
                default:
                    break;
                }
            }

            switch (thread_class) {
            /* QOS_CLASS_USER_INTERACTIVE is not currently requested by
             * dispatch in this old-priority compatibility path. */
            case QOS_CLASS_USER_INITIATED:
                (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
                break;

            case QOS_CLASS_DEFAULT:
                /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to
                 * dispatch, for fear of the QoS being picked up by NSThread
                 * (et al) and transported around the system. So change the
                 * TSD to make this thread look like QOS_CLASS_USER_INITIATED
                 * even though it will still run as legacy.
                 */
                _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
                        _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0));
                (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
                break;

            case QOS_CLASS_UTILITY:
                (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
                break;

            case QOS_CLASS_BACKGROUND:
                (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
                break;

            /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need
             * to handle it here. */
            }

        } else {
            /* "New" API, where dispatch is expecting to be given the thread
             * priority. */
            (*__libdispatch_workerfunction)(priority);
        }
    } else {
        /* We're the new library running on an old kext, so thread_class is
         * really the workq priority. */
        pthread_workqueue_function_t func =
                (pthread_workqueue_function_t)__libdispatch_workerfunction;
        int options = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
        (*func)(thread_class, options, NULL);
    }

    __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

thexit:
    {
        pthread_priority_t current_priority =
                _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
        if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
                (_pthread_priority_get_qos_newest(current_priority) >
                        WQ_THREAD_CLEANUP_QOS)) {
            // Reset QoS to something low for the cleanup process.
            priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
            _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
        }
    }

    _pthread_exit(self, NULL);
}
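
/*
 * Illustrative sketch, not part of this file: the legacy branch above folds
 * QoS classes onto the four old workqueue bands (0 is highest, 3 is
 * background). This helper merely restates that switch as a lookup; the real
 * code has no default case and simply skips unlisted classes.
 */
#if 0
static int
example_legacy_prioqueue_for_qos(int thread_class)
{
    switch (thread_class) {
    case QOS_CLASS_USER_INITIATED:
        return WORKQ_HIGH_PRIOQUEUE;
    case QOS_CLASS_DEFAULT:
        return WORKQ_DEFAULT_PRIOQUEUE;
    case QOS_CLASS_UTILITY:
        return WORKQ_LOW_PRIOQUEUE;
    case QOS_CLASS_BACKGROUND:
        return WORKQ_BG_PRIOQUEUE;
    default:
        // Hypothetical fallback; see the note above.
        return WORKQ_DEFAULT_PRIOQUEUE;
    }
}
#endif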

/***** pthread workqueue API for libdispatch *****/

_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
        "Kernel and userland should agree on the event list size");

void
pthread_workqueue_setdispatchoffset_np(int offset)
{
    __libdispatch_offset = offset;
}

static int
pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        pthread_workqueue_function_workloop_t workloop_func)
{
    int res = EBUSY;
    if (__libdispatch_workerfunction == NULL) {
        // Check whether the kernel supports the new SPIs.
        res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL,
                __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
        if (res == -1) {
            res = ENOTSUP;
        } else {
            __libdispatch_workerfunction = queue_func;
            __libdispatch_keventfunction = kevent_func;
            __libdispatch_workloopfunction = workloop_func;

            // Prepare the kernel for workq action.
            (void)__workq_open();
            if (__is_threaded == 0) {
                __is_threaded = 1;
            }
        }
    }
    return res;
}

int
_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        pthread_workqueue_function_workloop_t workloop_func,
        int offset, int flags)
{
    if (flags != 0) {
        return ENOTSUP;
    }

    __workq_newapi = true;
    __libdispatch_offset = offset;

    int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func,
            kevent_func, workloop_func);
    return rv;
}

int
_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
        pthread_workqueue_function_kevent_t kevent_func,
        int offset, int flags)
{
    return _pthread_workqueue_init_with_workloop(queue_func, kevent_func,
            NULL, offset, flags);
}

int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
    return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
}
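
/*
 * Illustrative sketch, not part of this file: how a dispatch-like runtime
 * would register its worker entry point through the _pthread_workqueue_init()
 * defined above. The worker body and helper names are hypothetical.
 */
#if 0
static void
example_worker_entry(pthread_priority_t priority)
{
    // Drain work at the QoS encoded in 'priority', then return so the
    // kernel can park or recycle the thread via _pthread_wqthread().
}

static int
example_register_runtime(void)
{
    // offset 0: no dispatch-queue TSD offset registered; flags must be 0.
    return _pthread_workqueue_init(example_worker_entry, 0, 0);
}
#endif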

int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
    return pthread_workqueue_setdispatch_with_workloop_np(
            (pthread_workqueue_function2_t)worker_func, NULL, NULL);
}

int
_pthread_workqueue_supported(void)
{
    if (os_unlikely(!__pthread_supported_features)) {
        PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
    }

    return __pthread_supported_features;
}

int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
    int res = 0;

    // Cannot add threads without a worker function registered.
    if (__libdispatch_workerfunction == NULL) {
        return EPERM;
    }

    pthread_priority_t kp = 0;

    if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
        /* The new kernel API takes the new QoS class + relative priority
         * style of priority. This entry point exists for compatibility with
         * old libdispatch versions (i.e., the simulator); we ask the kernel
         * for the corresponding new priority bracket, then on the way out
         * run all the dispatch queues that were requested.
         */

        int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
        int flags = 0;

        /* To make sure the library does not issue more threads to dispatch
         * than were requested, the total number of active requests is
         * recorded in __workq_requests.
         */
        if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
            flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
        }

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
        kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
#pragma clang diagnostic pop

    } else {
        /* Running on the old kernel, queue_priority is what we pass directly
         * to the syscall.
         */
        kp = queue_priority & WQ_FLAG_THREAD_PRIOMASK;

        if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
            kp |= WORKQUEUE_OVERCOMMIT;
        }
    }

    res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
    if (res == -1) {
        res = errno;
    }
    return res;
}
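
/*
 * Illustrative sketch, not part of this file: once a worker function is
 * registered, a runtime requests thread callbacks like this. The count and
 * options are hypothetical.
 */
#if 0
static void
example_request_workers(void)
{
    // Ask the kernel to call the registered worker on up to two overcommit
    // threads at the old "default" band; on QoS kernels the call above
    // translates this via _pthread_qos_class_encode_workqueue().
    (void)pthread_workqueue_addthreads_np(WORKQ_DEFAULT_PRIOQUEUE,
            WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, 2);
}
#endif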

bool
_pthread_workqueue_should_narrow(pthread_priority_t pri)
{
    int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
    if (res == -1) {
        return false;
    }
    return res;
}

int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
    int res = 0;

    if (__libdispatch_workerfunction == NULL) {
        return EPERM;
    }

    if ((__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) == 0) {
        return ENOTSUP;
    }

    res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
    if (res == -1) {
        res = errno;
    }
    return res;
}

int
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
{
    int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL,
            (int)priority, 0);
    if (res == -1) {
        res = errno;
    }
    return res;
}

/*
 * Introspection SPI for libpthread.
 */

static pthread_introspection_hook_t _pthread_introspection_hook;

pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
    pthread_introspection_hook_t prev;
    prev = _pthread_atomic_xchg_ptr((void **)&_pthread_introspection_hook, hook);
    return prev;
}
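
/*
 * Illustrative sketch, not part of this file: installing an introspection
 * hook. Hooks are expected to chain: the previous hook returned by
 * pthread_introspection_hook_install() should be forwarded to. The hook type
 * comes from introspection_private.h, already included above; the example_*
 * names are hypothetical.
 */
#if 0
static pthread_introspection_hook_t example_prev_hook;

static void
example_hook(unsigned int event, pthread_t thread, void *addr, size_t size)
{
    // Observe PTHREAD_INTROSPECTION_THREAD_{CREATE,START,TERMINATE,DESTROY}
    // events, then forward to whatever hook was installed before us.
    if (example_prev_hook) {
        example_prev_hook(event, thread, addr, size);
    }
}

static void
example_install_hook(void)
{
    example_prev_hook = pthread_introspection_hook_install(example_hook);
}
#endif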

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
            PTHREAD_SIZE);
    if (!destroy) return;
    _pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_create(pthread_t t, bool destroy)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_create(t, destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_start(pthread_t t)
{
    size_t freesize;
    void *freeaddr;
    if (t == &_thread) {
        freesize = t->stacksize + t->guardsize;
        freeaddr = t->stackaddr - freesize;
    } else {
        freesize = t->freesize - PTHREAD_SIZE;
        freeaddr = t->freeaddr;
    }
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
            freeaddr, freesize);
}

static inline void
_pthread_introspection_thread_start(pthread_t t)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_start(t);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_terminate(pthread_t t,
        void *freeaddr, size_t freesize, bool destroy)
{
    if (destroy && freesize) {
        freesize -= PTHREAD_SIZE;
    }
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
            freeaddr, freesize);
    if (!destroy) return;
    _pthread_introspection_thread_destroy(t);
}

static inline void
_pthread_introspection_thread_terminate(pthread_t t, void *freeaddr,
        size_t freesize, bool destroy)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_terminate(t, freeaddr, freesize,
            destroy);
}

PTHREAD_NOINLINE
static void
_pthread_introspection_hook_callout_thread_destroy(pthread_t t)
{
    if (t == &_thread) return;
    _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
            PTHREAD_SIZE);
}

static inline void
_pthread_introspection_thread_destroy(pthread_t t)
{
    if (os_fastpath(!_pthread_introspection_hook)) return;
    _pthread_introspection_hook_callout_thread_destroy(t);
}