/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "resolver.h"
#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"
#include "pthread/stack_np.h"
#include "offsets.h" // included to validate the offsets at build time

#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/mach_sync_ipc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/ulock.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#if __has_include(<ptrauth.h>)
#include <ptrauth.h>
#endif // __has_include(<ptrauth.h>)

#include <_simple.h>
#include <platform/string.h>
#include <platform/compat.h>

#include <stack_logging.h>

// Defined in libsyscall; initialized in libmalloc
extern malloc_logger_t *__syscall_logger;

extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
		void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
extern int __pthread_kill(mach_port_t, int);

extern void _pthread_joiner_wake(pthread_t thread);

#if !VARIANT_DYLD
PTHREAD_NOEXPORT extern struct _pthread *_main_thread_ptr;
#define main_thread() (_main_thread_ptr)
#endif // !VARIANT_DYLD

// Default stack size is 512KB; independent of the main thread's stack size.
#define DEFAULT_STACK_SIZE (size_t)(512 * 1024)


//
// Global constants
//

/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
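/*
 * A worked example of the macros above (illustrative only; assumes 4K pages
 * and a 512KB stack, neither of which is fixed by this file):
 *
 *   stackaddr = 0x70081000 (top of stack), stacksize = 0x80000
 *   PTHREAD_ALLOCADDR = (0x70081000 - 0x80000) - 0x1000 = 0x70000000
 *   PTHREAD_ALLOCSIZE = (0x70081000 + PTHREAD_SIZE) - 0x70000000
 *                     = guard page + stack + pthread struct
 */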

static const pthread_attr_t _pthread_attr_default = {
	.sig = _PTHREAD_ATTR_SIG,
	.stacksize = 0,
	.detached = PTHREAD_CREATE_JOINABLE,
	.inherit = _PTHREAD_DEFAULT_INHERITSCHED,
	.policy = _PTHREAD_DEFAULT_POLICY,
	.defaultguardpage = true,
	// compile time constant for _pthread_default_priority(0)
	.qosclass = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) |
			((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK),
};

#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
	.plo_version = 1,
	.plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
	.plo_pthread_tsd_base_address_offset = 0,
	.plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI

//
// Global exported variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
int __unix_conforming = 0;

//
// Global internal variables
//

// _pthread_list_lock protects _pthread_count and access to the
// __pthread_head list. It is externally imported by pthread_cancelable.c.
struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
_pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;

uint32_t _main_qos;

#if VARIANT_DYLD
// The main thread's pthread_t
struct _pthread _main_thread __attribute__((aligned(64))) = { };
#define main_thread() (&_main_thread)
#else // VARIANT_DYLD
struct _pthread *_main_thread_ptr;
#endif // VARIANT_DYLD

#if PTHREAD_DEBUG_LOG
#include <fcntl.h>
int _pthread_debuglog;
uint64_t _pthread_debugstart;
#endif

//
// Global static variables
//
static bool __workq_newapi;
static uint8_t default_priority;
#if !VARIANT_DYLD
static uint8_t max_priority;
static uint8_t min_priority;
#endif // !VARIANT_DYLD
static int _pthread_count = 1;
static int pthread_concurrency;
uintptr_t _pthread_ptr_munge_token;

static void (*exitf)(int) = __exit;
#if !VARIANT_DYLD
static void *(*_pthread_malloc)(size_t) = NULL;
static void (*_pthread_free)(void *) = NULL;
#endif // !VARIANT_DYLD

// work queue support data
PTHREAD_NORETURN
static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}

PTHREAD_NORETURN
static void
__pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events, int *nevents)
{
	PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
}
static pthread_workqueue_function2_t __libdispatch_workerfunction;
static pthread_workqueue_function_kevent_t __libdispatch_keventfunction = &__pthread_invalid_keventfunction;
static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction = &__pthread_invalid_workloopfunction;
static int __pthread_supported_features; // supported feature set

#if defined(__i386__) || defined(__x86_64__)
static mach_vm_address_t __pthread_stack_hint = 0xB0000000;
#elif defined(__arm__) || defined(__arm64__)
static mach_vm_address_t __pthread_stack_hint = 0x30000000;
#else
#error no __pthread_stack_hint for this architecture
#endif

//
// Function prototypes
//

// pthread primitives
static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stack, size_t stacksize, void *freeaddr, size_t freesize);

#if VARIANT_DYLD
static void _pthread_set_self_dyld(void);
#endif // VARIANT_DYLD
static inline void _pthread_set_self_internal(pthread_t);

static void _pthread_dealloc_reply_port(pthread_t t);
static void _pthread_dealloc_special_reply_port(pthread_t t);

static inline void __pthread_started_thread(pthread_t t);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;

static inline void _pthread_introspection_thread_create(pthread_t t);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM		0x01000000 // <rdar://problem/34501401>
#define PTHREAD_START_SETSCHED		0x02000000
// was PTHREAD_START_DETACHED	0x04000000
#define PTHREAD_START_QOSCLASS		0x08000000
#define PTHREAD_START_TSD_BASE_SET	0x10000000
#define PTHREAD_START_SUSPENDED		0x20000000
#define PTHREAD_START_QOSCLASS_MASK	0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff

extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg,
		void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
		void (*)(pthread_t, mach_port_t, void *, void *, int), int,
		void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
		int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);

#if __LP64__
_Static_assert(offsetof(struct _pthread, tsd) == 224, "TSD LP64 offset");
#else
_Static_assert(offsetof(struct _pthread, tsd) == 176, "TSD ILP32 offset");
#endif
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
		== offsetof(struct _pthread, thread_id),
		"_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");

#pragma mark pthread attrs

_Static_assert(sizeof(struct _pthread_attr_t) == sizeof(__darwin_pthread_attr_t),
		"internal pthread_attr_t == external pthread_attr_t");

int
pthread_attr_destroy(pthread_attr_t *attr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		ret = 0;
	}
	return ret;
}

static PTHREAD_ALWAYS_INLINE void
_pthread_attr_get_schedparam(const pthread_attr_t *attr,
		struct sched_param *param)
{
	if (attr->schedset) {
		*param = attr->param;
	} else {
		param->sched_priority = default_priority;
		param->quantum = 10; /* quantum isn't public yet */
	}
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		_pthread_attr_get_schedparam(attr, param);
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_init(pthread_attr_t *attr)
{
	*attr = _pthread_attr_default;
	return 0;
}

int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(detachstate == PTHREAD_CREATE_JOINABLE ||
			detachstate == PTHREAD_CREATE_DETACHED)) {
		attr->detached = detachstate;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(inheritsched == PTHREAD_INHERIT_SCHED ||
			inheritsched == PTHREAD_EXPLICIT_SCHED)) {
		attr->inherit = inheritsched;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER ||
			policy == SCHED_RR || policy == SCHED_FIFO)) {
		if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) {
			/* non-fixedpri policy should remove cpupercent */
			attr->cpupercentset = 0;
		}
		attr->policy = policy;
		attr->policyset = 1;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			// No attribute yet for the scope.
			ret = 0;
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			ret = ENOTSUP;
		}
	}
	return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0) {
		attr->stackaddr = stackaddr;
		attr->defaultguardpage = false;
		attr->guardsize = 0;
		ret = 0;
	}
	return ret;
}

static inline size_t
_pthread_attr_stacksize(const pthread_attr_t *attr)
{
	return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = _pthread_attr_stacksize(attr);
		ret = 0;
	}
	return ret;
}

// Per SUSv3, the stackaddr is the base address, the lowest addressable byte
// address. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG &&
			((uintptr_t)stackaddr % vm_page_size) == 0 &&
			(stacksize % vm_page_size) == 0 &&
			stacksize >= PTHREAD_STACK_MIN) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		ret = 0;
	}
	return ret;
}
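
/*
 * Illustrative usage of the two conventions (a sketch; `lo` is a
 * hypothetical page-aligned allocation, not something this file provides):
 *
 *   void *lo = NULL;
 *   posix_memalign(&lo, vm_page_size, PTHREAD_STACK_MIN);
 *   pthread_attr_setstack(&attr, lo, PTHREAD_STACK_MIN);  // lowest byte
 *
 * pthread_attr_setstackaddr(&attr, (char *)lo + PTHREAD_STACK_MIN) would
 * instead pass the top of the same region, matching how attr->stackaddr is
 * consumed above.
 */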

int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && (guardsize % vm_page_size) == 0) {
		/* Guardsize of 0 is valid, means no guard */
		attr->defaultguardpage = false;
		attr->guardsize = guardsize;
		ret = 0;
	}
	return ret;
}

static inline size_t
_pthread_attr_guardsize(const pthread_attr_t *attr)
{
	return attr->defaultguardpage ? vm_page_size : attr->guardsize;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = _pthread_attr_guardsize(attr);
		ret = 0;
	}
	return ret;
}

int
pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent,
		unsigned long refillms)
{
	int ret = EINVAL;
	if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX &&
			refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset &&
			_PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) {
		attr->cpupercent = percent;
		attr->refillms = (uint32_t)(refillms & 0x00ffffff);
		attr->cpupercentset = 1;
		ret = 0;
	}
	return ret;
}
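
/*
 * Illustrative order of operations (a sketch; the percent and refill values
 * are hypothetical): the CPU-percent limit is only accepted once a
 * fixed-priority policy has been selected on the attributes.
 *
 *   pthread_attr_t attr;
 *   pthread_attr_init(&attr);
 *   pthread_attr_setschedpolicy(&attr, SCHED_FIFO);  // fixed-pri first
 *   pthread_attr_setcpupercent_np(&attr, 50, 10);    // then the limit
 */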

#pragma mark pthread lifetime

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.
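//
// A sketch of the resulting layout in the default (non-custom-stack) case,
// addresses increasing left to right:
//
//   allocaddr                                     allocaddr + allocsize
//   | guard page | stack (grows down) ........ | pthread_t struct |
//   |<-guardsize>|<-------- stacksize -------->|<- PTHREAD_SIZE ->|
//
// The returned *stack points at the pthread_t, i.e. at the top of the
// stack (allocaddr + pthreadoff).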

static pthread_t
_pthread_allocate(const pthread_attr_t *attrs, void **stack,
		bool from_mach_thread)
{
	mach_vm_address_t allocaddr = __pthread_stack_hint;
	size_t allocsize, guardsize, stacksize, pthreadoff;
	kern_return_t kr;
	pthread_t t;

	if (os_unlikely(attrs->stacksize != 0 &&
			attrs->stacksize < PTHREAD_STACK_MIN)) {
		PTHREAD_CLIENT_CRASH(attrs->stacksize, "Stack size in attrs is too small");
	}

	if (os_unlikely(((uintptr_t)attrs->stackaddr % vm_page_size) != 0)) {
		PTHREAD_CLIENT_CRASH(attrs->stacksize, "Unaligned stack addr in attrs");
	}

	// Allocate a pthread structure if necessary

	if (attrs->stackaddr != NULL) {
		allocsize = PTHREAD_SIZE;
		guardsize = 0;
		pthreadoff = 0;
		// <rdar://problem/42588315> if the attrs struct specifies a custom
		// stack address but not a custom size, using ->stacksize here instead
		// of _pthread_attr_stacksize stores stacksize as zero, indicating
		// that the stack size is unknown.
		stacksize = attrs->stacksize;
	} else {
		guardsize = _pthread_attr_guardsize(attrs);
		stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
		pthreadoff = stacksize + guardsize;
		allocsize = pthreadoff + PTHREAD_SIZE;
		allocsize = mach_vm_round_page(allocsize);
	}

	kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (kr != KERN_SUCCESS) {
		kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate |
				stack_logging_type_mapped_file_or_shared_mem;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	if (kr != KERN_SUCCESS) {
		*stack = NULL;
		return NULL;
	} else if (__syscall_logger && !from_mach_thread) {
		// libsyscall will not output malloc stack logging events when
		// VM_MEMORY_STACK is passed in to facilitate mach thread promotion.
		// To avoid losing the stack traces for normal p-thread create
		// operations, libpthread must pretend to be the vm syscall and log
		// the allocations. <rdar://36418708>
		int eventTypeFlags = stack_logging_type_vm_allocate;
		__syscall_logger(eventTypeFlags | VM_MAKE_TAG(VM_MEMORY_STACK),
				(uintptr_t)mach_task_self(), (uintptr_t)allocsize, 0,
				(uintptr_t)allocaddr, 0);
	}

	// The stack grows down.
	// Set the guard page at the lowest address of the
	// newly allocated stack. Return the highest address
	// of the stack.
	if (guardsize) {
		(void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
				FALSE, VM_PROT_NONE);
	}

	// Thread structure resides at the top of the stack (when using a
	// custom stack, allocsize == PTHREAD_SIZE, so places the pthread_t
	// at allocaddr).
	t = (pthread_t)(allocaddr + pthreadoff);
	if (attrs->stackaddr) {
		*stack = attrs->stackaddr;
	} else {
		*stack = t;
	}

	_pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
	return t;
}

PTHREAD_NOINLINE
void
_pthread_deallocate(pthread_t t, bool from_mach_thread)
{
	kern_return_t ret;

	// Don't free the main thread.
	if (t != main_thread()) {
		if (!from_mach_thread) { // see __pthread_add_thread
			_pthread_introspection_thread_destroy(t);
		}
		ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
		if (ret != KERN_SUCCESS) {
			PTHREAD_INTERNAL_CRASH(ret, "Unable to deallocate stack");
		}
	}
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void*
_pthread_current_stack_address(void)
{
	int a;
	return &a;
}

#pragma clang diagnostic pop

void
_pthread_joiner_wake(pthread_t thread)
{
	uint32_t *exit_gate = &thread->tl_exit_gate;

	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0);
		if (ret == 0 || ret == -ENOENT) {
			return;
		}
		if (ret != -EINTR) {
			PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure");
		}
	}
}

// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t, void *exit_value)
{
	_pthread_introspection_thread_terminate(t);

	uintptr_t freeaddr = (uintptr_t)t->freeaddr;
	size_t freesize = t->freesize;
	bool should_exit;

	// the size of just the stack
	size_t freesize_stack = t->freesize;

	// We usually pass our structure+stack to bsdthread_terminate to free, but
	// if we get told to keep the pthread_t structure around then we need to
	// adjust the free size and addr in the pthread_t to just refer to the
	// structure and not the stack. If we do end up deallocating the
	// structure, this is useless work since no one can read the result, but we
	// can't do it after the call to pthread_remove_thread because it isn't
	// safe to dereference t after that.
	if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize) {
		// Check to ensure the pthread structure itself is part of the
		// allocation described by freeaddr/freesize, in which case we split and
		// only deallocate the area below the pthread structure. In the event of a
		// custom stack, the freeaddr/size will be the pthread structure itself, in
		// which case we shouldn't free anything (the final else case).
		freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

		// describe just the remainder for deallocation when the pthread_t goes away
		t->freeaddr += freesize_stack;
		t->freesize -= freesize_stack;
	} else if (t == main_thread()) {
		freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
		uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
		freesize_stack = stackborder - freeaddr;
	} else {
		freesize_stack = 0;
	}

	mach_port_t kport = _pthread_kernel_thread(t);
	bool keep_thread_struct = false, needs_wake = false;
	semaphore_t custom_stack_sema = MACH_PORT_NULL;

	_pthread_dealloc_special_reply_port(t);
	_pthread_dealloc_reply_port(t);

	_PTHREAD_LOCK(_pthread_list_lock);

	// This piece of code interacts with pthread_join. It will always:
	// - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
	// - set tl_exit_value to the value passed to pthread_exit()
	// - decrement _pthread_count, so that we can exit the process when all
	//   threads exited even if not all of them were joined.
	t->tl_exit_gate = MACH_PORT_DEAD;
	t->tl_exit_value = exit_value;
	should_exit = (--_pthread_count <= 0);

	// If we see a joiner, we prepost that the join has to succeed,
	// and the joiner is committed to finish (even if it was canceled)
	if (t->tl_join_ctx) {
		custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
		needs_wake = true;
	}

	// Joinable threads that have no joiner yet are kept on the thread list
	// so that pthread_join() can later discover the thread when it is joined,
	// and will have to do the pthread_t cleanup.
	if (t->tl_joinable) {
		t->tl_joiner_cleans_up = keep_thread_struct = true;
	} else {
		TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (needs_wake) {
		// When we found a waiter, we want to drop the very contended list lock
		// before we do the syscall in _pthread_joiner_wake(). Then, we decide
		// who gets to cleanup the pthread_t between the joiner and the exiting
		// thread:
		// - the joiner tries to set tl_join_ctx to NULL
		// - the exiting thread tries to set tl_joiner_cleans_up to true
		// Whoever does it first commits the other side to cleanup the pthread_t
		_pthread_joiner_wake(t);
		_PTHREAD_LOCK(_pthread_list_lock);
		if (t->tl_join_ctx) {
			t->tl_joiner_cleans_up = true;
			keep_thread_struct = true;
		}
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	//
	// /!\ dereferencing `t` past this point is not safe /!\
	//

	if (keep_thread_struct || t == main_thread()) {
		// Use the adjusted freesize of just the stack that we computed above.
		freesize = freesize_stack;
	} else {
		_pthread_introspection_thread_destroy(t);
	}

	// Check if there is nothing to free because the thread has a custom
	// stack allocation and is joinable.
	if (freesize == 0) {
		freeaddr = 0;
	}
	if (should_exit) {
		exitf(0);
	}
	__bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
	PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
}

PTHREAD_NORETURN
static void
_pthread_terminate_invoke(pthread_t t, void *exit_value)
{
#if PTHREAD_T_OFFSET
	void *p = NULL;
	// <rdar://problem/25688492> During pthread termination there is a race
	// between pthread_join and pthread_terminate; if the joiner is responsible
	// for cleaning up the pthread_t struct, then it may destroy some part of the
	// stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
	// to crash because its stack has been removed from under its feet, just make
	// sure termination happens in a part of the stack that is not on the same
	// page as the pthread_t.
	if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
			trunc_page((uintptr_t)t)) {
		p = alloca(PTHREAD_T_OFFSET);
	}
	// And this __asm__ volatile is needed to stop the compiler from optimising
	// away the alloca() completely.
	__asm__ volatile ("" : : "r"(p) );
#endif
	_pthread_terminate(t, exit_value);
}

#pragma mark pthread start / body

PTHREAD_NORETURN
void
_pthread_start(pthread_t self, mach_port_t kport,
		__unused void *(*fun)(void *), __unused void *arg,
		__unused size_t stacksize, unsigned int pflags)
{
	if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"kernel without PTHREAD_START_SUSPENDED support");
	}
	if (os_unlikely((pflags & PTHREAD_START_TSD_BASE_SET) == 0)) {
		PTHREAD_INTERNAL_CRASH(pflags,
				"thread_set_tsd_base() wasn't called by the kernel");
	}
	PTHREAD_DEBUG_ASSERT(MACH_PORT_VALID(kport));
	PTHREAD_DEBUG_ASSERT(_pthread_kernel_thread(self) == kport);
	_pthread_validate_signature(self);
	_pthread_markcancel_if_canceled(self, kport);

	_pthread_set_self_internal(self);
	__pthread_started_thread(self);
	_pthread_exit(self, (self->fun)(self->arg));
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
		void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
{
	_pthread_init_signature(t);
	t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
	t->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &t->err_no;
	if (attrs->schedset == 0) {
		t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
				_pthread_unspecified_priority();
	}
	t->tsd[_PTHREAD_TSD_SLOT_PTR_MUNGE] = _pthread_ptr_munge_token;
	t->tl_has_custom_stack = (attrs->stackaddr != NULL);

	_PTHREAD_LOCK_INIT(t->lock);

	t->stackaddr = stackaddr;
	t->stackbottom = stackaddr - stacksize;
	t->freeaddr = freeaddr;
	t->freesize = freesize;

	t->guardsize = _pthread_attr_guardsize(attrs);
	t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
	t->inherit = attrs->inherit;
	t->tl_policy = attrs->policy;
	t->schedset = attrs->schedset;
	_pthread_attr_get_schedparam(attrs, &t->tl_param);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}

#pragma mark pthread public interface

/* Need to deprecate this in the future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or had) at
 * least one thread apart from the main thread. There can be a race if a
 * thread is in the process of being created at the time of the call, and it
 * does not say whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return __is_threaded;
}


PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;
	(void)_pthread_is_valid(t, &kport);
	return kport;
}

PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	_PTHREAD_LOCK(_pthread_list_lock);

	TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
		if (_pthread_kernel_thread(p) == kernel_thread) {
			break;
		}
	}

	_PTHREAD_UNLOCK(_pthread_list_lock);

	return p;
}

PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
	size_t size = 0;

	if (t == NULL) {
		return ESRCH; // XXX bug?
	}

#if TARGET_OS_OSX
	// The default rlimit based allocations will be provided with a stacksize
	// of the current limit and a freesize of the max. However, custom
	// allocations will just have the guard page to free. If we aren't in the
	// latter case, call into rlimit to determine the current stack size. In
	// the event that the current limit == max limit then we'll fall into the
	// fast path, but since it's unlikely that the limit is going to be
	// lowered after it's been changed to the max, we should be fine.
	//
	// Of course, on arm rlim_cur == rlim_max and there's only the one guard
	// page. So, we can skip all this there.
	if (t == main_thread()) {
		size_t stacksize = t->stackaddr - t->stackbottom;

		if (stacksize + vm_page_size != t->freesize) {
			// We want to call getrlimit() just once, as it's relatively
			// expensive
			static size_t rlimit_stack;

			if (rlimit_stack == 0) {
				struct rlimit limit;
				int ret = getrlimit(RLIMIT_STACK, &limit);

				if (ret == 0) {
					rlimit_stack = (size_t) limit.rlim_cur;
				}
			}

			if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
				return stacksize;
			} else {
				return round_page(rlimit_stack);
			}
		}
	}
#endif /* TARGET_OS_OSX */

	if (t == pthread_self() || t == main_thread()) {
		size = t->stackaddr - t->stackbottom;
		goto out;
	}

	if (_pthread_validate_thread_and_list_lock(t)) {
		size = t->stackaddr - t->stackbottom;
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

out:
	// <rdar://problem/42588315> binary compatibility issues force us to return
	// DEFAULT_STACK_SIZE here when we do not know the size of the stack
	return size ? size : DEFAULT_STACK_SIZE;
}

PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		return t->stackaddr;
	}

	if (!_pthread_validate_thread_and_list_lock(t)) {
		return (void *)(uintptr_t)ESRCH; // XXX bug?
	}

	void *addr = t->stackaddr;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return addr;
}


static mach_port_t
_pthread_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
	void *p = (void *)(uintptr_t)reply_port;
	if (t == NULL) {
		_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
	} else {
		t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
	}
}

static void
_pthread_dealloc_reply_port(pthread_t t)
{
	mach_port_t reply_port = _pthread_reply_port(t);
	if (reply_port != MACH_PORT_NULL) {
		mig_dealloc_reply_port(reply_port);
	}
}

static mach_port_t
_pthread_special_reply_port(pthread_t t)
{
	void *p;
	if (t == NULL) {
		p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
	} else {
		p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
	}
	return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_dealloc_special_reply_port(pthread_t t)
{
	mach_port_t special_reply_port = _pthread_special_reply_port(t);
	if (special_reply_port != MACH_PORT_NULL) {
		thread_destruct_special_reply_port(special_reply_port,
				THREAD_SPECIAL_REPLY_PORT_ALL);
	}
}

pthread_t
pthread_main_thread_np(void)
{
	return main_thread();
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	return pthread_self() == main_thread();
}


static int
_pthread_threadid_slow(pthread_t thread, uint64_t *thread_id)
{
	unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT;
	mach_port_t thport = _pthread_kernel_thread(thread);
	struct thread_identifier_info info;
	kern_return_t kr;

	kr = thread_info(thport, THREAD_IDENTIFIER_INFO,
			(thread_info_t)&info, &info_count);
	if (kr == KERN_SUCCESS && info.thread_id) {
		*thread_id = info.thread_id;
		os_atomic_store(&thread->thread_id, info.thread_id, relaxed);
		return 0;
	}
	return EINVAL;
}

/*
 * If we are passed a NULL pthread_t, return the calling thread's thread_id.
 * That way callers who just want their own thread_id don't have to call
 * pthread_self() in addition to us doing it.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
	int res = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return EINVAL;
	}

	if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
	} else if (!_pthread_validate_thread_and_list_lock(thread)) {
		res = ESRCH;
	} else {
		*thread_id = os_atomic_load(&thread->thread_id, relaxed);
		if (os_unlikely(*thread_id == 0)) {
			// there is a race at init because the thread sets its own TID.
			// correct this by asking mach
			res = _pthread_threadid_slow(thread, thread_id);
		}
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
	return res;
}
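
/*
 * Illustrative call (a sketch): passing NULL asks for the calling thread's
 * own ID.
 *
 *   uint64_t tid;
 *   if (pthread_threadid_np(NULL, &tid) == 0) {
 *       // tid now holds this thread's kernel-unique ID
 *   }
 */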

PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	if (thread == pthread_self()) {
		strlcpy(threadname, thread->pthread_name, len);
		return 0;
	}

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	strlcpy(threadname, thread->pthread_name, len);
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}


int
pthread_setname_np(const char *name)
{
	int res;
	pthread_t self = pthread_self();

	size_t len = 0;
	if (name != NULL) {
		len = strlen(name);
	}

	_pthread_validate_signature(self);

	/* prototype is in pthread_internals.h */
	res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
	if (res == 0) {
		if (len > 0) {
			strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
		} else {
			bzero(self->pthread_name, MAXTHREADNAMESIZE);
		}
	}
	return res;
}
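
/*
 * Illustrative usage (a sketch; the name is hypothetical): unlike on some
 * other platforms, this variant only names the calling thread, so it takes
 * no pthread_t argument.
 *
 *   pthread_setname_np("com.example.worker");
 */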

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool from_mach_thread)
{
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist);
	_pthread_count++;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}

	if (!from_mach_thread) {
		// PR-26275485: Mach threads will likely crash trying to run
		// introspection code. Since the fallout from the introspection
		// code not seeing the injected thread is likely less than crashing
		// in the introspection code, just don't make the call.
		_pthread_introspection_thread_create(t);
	}
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_undo_add_thread(pthread_t t, bool from_mach_thread)
{
	if (from_mach_thread) {
		_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_LOCK(_pthread_list_lock);
	}

	TAILQ_REMOVE(&__pthread_head, t, tl_plist);
	_pthread_count--;

	if (from_mach_thread) {
		_PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
	} else {
		_PTHREAD_UNLOCK(_pthread_list_lock);
	}
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_started_thread(pthread_t t)
{
	mach_port_t kport = _pthread_kernel_thread(t);
	if (os_unlikely(!MACH_PORT_VALID(kport))) {
		PTHREAD_CLIENT_CRASH(kport,
				"Unable to allocate thread port, possible port leak");
	}
	_pthread_introspection_thread_start(t);
}

#define _PTHREAD_CREATE_NONE 0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
#define _PTHREAD_CREATE_SUSPENDED 0x2

static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
		void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
	pthread_t t = NULL;
	void *stack = NULL;
	bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);

	if (attrs == NULL) {
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}

	unsigned int flags = PTHREAD_START_CUSTOM;
	if (attrs->schedset != 0) {
		struct sched_param p;
		_pthread_attr_get_schedparam(attrs, &p);
		flags |= PTHREAD_START_SETSCHED;
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	} else if (attrs->qosclass != 0) {
		flags |= PTHREAD_START_QOSCLASS;
		flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
	}
	if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
		flags |= PTHREAD_START_SUSPENDED;
	}

	__is_threaded = 1;

	t = _pthread_allocate(attrs, &stack, from_mach_thread);
	if (t == NULL) {
		return EAGAIN;
	}

	t->arg = arg;
	t->fun = start_routine;
	__pthread_add_thread(t, from_mach_thread);

	if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
			(pthread_t)-1) {
		if (errno == EMFILE) {
			PTHREAD_CLIENT_CRASH(0,
					"Unable to allocate thread port, possible port leak");
		}
		__pthread_undo_add_thread(t, from_mach_thread);
		_pthread_deallocate(t, from_mach_thread);
		return EAGAIN;
	}

	// n.b. if a thread is created detached and exits, t will be invalid
	*thread = t;
	return 0;
}

int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_NONE;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}
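
/*
 * Illustrative lifecycle (a sketch; worker is a hypothetical start routine):
 *
 *   void *worker(void *arg) { return arg; }
 *   ...
 *   pthread_t th;
 *   if (pthread_create(&th, NULL, worker, NULL) == 0) {
 *       void *result;
 *       pthread_join(th, &result);
 *   }
 */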

int
pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}

int
pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	unsigned int flags = _PTHREAD_CREATE_SUSPENDED;
	return _pthread_create(thread, attr, start_routine, arg, flags);
}


PTHREAD_NOEXPORT_VARIANT
int
pthread_detach(pthread_t thread)
{
	int res = 0;
	bool join = false, wake = false;

	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (!thread->tl_joinable) {
		res = EINVAL;
	} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
		// Join the thread if it's already exited.
		join = true;
	} else {
		thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
		if (thread->tl_join_ctx) {
			(void)_pthread_joiner_prepost_wake(thread);
			wake = true;
		}
	}
	_PTHREAD_UNLOCK(_pthread_list_lock);

	if (join) {
		pthread_join(thread, NULL);
	} else if (wake) {
		_pthread_joiner_wake(thread);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_kill(pthread_t th, int sig)
{
	if (sig < 0 || sig > NSIG) {
		return EINVAL;
	}

	mach_port_t kport = MACH_PORT_NULL;
	{
		if (!_pthread_is_valid(th, &kport)) {
			return ESRCH;
		}
	}

	int ret = __pthread_kill(kport, sig);

	if (ret == -1) {
		ret = errno;
	}
	return ret;
}

PTHREAD_NOEXPORT_VARIANT
int
__pthread_workqueue_setkill(int enable)
{
	{
		return __bsdthread_ctl(BSDTHREAD_CTL_WORKQ_ALLOW_KILL, enable, 0, 0);
	}
}


/* For compatibility... */

pthread_t
_pthread_self(void)
{
	return pthread_self();
}

/*
 * Terminate a thread.
 */
extern int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *exit_value)
{
	struct __darwin_pthread_handler_rec *handler;

	// Disable signal delivery while we clean up
	__disable_threadsignal(1);

	// Set cancel state to disable and type to deferred
	_pthread_setcancelstate_exit(self, exit_value);

	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	// Clear per-thread semaphore cache
	os_put_cached_semaphore(SEMAPHORE_NULL);

	_pthread_terminate_invoke(self, exit_value);
}

void
pthread_exit(void *exit_value)
{
	pthread_t self = pthread_self();
	if (os_unlikely(self->wqthread)) {
		PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
				"not created by pthread_create()");
	}
	_pthread_validate_signature(self);
	_pthread_exit(self, exit_value);
}


PTHREAD_NOEXPORT_VARIANT
int
pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
	if (!_pthread_validate_thread_and_list_lock(thread)) {
		return ESRCH;
	}

	if (policy) *policy = thread->tl_policy;
	if (param) *param = thread->tl_param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}



PTHREAD_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
		const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return EINVAL;
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	return (ret != KERN_SUCCESS) ? EINVAL : 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int bypass = 1;

	// since the main thread will not get de-allocated from underneath us
	if (t == pthread_self() || t == main_thread()) {
		_pthread_validate_signature(t);
		kport = _pthread_kernel_thread(t);
	} else {
		bypass = 0;
		if (!_pthread_is_valid(t, &kport)) {
			return ESRCH;
		}
	}

	int res = pthread_setschedparam_internal(t, kport, policy, param);
	if (res) return res;

	if (bypass) {
		_PTHREAD_LOCK(_pthread_list_lock);
	} else if (!_pthread_validate_thread_and_list_lock(t)) {
		// Ensure the thread is still valid.
		return ESRCH;
	}

	t->tl_policy = policy;
	t->tl_param = *param;
	_PTHREAD_UNLOCK(_pthread_list_lock);
	return 0;
}
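
/*
 * Illustrative usage (a sketch; the priority value is hypothetical):
 *
 *   struct sched_param sp = { .sched_priority = 45 };
 *   pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
 */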


int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

/*
 * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
 * then _pthread_set_self won't be bound when secondary threads try and start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
#if VARIANT_DYLD
	if (os_likely(!p)) {
		return _pthread_set_self_dyld();
	}
#endif // VARIANT_DYLD
	_pthread_set_self_internal(p);
	_thread_set_tsd_base(&p->tsd[0]);
}

#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired
PTHREAD_NOINLINE PTHREAD_NOEXPORT
void
_pthread_set_self_dyld(void)
{
	pthread_t p = main_thread();
	p->thread_id = __thread_selfid();

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}

	// <rdar://problem/40930651> pthread self and the errno address are the
	// bare minimum TSD setup that dyld needs to actually function. Without
	// this, TSD access will fail and crash if it uses bits of Libc prior to
	// library initialization. __pthread_init will finish the initialization
	// during library init.
	p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
	p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
	_thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p)
{
	os_atomic_store(&p->thread_id, __thread_selfid(), relaxed);

	if (os_unlikely(p->thread_id == -1ull)) {
		PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
	}
}


// <rdar://problem/28984807> pthread_once should have an acquire barrier
PTHREAD_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
	if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
		_os_once(predicate, context, function);
		OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
	}
}

struct _pthread_once_context {
	pthread_once_t *pthread_once;
	void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
	struct _pthread_once_context *ctx = context;
	pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
	ctx->routine();
	pthread_cleanup_pop(0);
	ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
	struct _pthread_once_context ctx = { once_control, init_routine };
	do {
		_os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
	} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
	return 0;
}
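
/*
 * Illustrative usage (a sketch; init_shared_state is hypothetical):
 *
 *   static pthread_once_t once = PTHREAD_ONCE_INIT;
 *   static void init_shared_state(void) { ... }  // runs exactly once
 *   ...
 *   pthread_once(&once, init_shared_state);
 */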


int
pthread_getconcurrency(void)
{
	return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0) {
		return EINVAL;
	}
	pthread_concurrency = new_level;
	return 0;
}

#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
	if (_pthread_malloc) {
		return _pthread_malloc(sz);
	} else {
		return NULL;
	}
}

void
free(void *p)
{
	if (_pthread_free) {
		_pthread_free(p);
	}
}
#endif // VARIANT_STATIC

/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */

#if !VARIANT_DYLD
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
	uintptr_t val = 0;

	// Expect hex string starting with "0x"
	if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
		p += 2;
		while (1) {
			char c = *p;
			if ('0' <= c && c <= '9') {
				val = (val << 4) + (c - '0');
			} else if ('a' <= c && c <= 'f') {
				val = (val << 4) + (c - 'a' + 10);
			} else if ('A' <= c && c <= 'F') {
				val = (val << 4) + (c - 'A' + 10);
			} else {
				break;
			}
			++p;
		}
	}

	*endptr = (char *)p;
	return val;
}

static int
parse_main_stack_params(const char *apple[],
		void **stackaddr,
		size_t *stacksize,
		void **allocaddr,
		size_t *allocsize)
{
	const char *p = _simple_getenv(apple, "main_stack");
	if (!p) return 0;

	int ret = 0;
	const char *s = p;

	*stackaddr = _pthread_strtoul(s, &s, 16);
	if (*s != ',') goto out;

	*stacksize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocaddr = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',') goto out;

	*allocsize = _pthread_strtoul(s + 1, &s, 16);
	if (*s != ',' && *s != 0) goto out;

	ret = 1;
out:
	bzero((char *)p, strlen(p));
	return ret;
}
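
/*
 * Per the parser above, the apple[] entry has this shape (the values shown
 * here are hypothetical):
 *
 *   main_stack=0x7ffee8000000,0x800000,0x7ffee0000000,0x8800000
 *              (stackaddr)    (stacksize) (allocaddr) (allocsize)
 */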

static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
	const char *p, *s;
	uintptr_t token = 0;
	p = _simple_getenv(apple, "ptr_munge");
	if (p) {
		token = _pthread_strtoul(p, &s, 16);
		bzero((char *)p, strlen(p));
	}
#if !DEBUG
	if (!token) {
#endif
		p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
		if (p) {
			uintptr_t t = _pthread_strtoul(p, &s, 16);
			if (t) token = t;
		}
#if !DEBUG
	}

	if (!token) {
		PTHREAD_INTERNAL_CRASH(token, "Token from the kernel is 0");
	}
#endif // DEBUG

	_pthread_ptr_munge_token = token;
	// we need to refresh the main thread signature now that we changed
	// the munge token. We need to do it while TSAN will not look at it
	_pthread_init_signature(_main_thread_ptr);
}

int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
		const char *envp[], const char *apple[],
		const struct ProgramVars *vars __unused)
{
	// Save our provided pushed-down functions
	if (pthread_funcs) {
		exitf = pthread_funcs->exit;

		if (pthread_funcs->version >= 2) {
			_pthread_malloc = pthread_funcs->malloc;
			_pthread_free = pthread_funcs->free;
		}
	}

	// libpthread.a in dyld "owns" the main thread structure itself and sets
	// up the tsd to point to it. So take the pthread_self() from there
	// and make it our main thread pointer.
	pthread_t thread = (pthread_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_PTHREAD_SELF);
	if (os_unlikely(thread == NULL)) {
		PTHREAD_INTERNAL_CRASH(0, "PTHREAD_SELF TSD not initialized");
	}
	_main_thread_ptr = thread;
	// this needs to be done early so that pthread_self() works in TSAN
	_pthread_init_signature(thread);

	//
	// Get host information
	//

	kern_return_t kr;
	host_flavor_t flavor = HOST_PRIORITY_INFO;
	mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
	host_priority_info_data_t priority_info;
	host_t host = mach_host_self();
	kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
	if (kr != KERN_SUCCESS) {
		PTHREAD_INTERNAL_CRASH(kr, "host_info() failed");
	} else {
		default_priority = (uint8_t)priority_info.user_priority;
		min_priority = (uint8_t)priority_info.minimum_priority;
		max_priority = (uint8_t)priority_info.maximum_priority;
	}
	mach_port_deallocate(mach_task_self(), host);

	//
	// Set up the main thread structure
	//

	// Get the address and size of the main thread's stack from the kernel.
	void *stackaddr = 0;
	size_t stacksize = 0;
	void *allocaddr = 0;
	size_t allocsize = 0;
	if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
			stackaddr == NULL || stacksize == 0) {
1873 // Fall back to previous behavior.
1874 size_t len = sizeof(stackaddr);
1875 int mib[] = { CTL_KERN, KERN_USRSTACK };
1876 if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
1877 #if defined(__LP64__)
1878 stackaddr = (void *)USRSTACK64;
1879 #else
1880 stackaddr = (void *)USRSTACK;
1881 #endif
1882 }
1883 stacksize = DFLSSIZ;
1884 allocaddr = 0;
1885 allocsize = 0;
1886 }
1887
1888 // Initialize random ptr_munge token from the kernel.
1889 parse_ptr_munge_params(envp, apple);
1890
1891 PTHREAD_DEBUG_ASSERT(_pthread_attr_default.qosclass ==
1892 _pthread_default_priority(0));
1893 _pthread_struct_init(thread, &_pthread_attr_default,
1894 stackaddr, stacksize, allocaddr, allocsize);
1895 thread->tl_joinable = true;
1896
1897 // Finish initialization with common code that is reinvoked on the
1898 // child side of a fork.
1899
1900 // Finishes initialization of main thread attributes.
1901 // Initializes the thread list and adds the main thread.
1902 // Calls _pthread_set_self() to prepare the main thread for execution.
1903 _pthread_main_thread_init(thread);
1904
1905 struct _pthread_registration_data registration_data;
1906 // Set up kernel entry points with __bsdthread_register.
1907 _pthread_bsdthread_init(&registration_data);
1908
1909 // Have pthread_key and pthread_mutex do their init envvar checks.
1910 _pthread_key_global_init(envp);
1911 _pthread_mutex_global_init(envp, &registration_data);
1912
1913 #if PTHREAD_DEBUG_LOG
1914 _SIMPLE_STRING path = _simple_salloc();
1915 _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
1916 _pthread_debuglog = open(_simple_string(path),
1917 O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
1918 _simple_sfree(path);
1919 _pthread_debugstart = mach_absolute_time();
1920 #endif
1921
1922 return 0;
1923 }
1924 #endif // !VARIANT_DYLD
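/*
 * Standalone sketch of the KERN_USRSTACK fallback used above: any
 * process can query the top of its main thread's stack with sysctl(3).
 * This is an equivalent user-level example, not libpthread code.
 */
#if 0 /* illustrative only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	void *stackaddr = NULL;
	size_t len = sizeof(stackaddr);
	int mib[] = { CTL_KERN, KERN_USRSTACK };
	if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) == 0) {
		printf("main stack top: %p\n", stackaddr);
	}
	return 0;
}
#endif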
1925
1926 PTHREAD_NOEXPORT void
1927 _pthread_main_thread_init(pthread_t p)
1928 {
1929 TAILQ_INIT(&__pthread_head);
1930 _PTHREAD_LOCK_INIT(_pthread_list_lock);
1931 _PTHREAD_LOCK_INIT(p->lock);
1932 _pthread_set_kernel_thread(p, mach_thread_self());
1933 _pthread_set_reply_port(p, mach_reply_port());
1934 p->__cleanup_stack = NULL;
1935 p->tl_join_ctx = NULL;
1936 p->tl_exit_gate = MACH_PORT_NULL;
1937 p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)(uintptr_t)SEMAPHORE_NULL;
1938 p->tsd[__TSD_MACH_SPECIAL_REPLY] = 0;
1939
1940 // Initialize the list of threads with the new main thread.
1941 TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
1942 _pthread_count = 1;
1943
1944 _pthread_introspection_thread_start(p);
1945 }
1946
1947 PTHREAD_NOEXPORT
1948 void
1949 _pthread_main_thread_postfork_init(pthread_t p)
1950 {
1951 _pthread_main_thread_init(p);
1952 _pthread_set_self_internal(p);
1953 }
1954
1955 int
1956 sched_yield(void)
1957 {
1958 swtch_pri(0);
1959 return 0;
1960 }
1961
1962 // XXX remove
1963 void
1964 cthread_yield(void)
1965 {
1966 sched_yield();
1967 }
1968
1969 void
1970 pthread_yield_np(void)
1971 {
1972 sched_yield();
1973 }
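/*
 * Caller-side sketch: sched_yield() (and the wrappers above) is
 * typically used to back off a spin loop so the thread holding the
 * resource can run. The wait loop below is hypothetical.
 */
#if 0 /* illustrative only */
#include <stdatomic.h>
#include <sched.h>

static void
spin_then_yield(atomic_int *ready)
{
	int spins = 0;
	while (!atomic_load_explicit(ready, memory_order_acquire)) {
		if (++spins > 1000) {
			sched_yield(); /* give up the rest of the quantum */
		}
	}
}
#endif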
1974
1975 // Libsystem knows about this symbol and exports it to libsyscall
1976 int
1977 pthread_current_stack_contains_np(const void *addr, size_t length)
1978 {
1979 uintptr_t begin = (uintptr_t) addr, end;
1980 uintptr_t stack_base = (uintptr_t) _pthread_self_direct()->stackbottom;
1981 uintptr_t stack_top = (uintptr_t) _pthread_self_direct()->stackaddr;
1982
1983 if (stack_base == stack_top) {
1984 return -ENOTSUP;
1985 }
1986
1987 if (__builtin_add_overflow(begin, length, &end)) {
1988 return -EINVAL;
1989 }
1990
1991 return stack_base <= begin && end <= stack_top;
1992 }
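/*
 * Usage sketch: the function above returns 1/0 for contained/not
 * contained and a negative errno on failure, so callers must not treat
 * the result as a plain boolean. The helper below is hypothetical.
 */
#if 0 /* illustrative only */
static int
buffer_is_on_my_stack(const void *buf, size_t len)
{
	int rc = pthread_current_stack_contains_np(buf, len);
	return rc == 1; /* rc < 0 means -ENOTSUP / -EINVAL, not "false" */
}

static void
contains_example(void)
{
	int local = 0;
	(void)buffer_is_on_my_stack(&local, sizeof(local)); /* expect 1 */
}
#endif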
1993
1994
1995
1996 // Libsystem knows about this symbol and exports it to libsyscall
1997 PTHREAD_NOEXPORT_VARIANT
1998 void
1999 _pthread_clear_qos_tsd(mach_port_t thread_port)
2000 {
2001 if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
2002 /* Clear the current thread's TSD; that can be done inline. */
2003 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
2004 _pthread_unspecified_priority());
2005 } else {
2006 pthread_t p;
2007
2008 _PTHREAD_LOCK(_pthread_list_lock);
2009
2010 TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
2011 mach_port_t kp = _pthread_kernel_thread(p);
2012 if (thread_port == kp) {
2013 p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
2014 _pthread_unspecified_priority();
2015 break;
2016 }
2017 }
2018
2019 _PTHREAD_UNLOCK(_pthread_list_lock);
2020 }
2021 }
2022
2023
2024 #pragma mark pthread/stack_np.h public interface
2025
2026
2027 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
2028 #if __ARM64_ARCH_8_32__
2029 /*
2030 * arm64_32 uses 64-bit sizes for the frame pointer and
2031 * return address of a stack frame.
2032 */
2033 typedef uint64_t frame_data_addr_t;
2034 #else
2035 typedef uintptr_t frame_data_addr_t;
2036 #endif
2037
2038 struct frame_data {
2039 frame_data_addr_t frame_addr_next;
2040 frame_data_addr_t ret_addr;
2041 };
2042 #else
2043 #error ********** Unimplemented architecture
2044 #endif
2045
2046 uintptr_t
2047 pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
2048 {
2049 struct frame_data *frame = (struct frame_data *)frame_addr;
2050
2051 if (return_addr) {
2052 #if __has_feature(ptrauth_calls)
2053 *return_addr = (uintptr_t)ptrauth_strip((void *)frame->ret_addr,
2054 ptrauth_key_return_address);
2055 #else
2056 *return_addr = (uintptr_t)frame->ret_addr;
2057 #endif /* __has_feature(ptrauth_calls) */
2058 }
2059
2060 #if __has_feature(ptrauth_calls)
2061 return (uintptr_t)ptrauth_strip((void *)frame->frame_addr_next,
2062 ptrauth_key_frame_pointer);
2063 #endif /* __has_feature(ptrauth_calls) */
2064 return (uintptr_t)frame->frame_addr_next;
2065 }
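/*
 * Sketch of a frame walk built on pthread_stack_frame_decode_np(): start
 * from the current frame pointer and follow the chain, collecting return
 * addresses (already ptrauth-stripped by the decode above). A production
 * walker would also bounds-check each frame against the stack; this
 * helper is hypothetical.
 */
#if 0 /* illustrative only */
#include <pthread/stack_np.h>

static size_t
collect_return_addresses(uintptr_t *out, size_t max)
{
	uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
	size_t n = 0;
	while (frame != 0 && n < max) {
		uintptr_t ret = 0;
		frame = pthread_stack_frame_decode_np(frame, &ret);
		if (ret == 0) break;
		out[n++] = ret;
	}
	return n;
}
#endif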
2066
2067
2068 #pragma mark pthread workqueue support routines
2069
2070
2071 PTHREAD_NOEXPORT void
2072 _pthread_bsdthread_init(struct _pthread_registration_data *data)
2073 {
2074 bzero(data, sizeof(*data));
2075 data->version = sizeof(struct _pthread_registration_data);
2076 data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
2077 data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
2078 data->tsd_offset = offsetof(struct _pthread, tsd);
2079 data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
2080
2081 int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
2082 (void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);
2083
2084 if (rv > 0) {
2085 int required_features =
2086 PTHREAD_FEATURE_FINEPRIO |
2087 PTHREAD_FEATURE_BSDTHREADCTL |
2088 PTHREAD_FEATURE_SETSELF |
2089 PTHREAD_FEATURE_QOS_MAINTENANCE |
2090 PTHREAD_FEATURE_QOS_DEFAULT;
2091 if ((rv & required_features) != required_features) {
2092 PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
2093 }
2094 __pthread_supported_features = rv;
2095 }
2096
2097 /*
2098 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
2099 * effect of resetting the child's stack_addr_hint before bailing out) and
2100 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
2101 * the latter as fatal.
2102 *
2103 * <rdar://problem/36451838>
2104 */
2105
2106 pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;
2107
2108 if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
2109 _pthread_set_main_qos(main_qos);
2110 main_thread()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
2111 }
2112
2113 if (data->stack_addr_hint) {
2114 __pthread_stack_hint = data->stack_addr_hint;
2115 }
2116
2117 if (__libdispatch_workerfunction != NULL) {
2118 // prepare the kernel for workq action
2119 (void)__workq_open();
2120 }
2121 }
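/*
 * Sketch: other code can consult the feature bits cached above through
 * _pthread_workqueue_supported() (defined later in this file) before
 * relying on newer kernel behavior.
 */
#if 0 /* illustrative only */
static void
feature_check_example(void)
{
	if (_pthread_workqueue_supported() & PTHREAD_FEATURE_FINEPRIO) {
		/* kernel accepts fine-grained priorities */
	}
}
#endif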
2122
2123 PTHREAD_NOINLINE
2124 static void
2125 _pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
2126 {
2127 /* Old thread priorities are inverted relative to the new flexible
2128 * priority scheme: zero is the highest priority, descending through
2129 * 1 and 2, with background at 3.
2130 */
2131 pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
2132 bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
2133 int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
2134
2135 switch (_pthread_priority_thread_qos(pp)) {
2136 case THREAD_QOS_USER_INITIATED:
2137 return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
2138 case THREAD_QOS_LEGACY:
2139 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2140 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2141 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2142 */
2143 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
2144 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
2145 return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
2146 case THREAD_QOS_UTILITY:
2147 return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
2148 case THREAD_QOS_BACKGROUND:
2149 return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
2150 }
2151 PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
2152 }
2153
2154 PTHREAD_ALWAYS_INLINE
2155 static inline pthread_priority_t
2156 _pthread_wqthread_priority(int flags)
2157 {
2158 pthread_priority_t pp = 0;
2159 thread_qos_t qos;
2160
2161 if (flags & WQ_FLAG_THREAD_KEVENT) {
2162 pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2163 }
2164 if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
2165 return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
2166 }
2167
2168 if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
2169 pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2170 }
2171 if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
2172 qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
2173 pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
2174 } else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
2175 pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
2176 pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
2177 } else {
2178 PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
2179 }
2180 return pp;
2181 }
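/*
 * Worked example of the encoding above, traced by hand through the
 * branches of the function. The flag combination is hypothetical; the
 * exact bit values come from the kernel workqueue interface.
 */
#if 0 /* illustrative only */
static void
wq_priority_example(void)
{
	/* WQ_FLAG_THREAD_KEVENT     -> sets _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG
	 * WQ_FLAG_THREAD_OVERCOMMIT -> sets _PTHREAD_PRIORITY_OVERCOMMIT_FLAG
	 * WQ_FLAG_THREAD_PRIO_QOS   -> low bits (here THREAD_QOS_UTILITY) are
	 *   folded in via _pthread_priority_make_from_thread_qos(qos, 0, pp) */
	pthread_priority_t pp = _pthread_wqthread_priority(
			WQ_FLAG_THREAD_KEVENT | WQ_FLAG_THREAD_OVERCOMMIT |
			WQ_FLAG_THREAD_PRIO_QOS | THREAD_QOS_UTILITY);
	(void)pp;
}
#endif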
2182
2183 PTHREAD_NOINLINE
2184 static void
2185 _pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
2186 int flags)
2187 {
2188 void *stackaddr = self;
2189 size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
2190
2191 _pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
2192 PTHREAD_ALLOCADDR(stackaddr, stacksize),
2193 PTHREAD_ALLOCSIZE(stackaddr, stacksize));
2194
2195 _pthread_set_kernel_thread(self, kport);
2196 self->wqthread = 1;
2197 self->wqkillset = 0;
2198 self->tl_joinable = false;
2199
2200 // Update the running thread count and set childrun bit.
2201 if (os_unlikely((flags & WQ_FLAG_THREAD_TSD_BASE_SET) == 0)) {
2202 PTHREAD_INTERNAL_CRASH(flags,
2203 "thread_set_tsd_base() wasn't called by the kernel");
2204 }
2205 _pthread_set_self_internal(self);
2206 __pthread_add_thread(self, false);
2207 __pthread_started_thread(self);
2208 }
2209
2210 PTHREAD_NORETURN PTHREAD_NOINLINE
2211 static void
2212 _pthread_wqthread_exit(pthread_t self)
2213 {
2214 pthread_priority_t pp;
2215 thread_qos_t qos;
2216
2217 pp = (pthread_priority_t)self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS];
2218 qos = _pthread_priority_thread_qos(pp);
2219 if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
2220 // Reset QoS to something low for the cleanup process
2221 pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
2222 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
2223 }
2224
2225 _pthread_exit(self, NULL);
2226 }
2227
2228 // workqueue entry point from kernel
2229 void
2230 _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
2231 void *keventlist, int flags, int nkevents)
2232 {
2233 if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
2234 _pthread_wqthread_setup(self, kport, stacklowaddr, flags);
2235 }
2236
2237 pthread_priority_t pp;
2238
2239 if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
2240 self->wq_outsideqos = 1;
2241 pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
2242 _PTHREAD_PRIORITY_FALLBACK_FLAG);
2243 } else {
2244 self->wq_outsideqos = 0;
2245 pp = _pthread_wqthread_priority(flags);
2246 }
2247
2248 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
2249
2250 // Avoid spills onto the stack; try hard to keep used stack space minimal.
2251 if (os_unlikely(nkevents == WORKQ_EXIT_THREAD_NKEVENT)) {
2252 _pthread_wqthread_exit(self);
2253 } else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
2254 kqueue_id_t *kqidptr = (kqueue_id_t *)keventlist - 1;
2255 self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
2256 self->arg = keventlist;
2257 self->wq_nevents = nkevents;
2258 (*__libdispatch_workloopfunction)(kqidptr, &self->arg, &self->wq_nevents);
2259 __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, self->arg, self->wq_nevents, 0);
2260 } else if (flags & WQ_FLAG_THREAD_KEVENT) {
2261 self->fun = (void *(*)(void*))__libdispatch_keventfunction;
2262 self->arg = keventlist;
2263 self->wq_nevents = nkevents;
2264 (*__libdispatch_keventfunction)(&self->arg, &self->wq_nevents);
2265 __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, self->arg, self->wq_nevents, 0);
2266 } else {
2267 self->fun = (void *(*)(void*))__libdispatch_workerfunction;
2268 self->arg = (void *)(uintptr_t)pp;
2269 self->wq_nevents = 0;
2270 if (os_likely(__workq_newapi)) {
2271 (*__libdispatch_workerfunction)(pp);
2272 } else {
2273 _pthread_wqthread_legacy_worker_wrap(pp);
2274 }
2275 __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
2276 }
2277
2278 _os_set_crash_log_cause_and_message(self->err_no,
2279 "BUG IN LIBPTHREAD: __workq_kernreturn returned");
2280 /*
2281 * 52858993: we should never return but the compiler insists on outlining,
2282 * so the __builtin_trap() is in _start_wqthread in pthread_asm.s
2283 */
2284 }
2285
2286
2287 #pragma mark pthread workqueue API for libdispatch
2288
2289
2290 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
2291 "Kernel and userland should agree on the event list size");
2292
2293 void
2294 pthread_workqueue_setdispatchoffset_np(int offset)
2295 {
2296 __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, offset, 0x00);
2297 }
2298
2299 int
2300 pthread_workqueue_setup(struct pthread_workqueue_config *cfg, size_t cfg_size)
2301 {
2302 int rv = EBUSY;
2303 struct workq_dispatch_config wdc_cfg;
2304 size_t min_size = 0;
2305
2306 if (!cfg || cfg_size < sizeof(uint32_t)) {
2307 return EINVAL;
2308 }
2309
2310 switch (cfg->version) {
2311 case 1:
2312 min_size = offsetof(struct pthread_workqueue_config, queue_label_offs);
2313 break;
2314 case 2:
2315 min_size = sizeof(struct pthread_workqueue_config);
2316 break;
2317 default:
2318 return EINVAL;
2319 }
2320
2321 if (cfg_size < min_size) {
2322 return EINVAL;
2323 }
2324
2325 if (cfg->flags & ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS ||
2326 cfg->version < PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION) {
2327 return ENOTSUP;
2328 }
2329
2330 if (__libdispatch_workerfunction == NULL) {
2331 __workq_newapi = true;
2332
2333 wdc_cfg.wdc_version = WORKQ_DISPATCH_CONFIG_VERSION;
2334 wdc_cfg.wdc_flags = 0;
2335 wdc_cfg.wdc_queue_serialno_offs = cfg->queue_serialno_offs;
2336 #if WORKQ_DISPATCH_CONFIG_VERSION >= 2
2337 wdc_cfg.wdc_queue_label_offs = cfg->queue_label_offs;
2338 #endif
2339
2340 // Tell the kernel about dispatch internals
2341 rv = (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH, &wdc_cfg, sizeof(wdc_cfg), 0);
2342 if (rv == -1) {
2343 return errno;
2344 } else {
2345 __libdispatch_keventfunction = cfg->kevent_cb;
2346 __libdispatch_workloopfunction = cfg->workloop_cb;
2347 __libdispatch_workerfunction = cfg->workq_cb;
2348
2349 // Prepare the kernel for workq action
2350 (void)__workq_open();
2351 if (__is_threaded == 0) {
2352 __is_threaded = 1;
2353 }
2354
2355 return 0;
2356 }
2357 }
2358
2359 return rv;
2360 }
2361
2362 int
2363 _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
2364 pthread_workqueue_function_kevent_t kevent_func,
2365 pthread_workqueue_function_workloop_t workloop_func,
2366 int offset, int flags)
2367 {
2368 struct pthread_workqueue_config cfg = {
2369 .version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
2370 .flags = 0,
2371 .workq_cb = queue_func,
2372 .kevent_cb = kevent_func,
2373 .workloop_cb = workloop_func,
2374 .queue_serialno_offs = offset,
2375 .queue_label_offs = 0,
2376 };
2377
2378 return pthread_workqueue_setup(&cfg, sizeof(cfg));
2379 }
2380
2381 int
2382 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
2383 pthread_workqueue_function_kevent_t kevent_func,
2384 int offset, int flags)
2385 {
2386 return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
2387 }
2388
2389 int
2390 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
2391 {
2392 return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
2393 }
2394
2395 int
2396 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
2397 {
2398 struct pthread_workqueue_config cfg = {
2399 .version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
2400 .flags = 0,
2401 .workq_cb = (uint64_t)(pthread_workqueue_function2_t)worker_func,
2402 .kevent_cb = 0,
2403 .workloop_cb = 0,
2404 .queue_serialno_offs = 0,
2405 .queue_label_offs = 0,
2406 };
2407
2408 return pthread_workqueue_setup(&cfg, sizeof(cfg));
2409 }
2410
2411 int
2412 _pthread_workqueue_supported(void)
2413 {
2414 if (os_unlikely(!__pthread_supported_features)) {
2415 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2416 }
2417
2418 return __pthread_supported_features;
2419 }
2420
2421 int
2422 pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
2423 {
2424 int res = 0;
2425
2426 // Cannot add threads without a worker function registered.
2427 if (__libdispatch_workerfunction == NULL) {
2428 return EPERM;
2429 }
2430
2431 pthread_priority_t kp = 0;
2432 int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
2433 int flags = 0;
2434
2435 if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
2436 flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2437 }
2438
2439 #pragma clang diagnostic push
2440 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2441 kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
2442 #pragma clang diagnostic pop
2443
2444 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
2445 if (res == -1) {
2446 res = errno;
2447 }
2448 return res;
2449 }
2450
2451 bool
2452 _pthread_workqueue_should_narrow(pthread_priority_t pri)
2453 {
2454 int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
2455 if (res == -1) {
2456 return false;
2457 }
2458 return res;
2459 }
2460
2461 int
2462 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
2463 {
2464 int res = 0;
2465
2466 if (__libdispatch_workerfunction == NULL) {
2467 return EPERM;
2468 }
2469
2470 #if TARGET_OS_OSX
2471 // <rdar://problem/37687655> Legacy simulators fail to boot
2472 //
2473 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2474 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2475 // validates and rejects.
2476 //
2477 // As a workaround, forcefully unset this bit that cannot be set here
2478 // anyway.
2479 priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
2480 #endif
2481
2482 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
2483 if (res == -1) {
2484 res = errno;
2485 }
2486 return res;
2487 }
2488
2489 int
2490 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
2491 {
2492 int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
2493 if (res == -1) {
2494 res = errno;
2495 }
2496 return res;
2497 }
2498
2499 int
2500 _pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr)
2501 {
2502 struct kqueue_workloop_params params = {
2503 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2504 .kqwlp_id = workloop_id,
2505 .kqwlp_flags = 0,
2506 };
2507
2508 if (!attr) {
2509 return EINVAL;
2510 }
2511
2512 if (attr->schedset) {
2513 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI;
2514 params.kqwlp_sched_pri = attr->param.sched_priority;
2515 }
2516
2517 if (attr->policyset) {
2518 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL;
2519 params.kqwlp_sched_pol = attr->policy;
2520 }
2521
2522 if (attr->cpupercentset) {
2523 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT;
2524 params.kqwlp_cpu_percent = attr->cpupercent;
2525 params.kqwlp_cpu_refillms = attr->refillms;
2526 }
2527
2528 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params,
2529 sizeof(params));
2530 if (res == -1) {
2531 res = errno;
2532 }
2533 return res;
2534 }
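/*
 * Usage sketch for _pthread_workloop_create(): the attribute fields it
 * reads (schedset, policyset, cpupercentset) are populated through the
 * regular pthread_attr_t setters. The priority value here is arbitrary.
 */
#if 0 /* illustrative only */
static int
workloop_create_example(uint64_t workloop_id)
{
	pthread_attr_t attr;
	struct sched_param param = { .sched_priority = 47 };

	pthread_attr_init(&attr);
	pthread_attr_setschedparam(&attr, &param); /* marks attr->schedset */
	int rc = _pthread_workloop_create(workloop_id, 0, &attr);
	pthread_attr_destroy(&attr);
	return rc; /* 0 on success, otherwise an errno value */
}
#endif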
2535
2536 int
2537 _pthread_workloop_destroy(uint64_t workloop_id)
2538 {
2539 struct kqueue_workloop_params params = {
2540 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2541 .kqwlp_id = workloop_id,
2542 };
2543
2544 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, &params,
2545 sizeof(params));
2546 if (res == -1) {
2547 res = errno;
2548 }
2549 return res;
2550 }
2551
2552
2553 #pragma mark Introspection SPI for libpthread.
2554
2555
2556 static pthread_introspection_hook_t _pthread_introspection_hook;
2557
2558 pthread_introspection_hook_t
2559 pthread_introspection_hook_install(pthread_introspection_hook_t hook)
2560 {
2561 pthread_introspection_hook_t prev;
2562 prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
2563 return prev;
2564 }
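/*
 * Installation sketch: hooks chain, so a new hook must save and call the
 * previous one. The event constants and hook signature come from the
 * introspection SPI header included at the top of this file.
 */
#if 0 /* illustrative only */
static pthread_introspection_hook_t example_prev_hook;

static void
example_hook(unsigned int event, pthread_t thread, void *addr, size_t size)
{
	if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
		/* `addr`/`size` describe the pthread_t allocation itself */
	}
	if (example_prev_hook) {
		example_prev_hook(event, thread, addr, size); /* keep the chain */
	}
}

static void
example_install(void)
{
	example_prev_hook = pthread_introspection_hook_install(example_hook);
}
#endif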
2565
2566 PTHREAD_NOINLINE
2567 static void
2568 _pthread_introspection_hook_callout_thread_create(pthread_t t)
2569 {
2570 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
2571 PTHREAD_SIZE);
2572 }
2573
2574 static inline void
2575 _pthread_introspection_thread_create(pthread_t t)
2576 {
2577 if (os_fastpath(!_pthread_introspection_hook)) return;
2578 _pthread_introspection_hook_callout_thread_create(t);
2579 }
2580
2581 PTHREAD_NOINLINE
2582 static void
2583 _pthread_introspection_hook_callout_thread_start(pthread_t t)
2584 {
2585 size_t freesize;
2586 void *freeaddr;
2587 if (t == main_thread()) {
2588 size_t stacksize = t->stackaddr - t->stackbottom;
2589 freesize = stacksize + t->guardsize;
2590 freeaddr = t->stackaddr - freesize;
2591 } else {
2592 freesize = t->freesize - PTHREAD_SIZE;
2593 freeaddr = t->freeaddr;
2594 }
2595 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
2596 freeaddr, freesize);
2597 }
2598
2599 static inline void
2600 _pthread_introspection_thread_start(pthread_t t)
2601 {
2602 if (os_fastpath(!_pthread_introspection_hook)) return;
2603 _pthread_introspection_hook_callout_thread_start(t);
2604 }
2605
2606 PTHREAD_NOINLINE
2607 static void
2608 _pthread_introspection_hook_callout_thread_terminate(pthread_t t)
2609 {
2610 size_t freesize;
2611 void *freeaddr;
2612 if (t == main_thread()) {
2613 size_t stacksize = t->stackaddr - t->stackbottom;
2614 freesize = stacksize + t->guardsize;
2615 freeaddr = t->stackaddr - freesize;
2616 } else {
2617 freesize = t->freesize - PTHREAD_SIZE;
2618 freeaddr = t->freeaddr;
2619 }
2620 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
2621 freeaddr, freesize);
2622 }
2623
2624 static inline void
2625 _pthread_introspection_thread_terminate(pthread_t t)
2626 {
2627 if (os_fastpath(!_pthread_introspection_hook)) return;
2628 _pthread_introspection_hook_callout_thread_terminate(t);
2629 }
2630
2631 PTHREAD_NOINLINE
2632 static void
2633 _pthread_introspection_hook_callout_thread_destroy(pthread_t t)
2634 {
2635 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
2636 PTHREAD_SIZE);
2637 }
2638
2639 static inline void
2640 _pthread_introspection_thread_destroy(pthread_t t)
2641 {
2642 if (os_fastpath(!_pthread_introspection_hook)) return;
2643 _pthread_introspection_hook_callout_thread_destroy(t);
2644 }
2645
2646
2647 #if !VARIANT_DYLD
2648 #pragma mark libplatform shims
2649
2650 #include <platform/string.h>
2651
2652 // pthread_setup initializes large structures to 0,
2653 // which the compiler turns into a library call to memset.
2654 //
2655 // To avoid linking against Libc, provide a simple wrapper
2656 // that calls through to the libplatform primitives.
2657
2658 #undef memset
2659 PTHREAD_NOEXPORT
2660 void *
2661 memset(void *b, int c, size_t len)
2662 {
2663 return _platform_memset(b, c, len);
2664 }
2665
2666 #undef bzero
2667 PTHREAD_NOEXPORT
2668 void
2669 bzero(void *s, size_t n)
2670 {
2671 _platform_bzero(s, n);
2672 }
2673
2674 #undef memcpy
2675 PTHREAD_NOEXPORT
2676 void *
2677 memcpy(void* a, const void* b, unsigned long s)
2678 {
2679 return _platform_memmove(a, b, s);
2680 }
2681
2682 #endif // !VARIANT_DYLD