/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "resolver.h"
#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
#include "tsd_private.h"
#include "pthread/stack_np.h"
#include "offsets.h" // included to validate the offsets at build time

#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/mach_sync_ipc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/ulock.h>
#include <sys/mman.h>
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>

#include <_simple.h>
#include <platform/string.h>
#include <platform/compat.h>

extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
        void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
extern int __pthread_kill(mach_port_t, int);

extern void _pthread_joiner_wake(pthread_t thread);

#if !VARIANT_DYLD
PTHREAD_NOEXPORT extern struct _pthread *_main_thread_ptr;
#define main_thread() (_main_thread_ptr)
#endif // VARIANT_DYLD

// Default stack size is 512KB; independent of the main thread's stack size.
#define DEFAULT_STACK_SIZE (size_t)(512 * 1024)


//
// Global constants
//

/*
 * The pthread may be offset into a page. In that event, by contract
 * with the kernel, the allocation will extend PTHREAD_SIZE from the
 * start of the next page. There's also one page worth of allocation
 * below stacksize for the guard page. <rdar://problem/19941744>
 */
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
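
// Illustrative sketch (not part of this file's build): a worked example of
// the layout arithmetic above, assuming a hypothetical 4K page size, a
// page-aligned stack top, and a hypothetical rounded pthread_t size. The
// EX_-prefixed names and the addresses are made up for illustration.
//
//   allocaddr                                     allocaddr + allocsize
//   | guard page | stack (grows down) | struct _pthread (PTHREAD_SIZE) |
//
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SIZE    0x1000u  // hypothetical 4K page
#define EX_PTHREAD_SIZE 0x2000u  // hypothetical rounded sizeof(struct _pthread)

static void
example_layout(void)
{
    uintptr_t stackaddr = 0x200000;      // top of stack (stack grows down)
    size_t stacksize = 512 * 1024;       // DEFAULT_STACK_SIZE
    uintptr_t allocaddr = (stackaddr - stacksize) - EX_PAGE_SIZE;
    size_t allocsize = (stackaddr + EX_PTHREAD_SIZE) - allocaddr;

    // guard page + stack + thread structure
    assert(allocsize == EX_PAGE_SIZE + stacksize + EX_PTHREAD_SIZE);
}
#endif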

static const pthread_attr_t _pthread_attr_default = {
    .sig = _PTHREAD_ATTR_SIG,
    .stacksize = 0,
    .detached = PTHREAD_CREATE_JOINABLE,
    .inherit = _PTHREAD_DEFAULT_INHERITSCHED,
    .policy = _PTHREAD_DEFAULT_POLICY,
    .defaultguardpage = true,
    // compile time constant for _pthread_default_priority(0)
    .qosclass = (1U << (THREAD_QOS_LEGACY - 1 + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)) |
            ((uint8_t)-1 & _PTHREAD_PRIORITY_PRIORITY_MASK),
};

#if PTHREAD_LAYOUT_SPI

const struct pthread_layout_offsets_s pthread_layout_offsets = {
    .plo_version = 1,
    .plo_pthread_tsd_base_offset = offsetof(struct _pthread, tsd),
    .plo_pthread_tsd_base_address_offset = 0,
    .plo_pthread_tsd_entry_size = sizeof(((struct _pthread *)NULL)->tsd[0]),
};

#endif // PTHREAD_LAYOUT_SPI

//
// Global exported variables
//

// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
int __unix_conforming = 0;

//
// Global internal variables
//

// _pthread_list_lock protects _pthread_count and access to the
// __pthread_head list. Externally imported by pthread_cancelable.c.
struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
_pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;

uint32_t _main_qos;

#if VARIANT_DYLD
// The main thread's pthread_t
struct _pthread _main_thread __attribute__((aligned(64))) = { };
#define main_thread() (&_main_thread)
#else // VARIANT_DYLD
struct _pthread *_main_thread_ptr;
#endif // VARIANT_DYLD

#if PTHREAD_DEBUG_LOG
#include <fcntl.h>
int _pthread_debuglog;
uint64_t _pthread_debugstart;
#endif

//
// Global static variables
//
static bool __workq_newapi;
static uint8_t default_priority;
#if !VARIANT_DYLD
static uint8_t max_priority;
static uint8_t min_priority;
#endif // !VARIANT_DYLD
static int _pthread_count = 1;
static int pthread_concurrency;
static uintptr_t _pthread_ptr_munge_token;

static void (*exitf)(int) = __exit;
#if !VARIANT_DYLD
static void *(*_pthread_malloc)(size_t) = NULL;
static void (*_pthread_free)(void *) = NULL;
#endif // !VARIANT_DYLD

// work queue support data
PTHREAD_NORETURN
static void
__pthread_invalid_keventfunction(void **events, int *nevents)
{
    PTHREAD_CLIENT_CRASH(0, "Invalid kqworkq setup");
}

PTHREAD_NORETURN
static void
__pthread_invalid_workloopfunction(uint64_t *workloop_id, void **events, int *nevents)
{
    PTHREAD_CLIENT_CRASH(0, "Invalid kqwl setup");
}
static pthread_workqueue_function2_t __libdispatch_workerfunction;
static pthread_workqueue_function_kevent_t __libdispatch_keventfunction = &__pthread_invalid_keventfunction;
static pthread_workqueue_function_workloop_t __libdispatch_workloopfunction = &__pthread_invalid_workloopfunction;
static int __libdispatch_offset;
static int __pthread_supported_features; // supported feature set

#if defined(__i386__) || defined(__x86_64__)
static mach_vm_address_t __pthread_stack_hint = 0xB0000000;
#else
#error no __pthread_stack_hint for this architecture
#endif

//
// Function prototypes
//

// pthread primitives
static inline void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
        void *stack, size_t stacksize, void *freeaddr, size_t freesize);

#if VARIANT_DYLD
static void _pthread_set_self_dyld(void);
#endif // VARIANT_DYLD
static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);

static void _pthread_dealloc_reply_port(pthread_t t);
static void _pthread_dealloc_special_reply_port(pthread_t t);

static inline void __pthread_started_thread(pthread_t t);

static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;

static inline void _pthread_introspection_thread_create(pthread_t t);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t);
static inline void _pthread_introspection_thread_destroy(pthread_t t);

extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31 <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
#define PTHREAD_START_CUSTOM 0x01000000 // <rdar://problem/34501401>
#define PTHREAD_START_SETSCHED 0x02000000
// was PTHREAD_START_DETACHED 0x04000000
#define PTHREAD_START_QOSCLASS 0x08000000
#define PTHREAD_START_TSD_BASE_SET 0x10000000
#define PTHREAD_START_SUSPENDED 0x20000000
#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
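
// Illustrative sketch (not part of this file's build): packing and unpacking
// the flags word laid out in the diagram above, using the masks and shift
// just defined. The sample policy and importance values are hypothetical.
#if 0
#include <assert.h>

static void
example_flags(void)
{
    unsigned int policy = 1;      // hypothetical policy number
    unsigned int importance = 37; // hypothetical sched_param priority

    unsigned int flags = PTHREAD_START_CUSTOM | PTHREAD_START_SETSCHED;
    flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
    flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);

    // unpack the same way the kernel/trampoline side would
    assert(((flags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK) == policy);
    assert((flags & PTHREAD_START_IMPORTANCE_MASK) == importance);
    assert(flags & PTHREAD_START_SETSCHED);
}
#endif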

#if (!defined(__OPEN_SOURCE__) && TARGET_OS_OSX) || OS_VARIANT_RESOLVED // 40703288
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int,
        const struct sched_param *);
#endif

extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);

#if __LP64__
_Static_assert(offsetof(struct _pthread, tsd) == 224, "TSD LP64 offset");
#else
_Static_assert(offsetof(struct _pthread, tsd) == 176, "TSD ILP32 offset");
#endif
_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
        == offsetof(struct _pthread, thread_id),
        "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");

#pragma mark pthread attrs

_Static_assert(sizeof(struct _pthread_attr_t) == sizeof(__darwin_pthread_attr_t),
        "internal pthread_attr_t == external pthread_attr_t");

int
pthread_attr_destroy(pthread_attr_t *attr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        attr->sig = 0;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *detachstate = attr->detached;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *inheritsched = attr->inherit;
        ret = 0;
    }
    return ret;
}

static PTHREAD_ALWAYS_INLINE void
_pthread_attr_get_schedparam(const pthread_attr_t *attr,
        struct sched_param *param)
{
    if (attr->schedset) {
        *param = attr->param;
    } else {
        param->sched_priority = default_priority;
        param->quantum = 10; /* quantum isn't public yet */
    }
}

int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        _pthread_attr_get_schedparam(attr, param);
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *policy = attr->policy;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_init(pthread_attr_t *attr)
{
    *attr = _pthread_attr_default;
    return 0;
}

int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (detachstate == PTHREAD_CREATE_JOINABLE ||
             detachstate == PTHREAD_CREATE_DETACHED)) {
        attr->detached = detachstate;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (inheritsched == PTHREAD_INHERIT_SCHED ||
             inheritsched == PTHREAD_EXPLICIT_SCHED)) {
        attr->inherit = inheritsched;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        attr->schedset = 1;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG && (policy == SCHED_OTHER ||
            policy == SCHED_RR || policy == SCHED_FIFO)) {
        if (!_PTHREAD_POLICY_IS_FIXEDPRI(policy)) {
            /* non-fixedpri policy should remove cpupercent */
            attr->cpupercentset = 0;
        }
        attr->policy = policy;
        attr->policyset = 1;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            // No attribute yet for the scope.
            ret = 0;
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            ret = ENOTSUP;
        }
    }
    return ret;
}

int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0) {
        attr->stackaddr = stackaddr;
        attr->defaultguardpage = false;
        attr->guardsize = 0;
        ret = 0;
    }
    return ret;
}

static inline size_t
_pthread_attr_stacksize(const pthread_attr_t *attr)
{
    return attr->stacksize ? attr->stacksize : DEFAULT_STACK_SIZE;
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = _pthread_attr_stacksize(attr);
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stacksize = stacksize;
        ret = 0;
    }
    return ret;
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = _pthread_attr_stacksize(attr);
        ret = 0;
    }
    return ret;
}

// Per SUSv3, the stackaddr is the base address, i.e. the lowest addressable
// byte of the stack. This is not the same as in pthread_attr_setstackaddr.
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG &&
            ((uintptr_t)stackaddr % vm_page_size) == 0 &&
            (stacksize % vm_page_size) == 0 &&
            stacksize >= PTHREAD_STACK_MIN) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        ret = 0;
    }
    return ret;
}
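
// Illustrative usage sketch (not part of this file's build), assuming only
// the public <pthread.h> and <sys/mman.h> APIs: allocate a page-aligned
// region and hand its *lowest* address to pthread_attr_setstack(), per the
// SUSv3 convention noted above. The example_ names are hypothetical.
#if 0
#include <pthread.h>
#include <sys/mman.h>

static int
example_custom_stack(pthread_t *out, void *(*fn)(void *), void *arg)
{
    size_t stacksize = 512 * 1024; // page multiple, >= PTHREAD_STACK_MIN
    void *base = mmap(NULL, stacksize, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
    if (base == MAP_FAILED) return -1;

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    // `base` is the lowest addressable byte; the library stores
    // base + stacksize internally as the (downward-growing) stack top.
    pthread_attr_setstack(&attr, base, stacksize);
    int rc = pthread_create(out, &attr, fn, arg);
    pthread_attr_destroy(&attr);
    return rc;
}
#endif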

int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG && (guardsize % vm_page_size) == 0) {
        /* Guardsize of 0 is valid, means no guard */
        attr->defaultguardpage = false;
        attr->guardsize = guardsize;
        ret = 0;
    }
    return ret;
}

static inline size_t
_pthread_attr_guardsize(const pthread_attr_t *attr)
{
    return attr->defaultguardpage ? vm_page_size : attr->guardsize;
}

int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = _pthread_attr_guardsize(attr);
        ret = 0;
    }
    return ret;
}

int
pthread_attr_setcpupercent_np(pthread_attr_t *attr, int percent,
        unsigned long refillms)
{
    int ret = EINVAL;
    if (attr->sig == _PTHREAD_ATTR_SIG && percent < UINT8_MAX &&
            refillms < _PTHREAD_ATTR_REFILLMS_MAX && attr->policyset &&
            _PTHREAD_POLICY_IS_FIXEDPRI(attr->policy)) {
        attr->cpupercent = percent;
        attr->refillms = (uint32_t)(refillms & 0x00ffffff);
        attr->cpupercentset = 1;
        ret = 0;
    }
    return ret;
}
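
// Illustrative usage sketch (not part of this file's build): the CPU-percent
// SPI above only succeeds after a fixed-priority policy has been set on the
// attribute, so the calls must come in this order. The values are
// hypothetical sample numbers.
#if 0
#include <pthread.h>

static void
example_cpupercent(pthread_attr_t *attr)
{
    pthread_attr_init(attr);
    pthread_attr_setschedpolicy(attr, SCHED_FIFO); // sets policyset, fixed-pri
    pthread_attr_setcpupercent_np(attr, 50, 10);   // 50% over a 10ms refill
}
#endif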

#pragma mark pthread lifetime

// Allocate a thread structure, stack and guard page.
//
// The thread structure may optionally be placed in the same allocation as the
// stack, residing above the top of the stack. This cannot be done if a
// custom stack address is provided.
//
// Similarly the guard page cannot be allocated if a custom stack address is
// provided.
//
// The allocated thread structure is initialized with values that indicate how
// it should be freed.

static pthread_t
_pthread_allocate(const pthread_attr_t *attrs, void **stack)
{
    mach_vm_address_t allocaddr = __pthread_stack_hint;
    size_t allocsize, guardsize, stacksize;
    kern_return_t kr;
    pthread_t t;

    PTHREAD_ASSERT(attrs->stacksize == 0 ||
            attrs->stacksize >= PTHREAD_STACK_MIN);

    // Allocate a pthread structure if necessary

    if (attrs->stackaddr != NULL) {
        PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        allocsize = PTHREAD_SIZE;
        guardsize = 0;
        // <rdar://problem/42588315> if the attrs struct specifies a custom
        // stack address but not a custom size, using ->stacksize here instead
        // of _pthread_attr_stacksize stores stacksize as zero, indicating
        // that the stack size is unknown.
        stacksize = attrs->stacksize;
    } else {
        guardsize = _pthread_attr_guardsize(attrs);
        stacksize = _pthread_attr_stacksize(attrs) + PTHREAD_T_OFFSET;
        allocsize = stacksize + guardsize + PTHREAD_SIZE;
        allocsize = mach_vm_round_page(allocsize);
    }

    kr = mach_vm_map(mach_task_self(), &allocaddr, allocsize, vm_page_size - 1,
            VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
            0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

    if (kr != KERN_SUCCESS) {
        kr = mach_vm_allocate(mach_task_self(), &allocaddr, allocsize,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    }
    if (kr != KERN_SUCCESS) {
        *stack = NULL;
        return NULL;
    }

    // The stack grows down.
    // Set the guard page at the lowest address of the
    // newly allocated stack. Return the highest address
    // of the stack.
    if (guardsize) {
        (void)mach_vm_protect(mach_task_self(), allocaddr, guardsize,
                FALSE, VM_PROT_NONE);
    }

    // Thread structure resides at the top of the stack (when using a
    // custom stack, allocsize == PTHREAD_SIZE, which places the pthread_t
    // at allocaddr).
    t = (pthread_t)(allocaddr + allocsize - PTHREAD_SIZE);
    if (attrs->stackaddr) {
        *stack = attrs->stackaddr;
    } else {
        *stack = t;
    }

    _pthread_struct_init(t, attrs, *stack, stacksize, allocaddr, allocsize);
    return t;
}

PTHREAD_NOINLINE
void
_pthread_deallocate(pthread_t t, bool from_mach_thread)
{
    kern_return_t ret;

    // Don't free the main thread.
    if (t != main_thread()) {
        if (!from_mach_thread) { // see __pthread_add_thread
            _pthread_introspection_thread_destroy(t);
        }
        ret = mach_vm_deallocate(mach_task_self(), t->freeaddr, t->freesize);
        PTHREAD_ASSERT(ret == KERN_SUCCESS);
    }
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreturn-stack-address"

PTHREAD_NOINLINE
static void*
_pthread_current_stack_address(void)
{
    int a;
    return &a;
}

#pragma clang diagnostic pop

void
_pthread_joiner_wake(pthread_t thread)
{
    uint32_t *exit_gate = &thread->tl_exit_gate;

    for (;;) {
        int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, exit_gate, 0);
        if (ret == 0 || ret == -ENOENT) {
            return;
        }
        if (ret != -EINTR) {
            PTHREAD_INTERNAL_CRASH(-ret, "pthread_join() wake failure");
        }
    }
}

// Terminates the thread if called from the currently running thread.
PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t, void *exit_value)
{
    PTHREAD_ASSERT(t == pthread_self());

    _pthread_introspection_thread_terminate(t);

    uintptr_t freeaddr = (uintptr_t)t->freeaddr;
    size_t freesize = t->freesize;
    bool should_exit;

    // the size of just the stack
    size_t freesize_stack = t->freesize;

    // We usually pass our structure+stack to bsdthread_terminate to free, but
    // if we get told to keep the pthread_t structure around then we need to
    // adjust the free size and addr in the pthread_t to just refer to the
    // structure and not the stack. If we do end up deallocating the
    // structure, this is useless work since no one can read the result, but we
    // can't do it after the call to pthread_remove_thread because it isn't
    // safe to dereference t after that.
    if ((void *)t > t->freeaddr && (void *)t < t->freeaddr + t->freesize) {
        // Check to ensure the pthread structure itself is part of the
        // allocation described by freeaddr/freesize, in which case we split and
        // only deallocate the area below the pthread structure. In the event of a
        // custom stack, the freeaddr/size will be the pthread structure itself, in
        // which case we shouldn't free anything (the final else case).
        freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);

        // describe just the remainder for deallocation when the pthread_t goes away
        t->freeaddr += freesize_stack;
        t->freesize -= freesize_stack;
    } else if (t == main_thread()) {
        freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
        uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
        freesize_stack = stackborder - freeaddr;
    } else {
        freesize_stack = 0;
    }

    mach_port_t kport = _pthread_kernel_thread(t);
    bool keep_thread_struct = false, needs_wake = false;
    semaphore_t custom_stack_sema = MACH_PORT_NULL;

    _pthread_dealloc_special_reply_port(t);
    _pthread_dealloc_reply_port(t);

    _PTHREAD_LOCK(_pthread_list_lock);

    // This piece of code interacts with pthread_join. It will always:
    // - set tl_exit_gate to MACH_PORT_DEAD (thread exited)
    // - set tl_exit_value to the value passed to pthread_exit()
    // - decrement _pthread_count, so that we can exit the process when all
    //   threads exited even if not all of them were joined.
    t->tl_exit_gate = MACH_PORT_DEAD;
    t->tl_exit_value = exit_value;
    should_exit = (--_pthread_count <= 0);

    // If we see a joiner, we prepost that the join has to succeed,
    // and the joiner is committed to finish (even if it was canceled)
    if (t->tl_join_ctx) {
        custom_stack_sema = _pthread_joiner_prepost_wake(t); // unsets tl_joinable
        needs_wake = true;
    }

    // Joinable threads that have no joiner yet are kept on the thread list
    // so that pthread_join() can later discover the thread when it is joined,
    // and will have to do the pthread_t cleanup.
    if (t->tl_joinable) {
        t->tl_joiner_cleans_up = keep_thread_struct = true;
    } else {
        TAILQ_REMOVE(&__pthread_head, t, tl_plist);
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    if (needs_wake) {
        // When we find a waiter, we want to drop the heavily contended list
        // lock before we do the syscall in _pthread_joiner_wake(). Then we
        // decide who gets to clean up the pthread_t, between the joiner and
        // the exiting thread:
        // - the joiner tries to set tl_join_ctx to NULL
        // - the exiting thread tries to set tl_joiner_cleans_up to true
        // Whoever does it first commits the other to clean up the pthread_t
        _pthread_joiner_wake(t);
        _PTHREAD_LOCK(_pthread_list_lock);
        if (t->tl_join_ctx) {
            t->tl_joiner_cleans_up = true;
            keep_thread_struct = true;
        }
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }

    //
    // /!\ dereferencing `t` past this point is not safe /!\
    //

    if (keep_thread_struct || t == main_thread()) {
        // Use the adjusted freesize of just the stack that we computed above.
        freesize = freesize_stack;
    } else {
        _pthread_introspection_thread_destroy(t);
    }

    // Check if there is nothing to free because the thread has a custom
    // stack allocation and is joinable.
    if (freesize == 0) {
        freeaddr = 0;
    }
    if (should_exit) {
        exitf(0);
    }
    __bsdthread_terminate((void *)freeaddr, freesize, kport, custom_stack_sema);
    PTHREAD_INTERNAL_CRASH(t, "thread didn't terminate");
}

PTHREAD_NORETURN
static void
_pthread_terminate_invoke(pthread_t t, void *exit_value)
{
#if PTHREAD_T_OFFSET
    void *p = NULL;
    // <rdar://problem/25688492> During pthread termination there is a race
    // between pthread_join and pthread_terminate; if the joiner is responsible
    // for cleaning up the pthread_t struct, then it may destroy some part of the
    // stack with it on 16k OSes. So that this doesn't cause _pthread_terminate()
    // to crash because its stack has been removed from under its feet, just make
    // sure termination happens in a part of the stack that is not on the same
    // page as the pthread_t.
    if (trunc_page((uintptr_t)__builtin_frame_address(0)) ==
            trunc_page((uintptr_t)t)) {
        p = alloca(PTHREAD_T_OFFSET);
    }
    // And this __asm__ volatile is needed to stop the compiler from optimising
    // away the alloca() completely.
    __asm__ volatile ("" : : "r"(p));
#endif
    _pthread_terminate(t, exit_value);
}

#pragma mark pthread start / body

/*
 * Create and start execution of a new thread.
 */
PTHREAD_NOINLINE PTHREAD_NORETURN
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
    _pthread_set_self_internal(self, needs_tsd_base_set);
    __pthread_started_thread(self);
    _pthread_exit(self, (self->fun)(self->arg));
}

PTHREAD_NORETURN
void
_pthread_start(pthread_t self, mach_port_t kport,
        __unused void *(*fun)(void *), __unused void *arg,
        __unused size_t stacksize, unsigned int pflags)
{
    bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);

    if (os_unlikely(pflags & PTHREAD_START_SUSPENDED)) {
        PTHREAD_INTERNAL_CRASH(0,
                "kernel without PTHREAD_START_SUSPENDED support");
    }
#if DEBUG
    PTHREAD_ASSERT(MACH_PORT_VALID(kport));
    PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
#endif
    // will mark the thread initialized
    _pthread_markcancel_if_canceled(self, kport);

    _pthread_body(self, !thread_tsd_bsd_set);
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs,
        void *stackaddr, size_t stacksize, void *freeaddr, size_t freesize)
{
#if DEBUG
    PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
#endif

    t->sig = _PTHREAD_SIG;
    t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
    t->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &t->err_no;
    if (attrs->schedset == 0) {
        t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
    } else {
        t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
                _pthread_unspecified_priority();
    }
    t->tsd[_PTHREAD_TSD_SLOT_PTR_MUNGE] = _pthread_ptr_munge_token;
    t->tl_has_custom_stack = (attrs->stackaddr != NULL);

    _PTHREAD_LOCK_INIT(t->lock);

    t->stackaddr = stackaddr;
    t->stackbottom = stackaddr - stacksize;
    t->freeaddr = freeaddr;
    t->freesize = freesize;

    t->guardsize = _pthread_attr_guardsize(attrs);
    t->tl_joinable = (attrs->detached == PTHREAD_CREATE_JOINABLE);
    t->inherit = attrs->inherit;
    t->tl_policy = attrs->policy;
    t->schedset = attrs->schedset;
    _pthread_attr_get_schedparam(attrs, &t->tl_param);
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
}

#pragma mark pthread public interface

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to tell whether this process has (or has had) at
 * least one thread apart from the main thread. There can be a race if a
 * thread is being created at the time of the call; it also does not tell
 * whether more than one thread exists at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return __is_threaded;
}


PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    mach_port_t kport = MACH_PORT_NULL;
    (void)_pthread_is_valid(t, &kport);
    return kport;
}

PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
    struct _pthread *p = NULL;

    /* No need to wait as mach port is already known */
    _PTHREAD_LOCK(_pthread_list_lock);

    TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
        if (_pthread_kernel_thread(p) == kernel_thread) {
            break;
        }
    }

    _PTHREAD_UNLOCK(_pthread_list_lock);

    return p;
}
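
// Illustrative usage sketch (not part of this file's build): round-tripping
// between a pthread_t and its Mach thread port with the two functions above.
// The example_ name is hypothetical.
#if 0
#include <mach/mach.h>
#include <pthread.h>

static void
example_roundtrip(void)
{
    pthread_t self = pthread_self();
    mach_port_t kport = pthread_mach_thread_np(self);
    // Looks the port back up in the global thread list.
    pthread_t found = pthread_from_mach_thread_np(kport);
    // For a live, registered thread these should match.
    (void)found;
}
#endif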

PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
    size_t size = 0;
    size_t stacksize;

    if (t == NULL) {
        return ESRCH; // XXX bug?
    }

    stacksize = t->stackaddr - t->stackbottom;

#if !defined(__arm__) && !defined(__arm64__)
    // The default rlimit based allocations will be provided with a stacksize
    // of the current limit and a freesize of the max. However, custom
    // allocations will just have the guard page to free. If we aren't in the
    // latter case, call into rlimit to determine the current stack size. In
    // the event that the current limit == max limit then we'll fall down the
    // fast path, but since it's unlikely that the limit is going to be lowered
    // after it's been changed to the max, we should be fine.
    //
    // Of course, on arm rlim_cur == rlim_max and there's only the one guard
    // page. So, we can skip all this there.
    if (t == main_thread() && stacksize + vm_page_size != t->freesize) {
        // We want to call getrlimit() just once, as it's relatively expensive
        static size_t rlimit_stack;

        if (rlimit_stack == 0) {
            struct rlimit limit;
            int ret = getrlimit(RLIMIT_STACK, &limit);

            if (ret == 0) {
                rlimit_stack = (size_t)limit.rlim_cur;
            }
        }

        if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
            return stacksize;
        } else {
            return rlimit_stack;
        }
    }
#endif /* !defined(__arm__) && !defined(__arm64__) */

    if (t == pthread_self() || t == main_thread()) {
        size = stacksize;
        goto out;
    }

    if (_pthread_validate_thread_and_list_lock(t)) {
        size = stacksize;
        _PTHREAD_UNLOCK(_pthread_list_lock);
    } else {
        size = ESRCH; // XXX bug?
    }

out:
    // <rdar://problem/42588315> binary compatibility issues force us to return
    // DEFAULT_STACK_SIZE here when we do not know the size of the stack
    return size ? size : DEFAULT_STACK_SIZE;
}

PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == main_thread()) {
        return t->stackaddr;
    }

    if (!_pthread_validate_thread_and_list_lock(t)) {
        return (void *)(uintptr_t)ESRCH; // XXX bug?
    }

    void *addr = t->stackaddr;
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return addr;
}


static mach_port_t
_pthread_reply_port(pthread_t t)
{
    void *p;
    if (t == NULL) {
        p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY);
    } else {
        p = t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY];
    }
    return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_set_reply_port(pthread_t t, mach_port_t reply_port)
{
    void *p = (void *)(uintptr_t)reply_port;
    if (t == NULL) {
        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_MIG_REPLY, p);
    } else {
        t->tsd[_PTHREAD_TSD_SLOT_MIG_REPLY] = p;
    }
}

static void
_pthread_dealloc_reply_port(pthread_t t)
{
    mach_port_t reply_port = _pthread_reply_port(t);
    if (reply_port != MACH_PORT_NULL) {
        mig_dealloc_reply_port(reply_port);
    }
}

static mach_port_t
_pthread_special_reply_port(pthread_t t)
{
    void *p;
    if (t == NULL) {
        p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
    } else {
        p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
    }
    return (mach_port_t)(uintptr_t)p;
}

static void
_pthread_dealloc_special_reply_port(pthread_t t)
{
    mach_port_t special_reply_port = _pthread_special_reply_port(t);
    if (special_reply_port != MACH_PORT_NULL) {
        thread_destruct_special_reply_port(special_reply_port,
                THREAD_SPECIAL_REPLY_PORT_ALL);
    }
}

pthread_t
pthread_main_thread_np(void)
{
    return main_thread();
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    return pthread_self() == main_thread();
}


/*
 * If we are passed a NULL pthread_t, return the current thread's thread_id,
 * so callers don't have to call pthread_self() themselves just to get their
 * own thread_id.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
    int res = 0;
    pthread_t self = pthread_self();

    if (thread_id == NULL) {
        return EINVAL;
    }

    if (thread == NULL || thread == self) {
        *thread_id = self->thread_id;
    } else if (!_pthread_validate_thread_and_list_lock(thread)) {
        res = ESRCH;
    } else {
        if (thread->thread_id == 0) {
            res = EINVAL;
        } else {
            *thread_id = thread->thread_id;
        }
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
    return res;
}
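
// Illustrative usage sketch (not part of this file's build): as the comment
// above notes, passing NULL asks for the calling thread's own id. The
// example_ name is hypothetical.
#if 0
#include <pthread.h>
#include <stdint.h>

static uint64_t
example_my_threadid(void)
{
    uint64_t tid = 0;
    pthread_threadid_np(NULL, &tid); // NULL means "the current thread"
    return tid;
}
#endif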

PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
    if (thread == pthread_self()) {
        strlcpy(threadname, thread->pthread_name, len);
        return 0;
    }

    if (!_pthread_validate_thread_and_list_lock(thread)) {
        return ESRCH;
    }

    strlcpy(threadname, thread->pthread_name, len);
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return 0;
}


int
pthread_setname_np(const char *name)
{
    int res;
    pthread_t self = pthread_self();

    size_t len = 0;
    if (name != NULL) {
        len = strlen(name);
    }

    /* prototype is in pthread_internals.h */
    res = __proc_info(5, getpid(), 2, (uint64_t)0, (void*)name, (int)len);
    if (res == 0) {
        if (len > 0) {
            strlcpy(self->pthread_name, name, MAXTHREADNAMESIZE);
        } else {
            bzero(self->pthread_name, MAXTHREADNAMESIZE);
        }
    }
    return res;
}
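
// Illustrative usage sketch (not part of this file's build): on this
// platform pthread_setname_np() names the *calling* thread, and the name
// can be read back with pthread_getname_np(). The example_ name is
// hypothetical.
#if 0
#include <pthread.h>

static void
example_thread_name(void)
{
    char name[64];
    pthread_setname_np("worker");                        // names the caller
    pthread_getname_np(pthread_self(), name, sizeof(name));
    // name now holds "worker"
}
#endif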

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_add_thread(pthread_t t, bool from_mach_thread)
{
    if (from_mach_thread) {
        _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
    }

    TAILQ_INSERT_TAIL(&__pthread_head, t, tl_plist);
    _pthread_count++;

    if (from_mach_thread) {
        _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }

    if (!from_mach_thread) {
        // PR-26275485: Mach threads will likely crash trying to run
        // introspection code. Since the fallout from the introspection code
        // not seeing the injected thread is less severe than crashing in the
        // introspection code, just don't make the call.
        _pthread_introspection_thread_create(t);
    }
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_undo_add_thread(pthread_t t, bool from_mach_thread)
{
    if (from_mach_thread) {
        _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_LOCK(_pthread_list_lock);
    }

    TAILQ_REMOVE(&__pthread_head, t, tl_plist);
    _pthread_count--;

    if (from_mach_thread) {
        _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
    } else {
        _PTHREAD_UNLOCK(_pthread_list_lock);
    }
}

PTHREAD_ALWAYS_INLINE
static inline void
__pthread_started_thread(pthread_t t)
{
    mach_port_t kport = _pthread_kernel_thread(t);
    if (os_slowpath(!MACH_PORT_VALID(kport))) {
        PTHREAD_CLIENT_CRASH(kport,
                "Unable to allocate thread port, possible port leak");
    }
    _pthread_introspection_thread_start(t);
}

#define _PTHREAD_CREATE_NONE 0x0
#define _PTHREAD_CREATE_FROM_MACH_THREAD 0x1
#define _PTHREAD_CREATE_SUSPENDED 0x2

static int
_pthread_create(pthread_t *thread, const pthread_attr_t *attrs,
        void *(*start_routine)(void *), void *arg, unsigned int create_flags)
{
    pthread_t t = NULL;
    void *stack = NULL;
    bool from_mach_thread = (create_flags & _PTHREAD_CREATE_FROM_MACH_THREAD);

    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    unsigned int flags = PTHREAD_START_CUSTOM;
    if (attrs->schedset != 0) {
        struct sched_param p;
        _pthread_attr_get_schedparam(attrs, &p);
        flags |= PTHREAD_START_SETSCHED;
        flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
        flags |= (p.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
    } else if (attrs->qosclass != 0) {
        flags |= PTHREAD_START_QOSCLASS;
        flags |= (attrs->qosclass & PTHREAD_START_QOSCLASS_MASK);
    }
    if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
        flags |= PTHREAD_START_SUSPENDED;
    }

    __is_threaded = 1;

    t = _pthread_allocate(attrs, &stack);
    if (t == NULL) {
        return EAGAIN;
    }

    t->arg = arg;
    t->fun = start_routine;
    __pthread_add_thread(t, from_mach_thread);

    if (__bsdthread_create(start_routine, arg, stack, t, flags) ==
            (pthread_t)-1) {
        if (errno == EMFILE) {
            PTHREAD_CLIENT_CRASH(0,
                    "Unable to allocate thread port, possible port leak");
        }
        __pthread_undo_add_thread(t, from_mach_thread);
        _pthread_deallocate(t, from_mach_thread);
        return EAGAIN;
    }

    if (create_flags & _PTHREAD_CREATE_SUSPENDED) {
        _pthread_markcancel_if_canceled(t, _pthread_kernel_thread(t));
    }

    // n.b. if a thread is created detached and exits, t will be invalid
    *thread = t;
    return 0;
}

int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg)
{
    unsigned int flags = _PTHREAD_CREATE_NONE;
    return _pthread_create(thread, attr, start_routine, arg, flags);
}
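
// Illustrative usage sketch (not part of this file's build): creating a
// joinable thread with a larger-than-default stack and waiting for it, via
// the public API implemented above. The example_ names are hypothetical.
#if 0
#include <pthread.h>
#include <stddef.h>

static void *
example_worker(void *arg)
{
    return arg; // returned value is observed by pthread_join()
}

static int
example_create_join(void)
{
    pthread_attr_t attr;
    pthread_t t;
    void *result = NULL;

    pthread_attr_init(&attr);
    // must be a page multiple >= PTHREAD_STACK_MIN (see setstacksize above)
    pthread_attr_setstacksize(&attr, 1024 * 1024);
    int rc = pthread_create(&t, &attr, example_worker, NULL);
    pthread_attr_destroy(&attr);
    if (rc != 0) return rc;          // e.g. EAGAIN on allocation failure
    return pthread_join(t, &result);
}
#endif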

int
pthread_create_from_mach_thread(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg)
{
    unsigned int flags = _PTHREAD_CREATE_FROM_MACH_THREAD;
    return _pthread_create(thread, attr, start_routine, arg, flags);
}

#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
/* Functions defined in machine-dependent files. */
PTHREAD_NOEXPORT void _pthread_setup_suspended(pthread_t th, void (*f)(pthread_t), void *sp);

PTHREAD_NORETURN
static void
_pthread_suspended_body(pthread_t self)
{
    _pthread_set_self(self);
    __pthread_started_thread(self);
    _pthread_exit(self, (self->fun)(self->arg));
}

static int
_pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attrs,
        void *(*start_routine)(void *), void *arg)
{
    pthread_t t;
    void *stack;
    mach_port_t kernel_thread = MACH_PORT_NULL;

    if (attrs == NULL) {
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    t = _pthread_allocate(attrs, &stack);
    if (t == NULL) {
        return EAGAIN;
    }

    if (thread_create(mach_task_self(), &kernel_thread) != KERN_SUCCESS) {
        _pthread_deallocate(t, false);
        return EAGAIN;
    }

    _pthread_set_kernel_thread(t, kernel_thread);
    (void)pthread_setschedparam_internal(t, kernel_thread,
            t->tl_policy, &t->tl_param);

    __is_threaded = 1;

    t->arg = arg;
    t->fun = start_routine;
    t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
    __pthread_add_thread(t, false);

    // Set up a suspended thread.
    _pthread_setup_suspended(t, _pthread_suspended_body, stack);
    *thread = t;
    return 0;
}
#endif // !defined(__OPEN_SOURCE__) && TARGET_OS_OSX

int
pthread_create_suspended_np(pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg)
{
#if !defined(__OPEN_SOURCE__) && TARGET_OS_OSX // 40703288
    if (_os_xbs_chrooted) {
        return _pthread_create_suspended_np(thread, attr, start_routine, arg);
    }
#endif
    unsigned int flags = _PTHREAD_CREATE_SUSPENDED;
    return _pthread_create(thread, attr, start_routine, arg, flags);
}


PTHREAD_NOEXPORT_VARIANT
int
pthread_detach(pthread_t thread)
{
    int res = 0;
    bool join = false, wake = false;

    if (!_pthread_validate_thread_and_list_lock(thread)) {
        return ESRCH;
    }

    if (!thread->tl_joinable) {
        res = EINVAL;
    } else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
        // Join the thread if it's already exited.
        join = true;
    } else {
        thread->tl_joinable = false; // _pthread_joiner_prepost_wake uses this
        if (thread->tl_join_ctx) {
            (void)_pthread_joiner_prepost_wake(thread);
            wake = true;
        }
    }
    _PTHREAD_UNLOCK(_pthread_list_lock);

    if (join) {
        pthread_join(thread, NULL);
    } else if (wake) {
        _pthread_joiner_wake(thread);
    }
    return res;
}
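
// Illustrative usage sketch (not part of this file's build): a
// fire-and-forget thread. After pthread_detach() succeeds the pthread_t must
// not be joined; as the code above shows, if the thread has already exited,
// detach performs the deferred join/cleanup itself. The example_ name is
// hypothetical.
#if 0
#include <pthread.h>

static void
example_fire_and_forget(void *(*fn)(void *), void *arg)
{
    pthread_t t;
    if (pthread_create(&t, NULL, fn, arg) == 0) {
        pthread_detach(t);
    }
}
#endif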

PTHREAD_NOEXPORT_VARIANT
int
pthread_kill(pthread_t th, int sig)
{
    if (sig < 0 || sig > NSIG) {
        return EINVAL;
    }

    mach_port_t kport = MACH_PORT_NULL;
    if (!_pthread_is_valid(th, &kport)) {
        return ESRCH; // Not a valid thread.
    }

    // Don't signal workqueue threads.
    if (th->wqthread != 0 && th->wqkillset == 0) {
        return ENOTSUP;
    }

    int ret = __pthread_kill(kport, sig);

    if (ret == -1) {
        ret = errno;
    }
    return ret;
}

PTHREAD_NOEXPORT_VARIANT
int
__pthread_workqueue_setkill(int enable)
{
    pthread_t self = pthread_self();

    _PTHREAD_LOCK(self->lock);
    self->wqkillset = enable ? 1 : 0;
    _PTHREAD_UNLOCK(self->lock);

    return 0;
}


/* For compatibility... */

pthread_t
_pthread_self(void)
{
    return pthread_self();
}

/*
 * Terminate a thread.
 */
extern int __disable_threadsignal(int);

PTHREAD_NORETURN
static void
_pthread_exit(pthread_t self, void *exit_value)
{
    struct __darwin_pthread_handler_rec *handler;

    // Disable signal delivery while we clean up
    __disable_threadsignal(1);

    // Set cancel state to disable and type to deferred
    _pthread_setcancelstate_exit(self, exit_value);

    while ((handler = self->__cleanup_stack) != 0) {
        (handler->__routine)(handler->__arg);
        self->__cleanup_stack = handler->__next;
    }
    _pthread_tsd_cleanup(self);

    // Clear per-thread semaphore cache
    os_put_cached_semaphore(SEMAPHORE_NULL);

    _pthread_terminate_invoke(self, exit_value);
}

void
pthread_exit(void *exit_value)
{
    pthread_t self = pthread_self();
    if (os_unlikely(self->wqthread)) {
        PTHREAD_CLIENT_CRASH(0, "pthread_exit() called from a thread "
                "not created by pthread_create()");
    }
    _pthread_exit(self, exit_value);
}


PTHREAD_NOEXPORT_VARIANT
int
pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
{
    if (!_pthread_validate_thread_and_list_lock(thread)) {
        return ESRCH;
    }

    if (policy) *policy = thread->tl_policy;
    if (param) *param = thread->tl_param;
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return 0;
}



PTHREAD_ALWAYS_INLINE
static inline int
pthread_setschedparam_internal(pthread_t thread, mach_port_t kport, int policy,
        const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    switch (policy) {
    case SCHED_OTHER:
        bases.ts.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.ts;
        count = POLICY_TIMESHARE_BASE_COUNT;
        break;
    case SCHED_FIFO:
        bases.fifo.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.fifo;
        count = POLICY_FIFO_BASE_COUNT;
        break;
    case SCHED_RR:
        bases.rr.base_priority = param->sched_priority;
        /* quantum isn't public yet */
        bases.rr.quantum = param->quantum;
        base = (policy_base_t)&bases.rr;
        count = POLICY_RR_BASE_COUNT;
        break;
    default:
        return EINVAL;
    }
    ret = thread_policy(kport, policy, base, count, TRUE);
    return (ret != KERN_SUCCESS) ? EINVAL : 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
    mach_port_t kport = MACH_PORT_NULL;
    int bypass = 1;

    // since the main thread will not get de-allocated from underneath us
    if (t == pthread_self() || t == main_thread()) {
        kport = _pthread_kernel_thread(t);
    } else {
        bypass = 0;
        if (!_pthread_is_valid(t, &kport)) {
            return ESRCH;
        }
    }

    int res = pthread_setschedparam_internal(t, kport, policy, param);
    if (res) return res;

    if (bypass) {
        _PTHREAD_LOCK(_pthread_list_lock);
    } else if (!_pthread_validate_thread_and_list_lock(t)) {
        // Ensure the thread is still valid.
        return ESRCH;
    }

    t->tl_policy = policy;
    t->tl_param = *param;
    _PTHREAD_UNLOCK(_pthread_list_lock);
    return 0;
}


int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}
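
// Illustrative usage sketch (not part of this file's build): raising a
// thread's priority within the [sched_get_priority_min(),
// sched_get_priority_max()] band reported above, using the public API
// implemented in pthread_setschedparam(). The example_ name is hypothetical.
#if 0
#include <pthread.h>
#include <sched.h>

static int
example_boost(pthread_t t)
{
    struct sched_param param = { 0 };
    param.sched_priority = sched_get_priority_max(SCHED_OTHER);
    return pthread_setschedparam(t, SCHED_OTHER, &param);
}
#endif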

int
pthread_equal(pthread_t t1, pthread_t t2)
{
    return (t1 == t2);
}

/*
 * Force LLVM not to optimise this to a call to __pthread_set_self; if it
 * does, then _pthread_set_self won't be bound when secondary threads try
 * to start up.
 */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
#if VARIANT_DYLD
    if (os_likely(!p)) {
        return _pthread_set_self_dyld();
    }
#endif // VARIANT_DYLD
    _pthread_set_self_internal(p, true);
}

#if VARIANT_DYLD
// _pthread_set_self_dyld is noinline+noexport to allow the option for
// static libsyscall to adopt this as the entry point from mach_init if
// desired
PTHREAD_NOINLINE PTHREAD_NOEXPORT
void
_pthread_set_self_dyld(void)
{
    pthread_t p = main_thread();
    p->thread_id = __thread_selfid();

    if (os_unlikely(p->thread_id == -1ull)) {
        PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
    }

    // <rdar://problem/40930651> pthread self and the errno address are the
    // bare minimum TSD setup that dyld needs to actually function. Without
    // this, TSD access will fail and crash if it uses bits of Libc prior to
    // library initialization. __pthread_init will finish the initialization
    // during library init.
    p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
    p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
    _thread_set_tsd_base(&p->tsd[0]);
}
#endif // VARIANT_DYLD

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
    p->thread_id = __thread_selfid();

    if (os_unlikely(p->thread_id == -1ull)) {
        PTHREAD_INTERNAL_CRASH(0, "failed to set thread_id");
    }

    if (needs_tsd_base_set) {
        _thread_set_tsd_base(&p->tsd[0]);
    }
}


// <rdar://problem/28984807> pthread_once should have an acquire barrier
PTHREAD_ALWAYS_INLINE
static inline void
_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
{
    if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
        _os_once(predicate, context, function);
        OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
    }
}

struct _pthread_once_context {
    pthread_once_t *pthread_once;
    void (*routine)(void);
};

static void
__pthread_once_handler(void *context)
{
    struct _pthread_once_context *ctx = context;
    pthread_cleanup_push((void*)__os_once_reset, &ctx->pthread_once->once);
    ctx->routine();
    pthread_cleanup_pop(0);
    ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    struct _pthread_once_context ctx = { once_control, init_routine };
    do {
        _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
    } while (once_control->sig == _PTHREAD_ONCE_SIG_init);
    return 0;
}
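
// Illustrative usage sketch (not part of this file's build): the classic
// one-time-initialization pattern served by pthread_once(). The acquire
// barrier discussed above guarantees that threads observing the "done"
// state also observe everything the init routine wrote. The example_ names
// are hypothetical.
#if 0
#include <pthread.h>

static pthread_once_t example_once = PTHREAD_ONCE_INIT;
static int example_global;

static void
example_init(void)
{
    example_global = 42; // runs exactly once, even with concurrent callers
}

static int
example_get(void)
{
    pthread_once(&example_once, example_init);
    return example_global; // safe: publication ordered by the once barrier
}
#endif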


int
pthread_getconcurrency(void)
{
    return pthread_concurrency;
}

int
pthread_setconcurrency(int new_level)
{
    if (new_level < 0) {
        return EINVAL;
    }
    pthread_concurrency = new_level;
    return 0;
}

#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
    if (_pthread_malloc) {
        return _pthread_malloc(sz);
    } else {
        return NULL;
    }
}

void
free(void *p)
{
    if (_pthread_free) {
        _pthread_free(p);
    }
}
#endif // VARIANT_STATIC

/*
 * Perform package initialization - called automatically when application starts
 */
struct ProgramVars; /* forward reference */

#if !VARIANT_DYLD
static unsigned long
_pthread_strtoul(const char *p, const char **endptr, int base)
{
    uintptr_t val = 0;

    // Expect hex string starting with "0x"
    if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
        p += 2;
        while (1) {
            char c = *p;
            if ('0' <= c && c <= '9') {
                val = (val << 4) + (c - '0');
            } else if ('a' <= c && c <= 'f') {
                val = (val << 4) + (c - 'a' + 10);
            } else if ('A' <= c && c <= 'F') {
                val = (val << 4) + (c - 'A' + 10);
            } else {
                break;
            }
            ++p;
        }
    }

    *endptr = (char *)p;
    return val;
}

static int
parse_main_stack_params(const char *apple[],
        void **stackaddr,
        size_t *stacksize,
        void **allocaddr,
        size_t *allocsize)
{
    const char *p = _simple_getenv(apple, "main_stack");
    if (!p) return 0;

    int ret = 0;
    const char *s = p;

    *stackaddr = _pthread_strtoul(s, &s, 16);
    if (*s != ',') goto out;

    *stacksize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocaddr = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',') goto out;

    *allocsize = _pthread_strtoul(s + 1, &s, 16);
    if (*s != ',' && *s != 0) goto out;

    ret = 1;
out:
    bzero((char *)p, strlen(p));
    return ret;
}
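
// Illustrative sketch (not part of this file's build): the "main_stack"
// entry in the apple[] array is four comma-separated hex fields, parsed in
// the order above. A hypothetical LP64 value might look like:
//
//   main_stack=0x7ffeefc00000,0x800000,0x7ffeebc00000,0x4001000
//              stackaddr      stacksize allocaddr     allocsize
//
// The string is bzero'ed after parsing so the values don't linger in the
// process environment.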

static void
parse_ptr_munge_params(const char *envp[], const char *apple[])
{
    const char *p, *s;
    p = _simple_getenv(apple, "ptr_munge");
    if (p) {
        _pthread_ptr_munge_token = _pthread_strtoul(p, &s, 16);
        bzero((char *)p, strlen(p));
    }
#if !DEBUG
    if (_pthread_ptr_munge_token) return;
#endif
    p = _simple_getenv(envp, "PTHREAD_PTR_MUNGE_TOKEN");
    if (p) {
        uintptr_t t = _pthread_strtoul(p, &s, 16);
        if (t) _pthread_ptr_munge_token = t;
    }
}

int
__pthread_init(const struct _libpthread_functions *pthread_funcs,
        const char *envp[], const char *apple[],
        const struct ProgramVars *vars __unused)
{
    // Save our provided pushed-down functions
    if (pthread_funcs) {
        exitf = pthread_funcs->exit;

        if (pthread_funcs->version >= 2) {
            _pthread_malloc = pthread_funcs->malloc;
            _pthread_free = pthread_funcs->free;
        }
    }

    //
    // Get host information
    //

    kern_return_t kr;
    host_flavor_t flavor = HOST_PRIORITY_INFO;
    mach_msg_type_number_t count = HOST_PRIORITY_INFO_COUNT;
    host_priority_info_data_t priority_info;
    host_t host = mach_host_self();
    kr = host_info(host, flavor, (host_info_t)&priority_info, &count);
    if (kr != KERN_SUCCESS) {
        PTHREAD_INTERNAL_CRASH(kr, "host_info() failed");
    } else {
        default_priority = (uint8_t)priority_info.user_priority;
        min_priority = (uint8_t)priority_info.minimum_priority;
        max_priority = (uint8_t)priority_info.maximum_priority;
    }
    mach_port_deallocate(mach_task_self(), host);

    //
    // Set up the main thread structure
    //

    // Get the address and size of the main thread's stack from the kernel.
    void *stackaddr = 0;
    size_t stacksize = 0;
    void *allocaddr = 0;
    size_t allocsize = 0;
    if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
            stackaddr == NULL || stacksize == 0) {
        // Fall back to previous behavior.
        size_t len = sizeof(stackaddr);
        int mib[] = { CTL_KERN, KERN_USRSTACK };
        if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
#if defined(__LP64__)
            stackaddr = (void *)USRSTACK64;
#else
            stackaddr = (void *)USRSTACK;
#endif
        }
        stacksize = DFLSSIZ;
        allocaddr = 0;
        allocsize = 0;
    }

    // Initialize random ptr_munge token from the kernel.
    parse_ptr_munge_params(envp, apple);

1895 // libpthread.a in dyld "owns" the main thread structure itself and sets
1896 // up the tsd to point to it. So take the pthread_self() from there
1897 // and make it our main thread pointer.
1898 pthread_t thread = (pthread_t)_pthread_getspecific_direct(
1899 _PTHREAD_TSD_SLOT_PTHREAD_SELF);
1900 PTHREAD_ASSERT(thread);
1901 _main_thread_ptr = thread;
1902
1903 PTHREAD_ASSERT(_pthread_attr_default.qosclass ==
1904 _pthread_default_priority(0));
1905 _pthread_struct_init(thread, &_pthread_attr_default,
1906 stackaddr, stacksize, allocaddr, allocsize);
1907 thread->tl_joinable = true;
1908
1909 // Finish initialization with common code that is reinvoked on the
1910 // child side of a fork.
1911
1912 // Finishes initialization of main thread attributes.
1913 // Initializes the thread list and adds the main thread.
1914 // Calls _pthread_set_self() to prepare the main thread for execution.
1915 _pthread_main_thread_init(thread);
1916
1917 struct _pthread_registration_data registration_data;
1918 // Set up kernel entry points with __bsdthread_register.
1919 _pthread_bsdthread_init(&registration_data);
1920
1921 // Have pthread_key and pthread_mutex do their init envvar checks.
1922 _pthread_key_global_init(envp);
1923 _pthread_mutex_global_init(envp, &registration_data);
1924
1925 #if PTHREAD_DEBUG_LOG
1926 _SIMPLE_STRING path = _simple_salloc();
1927 _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
1928 _pthread_debuglog = open(_simple_string(path),
1929 O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
1930 _simple_sfree(path);
1931 _pthread_debugstart = mach_absolute_time();
1932 #endif
1933
1934 return 0;
1935 }
1936 #endif // !VARIANT_DYLD
1937
1938 PTHREAD_NOEXPORT void
1939 _pthread_main_thread_init(pthread_t p)
1940 {
1941 TAILQ_INIT(&__pthread_head);
1942 _PTHREAD_LOCK_INIT(_pthread_list_lock);
1943 _PTHREAD_LOCK_INIT(p->lock);
1944 _pthread_set_kernel_thread(p, mach_thread_self());
1945 _pthread_set_reply_port(p, mach_reply_port());
1946 p->__cleanup_stack = NULL;
1947 p->tl_join_ctx = NULL;
1948 p->tl_exit_gate = MACH_PORT_NULL;
1949 p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
1950 p->tsd[__TSD_MACH_SPECIAL_REPLY] = 0;
1951 p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
1952
1953 // Initialize the list of threads with the new main thread.
1954 TAILQ_INSERT_HEAD(&__pthread_head, p, tl_plist);
1955 _pthread_count = 1;
1956
1957 _pthread_introspection_thread_start(p);
1958 }
1959
1960 int
1961 sched_yield(void)
1962 {
1963 swtch_pri(0);
1964 return 0;
1965 }
1966
1967 // XXX remove
1968 void
1969 cthread_yield(void)
1970 {
1971 sched_yield();
1972 }
1973
1974 void
1975 pthread_yield_np(void)
1976 {
1977 sched_yield();
1978 }
1979
1980
1981
1982 // Libsystem knows about this symbol and exports it to libsyscall
1983 PTHREAD_NOEXPORT_VARIANT
1984 void
1985 _pthread_clear_qos_tsd(mach_port_t thread_port)
1986 {
1987 if (thread_port == MACH_PORT_NULL || (uintptr_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF) == thread_port) {
1988 /* Clear the current thread's TSD, that can be done inline. */
1989 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
1990 _pthread_unspecified_priority());
1991 } else {
1992 pthread_t p;
1993
1994 _PTHREAD_LOCK(_pthread_list_lock);
1995
1996 TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
1997 mach_port_t kp = _pthread_kernel_thread(p);
1998 if (thread_port == kp) {
1999 p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] =
2000 _pthread_unspecified_priority();
2001 break;
2002 }
2003 }
2004
2005 _PTHREAD_UNLOCK(_pthread_list_lock);
2006 }
2007 }
2008
2009
2010 #pragma mark pthread/stack_np.h public interface
2011
2012
2013 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
2014 typedef uintptr_t frame_data_addr_t;
2015
2016 struct frame_data {
2017 frame_data_addr_t frame_addr_next;
2018 frame_data_addr_t ret_addr;
2019 };
2020 #else
2021 #error ********** Unimplemented architecture
2022 #endif
2023
2024 uintptr_t
2025 pthread_stack_frame_decode_np(uintptr_t frame_addr, uintptr_t *return_addr)
2026 {
2027 struct frame_data *frame = (struct frame_data *)frame_addr;
2028
2029 if (return_addr) {
2030 *return_addr = (uintptr_t)frame->ret_addr;
2031 }
2032
2033 return (uintptr_t)frame->frame_addr_next;
2034 }
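
/*
 * A minimal backtrace sketch using the decoder above; the iteration cap
 * is an arbitrary safety limit chosen for illustration:
 *
 *	uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
 *	uintptr_t ret;
 *	for (int i = 0; i < 64 && frame != 0; i++) {
 *		frame = pthread_stack_frame_decode_np(frame, &ret);
 *		if (ret == 0) break;
 *		// ret is the return address recorded in that frame
 *	}
 */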
2035
2036
2037 #pragma mark pthread workqueue support routines
2038
2039
2040 PTHREAD_NOEXPORT void
2041 _pthread_bsdthread_init(struct _pthread_registration_data *data)
2042 {
2043 bzero(data, sizeof(*data));
2044 data->version = sizeof(struct _pthread_registration_data);
2045 data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
2046 data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
2047 data->tsd_offset = offsetof(struct _pthread, tsd);
2048 data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
2049
2050 int rv = __bsdthread_register(thread_start, start_wqthread, (int)PTHREAD_SIZE,
2051 (void*)data, (uintptr_t)sizeof(*data), data->dispatch_queue_offset);
2052
2053 if (rv > 0) {
2054 int required_features =
2055 PTHREAD_FEATURE_FINEPRIO |
2056 PTHREAD_FEATURE_BSDTHREADCTL |
2057 PTHREAD_FEATURE_SETSELF |
2058 PTHREAD_FEATURE_QOS_MAINTENANCE |
2059 PTHREAD_FEATURE_QOS_DEFAULT;
2060 if ((rv & required_features) != required_features) {
2061 PTHREAD_INTERNAL_CRASH(rv, "Missing required kernel support");
2062 }
2063 __pthread_supported_features = rv;
2064 }
2065
2066 /*
2067 * TODO: differentiate between (-1, EINVAL) after fork (which has the side
2068 * effect of resetting the child's stack_addr_hint before bailing out) and
2069 * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
2070 * the latter as fatal.
2071 *
2072 * <rdar://problem/36451838>
2073 */
2074
2075 pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;
2076
2077 if (_pthread_priority_thread_qos(main_qos) != THREAD_QOS_UNSPECIFIED) {
2078 _pthread_set_main_qos(main_qos);
2079 main_thread()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
2080 }
2081
2082 if (data->stack_addr_hint) {
2083 __pthread_stack_hint = data->stack_addr_hint;
2084 }
2085
2086 if (__libdispatch_workerfunction != NULL) {
2087 // prepare the kernel for workq action
2088 (void)__workq_open();
2089 }
2090 }
2091
2092 PTHREAD_NOINLINE
2093 static void
2094 _pthread_wqthread_legacy_worker_wrap(pthread_priority_t pp)
2095 {
2096 /* Old thread priorities are inverted from where we have them in
2097 * the new flexible priority scheme: zero is the highest priority,
2098 * decreasing through 2, with background at 3.
2099 */
2100 pthread_workqueue_function_t func = (pthread_workqueue_function_t)__libdispatch_workerfunction;
2101 bool overcommit = (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
2102 int opts = overcommit ? WORKQ_ADDTHREADS_OPTION_OVERCOMMIT : 0;
2103
2104 switch (_pthread_priority_thread_qos(pp)) {
2105 case THREAD_QOS_USER_INITIATED:
2106 return (*func)(WORKQ_HIGH_PRIOQUEUE, opts, NULL);
2107 case THREAD_QOS_LEGACY:
2108 /* B&I builders can't pass a QOS_CLASS_DEFAULT thread to dispatch, for fear of the QoS being
2109 * picked up by NSThread (et al) and transported around the system. So change the TSD to
2110 * make this thread look like QOS_CLASS_USER_INITIATED even though it will still run as legacy.
2111 */
2112 _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS,
2113 _pthread_priority_make_from_thread_qos(THREAD_QOS_USER_INITIATED, 0, 0));
2114 return (*func)(WORKQ_DEFAULT_PRIOQUEUE, opts, NULL);
2115 case THREAD_QOS_UTILITY:
2116 return (*func)(WORKQ_LOW_PRIOQUEUE, opts, NULL);
2117 case THREAD_QOS_BACKGROUND:
2118 return (*func)(WORKQ_BG_PRIOQUEUE, opts, NULL);
2119 }
2120 PTHREAD_INTERNAL_CRASH(pp, "Invalid pthread priority for the legacy interface");
2121 }
2122
2123 PTHREAD_ALWAYS_INLINE
2124 static inline pthread_priority_t
2125 _pthread_wqthread_priority(int flags)
2126 {
2127 pthread_priority_t pp = 0;
2128 thread_qos_t qos;
2129
2130 if (flags & WQ_FLAG_THREAD_KEVENT) {
2131 pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
2132 }
2133 if (flags & WQ_FLAG_THREAD_EVENT_MANAGER) {
2134 return pp | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
2135 }
2136
2137 if (flags & WQ_FLAG_THREAD_OVERCOMMIT) {
2138 pp |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2139 }
2140 if (flags & WQ_FLAG_THREAD_PRIO_QOS) {
2141 qos = (thread_qos_t)(flags & WQ_FLAG_THREAD_PRIO_MASK);
2142 pp = _pthread_priority_make_from_thread_qos(qos, 0, pp);
2143 } else if (flags & WQ_FLAG_THREAD_PRIO_SCHED) {
2144 pp |= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
2145 pp |= (flags & WQ_FLAG_THREAD_PRIO_MASK);
2146 } else {
2147 PTHREAD_INTERNAL_CRASH(flags, "Missing priority");
2148 }
2149 return pp;
2150 }
2151
2152 PTHREAD_NOINLINE
2153 static void
2154 _pthread_wqthread_setup(pthread_t self, mach_port_t kport, void *stacklowaddr,
2155 int flags)
2156 {
2157 void *stackaddr = self;
2158 size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
2159
2160 _pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize,
2161 PTHREAD_ALLOCADDR(stackaddr, stacksize),
2162 PTHREAD_ALLOCSIZE(stackaddr, stacksize));
2163
2164 _pthread_set_kernel_thread(self, kport);
2165 self->wqthread = 1;
2166 self->wqkillset = 0;
2167 self->tl_joinable = false;
2168 self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
2169
2170 // Update the running thread count and set childrun bit.
2171 bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
2172 _pthread_set_self_internal(self, !thread_tsd_base_set);
2173 __pthread_add_thread(self, false);
2174 __pthread_started_thread(self);
2175 }
2176
2177 PTHREAD_NORETURN PTHREAD_NOINLINE
2178 static void
2179 _pthread_wqthread_exit(pthread_t self)
2180 {
2181 pthread_priority_t pp;
2182 thread_qos_t qos;
2183
2184 pp = (pthread_priority_t)self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS];
2185 qos = _pthread_priority_thread_qos(pp);
2186 if (qos == THREAD_QOS_UNSPECIFIED || qos > WORKQ_THREAD_QOS_CLEANUP) {
2187 // Reset QoS to something low for the cleanup process
2188 pp = _pthread_priority_make_from_thread_qos(WORKQ_THREAD_QOS_CLEANUP, 0, 0);
2189 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
2190 }
2191
2192 _pthread_exit(self, NULL);
2193 }
2194
2195 // workqueue entry point from kernel
2196 void
2197 _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr,
2198 void *keventlist, int flags, int nkevents)
2199 {
2200 if ((flags & WQ_FLAG_THREAD_REUSE) == 0) {
2201 _pthread_wqthread_setup(self, kport, stacklowaddr, flags);
2202 }
2203
2204 pthread_priority_t pp;
2205 if (flags & WQ_FLAG_THREAD_OUTSIDEQOS) {
2206 self->wqoutsideqos = 1;
2207 pp = _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0,
2208 _PTHREAD_PRIORITY_FALLBACK_FLAG);
2209 } else {
2210 self->wqoutsideqos = 0;
2211 pp = _pthread_wqthread_priority(flags);
2212 }
2213
2214 self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = (void *)pp;
2215
2216 // Avoid spills onto the stack; try hard to keep used stack space minimal.
2217 if (nkevents == WORKQ_EXIT_THREAD_NKEVENT) {
2218 goto exit;
2219 } else if (flags & WQ_FLAG_THREAD_WORKLOOP) {
2220 self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
2221 self->wq_retop = WQOPS_THREAD_WORKLOOP_RETURN;
2222 self->wq_kqid_ptr = ((kqueue_id_t *)keventlist - 1);
2223 self->arg = keventlist;
2224 self->wq_nevents = nkevents;
2225 } else if (flags & WQ_FLAG_THREAD_KEVENT) {
2226 self->fun = (void *(*)(void*))__libdispatch_keventfunction;
2227 self->wq_retop = WQOPS_THREAD_KEVENT_RETURN;
2228 self->wq_kqid_ptr = NULL;
2229 self->arg = keventlist;
2230 self->wq_nevents = nkevents;
2231 } else {
2232 self->fun = (void *(*)(void*))__libdispatch_workerfunction;
2233 self->wq_retop = WQOPS_THREAD_RETURN;
2234 self->wq_kqid_ptr = NULL;
2235 self->arg = (void *)(uintptr_t)pp;
2236 self->wq_nevents = 0;
2237 if (os_likely(__workq_newapi)) {
2238 (*__libdispatch_workerfunction)(pp);
2239 } else {
2240 _pthread_wqthread_legacy_worker_wrap(pp);
2241 }
2242 goto just_return;
2243 }
2244
2245 if (nkevents > 0) {
2246 kevent_errors_retry:
2247 if (self->wq_retop == WQOPS_THREAD_WORKLOOP_RETURN) {
2248 ((pthread_workqueue_function_workloop_t)self->fun)
2249 (self->wq_kqid_ptr, &self->arg, &self->wq_nevents);
2250 } else {
2251 ((pthread_workqueue_function_kevent_t)self->fun)
2252 (&self->arg, &self->wq_nevents);
2253 }
2254 int rc = __workq_kernreturn(self->wq_retop, self->arg, self->wq_nevents, 0);
2255 if (os_unlikely(rc > 0)) {
2256 self->wq_nevents = rc;
2257 goto kevent_errors_retry;
2258 }
2259 if (os_unlikely(rc < 0)) {
2260 PTHREAD_INTERNAL_CRASH(self->err_no, "kevent (workloop) failed");
2261 }
2262 } else {
2263 just_return:
2264 __workq_kernreturn(self->wq_retop, NULL, 0, 0);
2265 }
2266
2267 exit:
2268 _pthread_wqthread_exit(self);
2269 }
2270
2271
2272 #pragma mark pthread workqueue API for libdispatch
2273
2274
2275 _Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
2276 "Kernel and userland should agree on the event list size");
2277
2278 void
2279 pthread_workqueue_setdispatchoffset_np(int offset)
2280 {
2281 __libdispatch_offset = offset;
2282 }
2283
2284 static int
2285 pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
2286 pthread_workqueue_function_kevent_t kevent_func,
2287 pthread_workqueue_function_workloop_t workloop_func)
2288 {
2289 int res = EBUSY;
2290 if (__libdispatch_workerfunction == NULL) {
2291 // Check whether the kernel supports new SPIs
2292 res = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, __libdispatch_offset, kevent_func != NULL ? 0x01 : 0x00);
2293 if (res == -1) {
2294 res = ENOTSUP;
2295 } else {
2296 __libdispatch_workerfunction = queue_func;
2297 __libdispatch_keventfunction = kevent_func;
2298 __libdispatch_workloopfunction = workloop_func;
2299
2300 // Prepare the kernel for workq action
2301 (void)__workq_open();
2302 if (__is_threaded == 0) {
2303 __is_threaded = 1;
2304 }
2305 }
2306 }
2307 return res;
2308 }
2309
2310 int
2311 _pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
2312 pthread_workqueue_function_kevent_t kevent_func,
2313 pthread_workqueue_function_workloop_t workloop_func,
2314 int offset, int flags)
2315 {
2316 if (flags != 0) {
2317 return ENOTSUP;
2318 }
2319
2320 __workq_newapi = true;
2321 __libdispatch_offset = offset;
2322
2323 int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
2324 return rv;
2325 }
2326
2327 int
2328 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
2329 pthread_workqueue_function_kevent_t kevent_func,
2330 int offset, int flags)
2331 {
2332 return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
2333 }
2334
2335 int
2336 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
2337 {
2338 return _pthread_workqueue_init_with_kevent(func, NULL, offset, flags);
2339 }
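
/*
 * A minimal bring-up sketch for the modern path; my_worker2 is a
 * placeholder name, and the QoS encoding helper comes from qos_private.h:
 *
 *	static void my_worker2(pthread_priority_t pp)
 *	{
 *		// drain work appropriate for priority pp, then return
 *	}
 *
 *	int err = _pthread_workqueue_init(my_worker2, 0, 0);
 *	if (err == 0) {
 *		err = _pthread_workqueue_addthreads(1,
 *				_pthread_qos_class_encode(QOS_CLASS_DEFAULT, 0, 0));
 *	}
 */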
2340
2341 int
2342 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
2343 {
2344 return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
2345 }
2346
2347 int
2348 _pthread_workqueue_supported(void)
2349 {
2350 if (os_unlikely(!__pthread_supported_features)) {
2351 PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
2352 }
2353
2354 return __pthread_supported_features;
2355 }
2356
2357 int
2358 pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
2359 {
2360 int res = 0;
2361
2362 // Cannot add threads without a worker function registered.
2363 if (__libdispatch_workerfunction == NULL) {
2364 return EPERM;
2365 }
2366
2367 pthread_priority_t kp = 0;
2368 int compat_priority = queue_priority & WQ_FLAG_THREAD_PRIO_MASK;
2369 int flags = 0;
2370
2371 if (options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) {
2372 flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
2373 }
2374
2375 #pragma clang diagnostic push
2376 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2377 kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
2378 #pragma clang diagnostic pop
2379
2380 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)kp);
2381 if (res == -1) {
2382 res = errno;
2383 }
2384 return res;
2385 }
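
/*
 * The corresponding legacy flow, sketched with a placeholder worker; the
 * priority bands are the WORKQ_*_PRIOQUEUE constants consumed by the
 * legacy wrapper above:
 *
 *	static void my_worker(int priority, int options, void *ctxt)
 *	{
 *		// drain work for this priority band, then return
 *	}
 *
 *	if (pthread_workqueue_setdispatch_np(my_worker) == 0) {
 *		pthread_workqueue_addthreads_np(WORKQ_DEFAULT_PRIOQUEUE,
 *				WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, 1);
 *	}
 */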
2386
2387 bool
2388 _pthread_workqueue_should_narrow(pthread_priority_t pri)
2389 {
2390 int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
2391 if (res == -1) {
2392 return false;
2393 }
2394 return res;
2395 }
2396
2397 int
2398 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
2399 {
2400 int res = 0;
2401
2402 if (__libdispatch_workerfunction == NULL) {
2403 return EPERM;
2404 }
2405
2406 #if TARGET_OS_OSX
2407 // <rdar://problem/37687655> Legacy simulators fail to boot
2408 //
2409 // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
2410 // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
2411 // validates and rejects.
2412 //
2413 // As a workaround, forcefully unset this bit that cannot be set here
2414 // anyway.
2415 priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
2416 #endif
2417
2418 res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
2419 if (res == -1) {
2420 res = errno;
2421 }
2422 return res;
2423 }
2424
2425 int
2426 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority)
2427 {
2428 int res = __workq_kernreturn(WQOPS_SET_EVENT_MANAGER_PRIORITY, NULL, (int)priority, 0);
2429 if (res == -1) {
2430 res = errno;
2431 }
2432 return res;
2433 }
2434
2435 int
2436 _pthread_workloop_create(uint64_t workloop_id, uint64_t options, pthread_attr_t *attr)
2437 {
2438 struct kqueue_workloop_params params = {
2439 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2440 .kqwlp_id = workloop_id,
2441 .kqwlp_flags = 0,
2442 };
2443
2444 if (!attr) {
2445 return EINVAL;
2446 }
2447
2448 if (attr->schedset) {
2449 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_PRI;
2450 params.kqwlp_sched_pri = attr->param.sched_priority;
2451 }
2452
2453 if (attr->policyset) {
2454 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_SCHED_POL;
2455 params.kqwlp_sched_pol = attr->policy;
2456 }
2457
2458 if (attr->cpupercentset) {
2459 params.kqwlp_flags |= KQ_WORKLOOP_CREATE_CPU_PERCENT;
2460 params.kqwlp_cpu_percent = attr->cpupercent;
2461 params.kqwlp_cpu_refillms = attr->refillms;
2462 }
2463
2464 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_CREATE, 0, &params,
2465 sizeof(params));
2466 if (res == -1) {
2467 res = errno;
2468 }
2469 return res;
2470 }
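
/*
 * A minimal sketch of creating and destroying a workloop with a fixed
 * scheduling priority; the identifier and priority are invented values:
 *
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	struct sched_param param = { .sched_priority = 47 };
 *	pthread_attr_setschedparam(&attr, &param);	// sets attr->schedset
 *
 *	uint64_t kqwl_id = 0x1234;
 *	int err = _pthread_workloop_create(kqwl_id, 0, &attr);
 *	if (err == 0) {
 *		// ... attach kevents to the workloop by id ...
 *		_pthread_workloop_destroy(kqwl_id);
 *	}
 */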
2471
2472 int
2473 _pthread_workloop_destroy(uint64_t workloop_id)
2474 {
2475 struct kqueue_workloop_params params = {
2476 .kqwlp_version = sizeof(struct kqueue_workloop_params),
2477 .kqwlp_id = workloop_id,
2478 };
2479
2480 int res = __kqueue_workloop_ctl(KQ_WORKLOOP_DESTROY, 0, &params,
2481 sizeof(params));
2482 if (res == -1) {
2483 res = errno;
2484 }
2485 return res;
2486 }
2487
2488
2489 #pragma mark Introspection SPI for libpthread.
2490
2491
2492 static pthread_introspection_hook_t _pthread_introspection_hook;
2493
2494 pthread_introspection_hook_t
2495 pthread_introspection_hook_install(pthread_introspection_hook_t hook)
2496 {
2497 pthread_introspection_hook_t prev;
2498 prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
2499 return prev;
2500 }
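
/*
 * A minimal sketch of a chaining client hook; callers typically preserve
 * the previously installed hook and invoke it from their own:
 *
 *	static pthread_introspection_hook_t prev_hook;
 *
 *	static void my_hook(unsigned int event, pthread_t thread,
 *			void *addr, size_t size)
 *	{
 *		if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
 *			// thread was just allocated at addr (size bytes)
 *		}
 *		if (prev_hook) prev_hook(event, thread, addr, size);
 *	}
 *
 *	prev_hook = pthread_introspection_hook_install(my_hook);
 */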
2501
2502 PTHREAD_NOINLINE
2503 static void
2504 _pthread_introspection_hook_callout_thread_create(pthread_t t)
2505 {
2506 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
2507 PTHREAD_SIZE);
2508 }
2509
2510 static inline void
2511 _pthread_introspection_thread_create(pthread_t t)
2512 {
2513 if (os_fastpath(!_pthread_introspection_hook)) return;
2514 _pthread_introspection_hook_callout_thread_create(t);
2515 }
2516
2517 PTHREAD_NOINLINE
2518 static void
2519 _pthread_introspection_hook_callout_thread_start(pthread_t t)
2520 {
2521 size_t freesize;
2522 void *freeaddr;
2523 if (t == main_thread()) {
2524 size_t stacksize = t->stackaddr - t->stackbottom;
2525 freesize = stacksize + t->guardsize;
2526 freeaddr = t->stackaddr - freesize;
2527 } else {
2528 freesize = t->freesize - PTHREAD_SIZE;
2529 freeaddr = t->freeaddr;
2530 }
2531 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
2532 freeaddr, freesize);
2533 }
2534
2535 static inline void
2536 _pthread_introspection_thread_start(pthread_t t)
2537 {
2538 if (os_fastpath(!_pthread_introspection_hook)) return;
2539 _pthread_introspection_hook_callout_thread_start(t);
2540 }
2541
2542 PTHREAD_NOINLINE
2543 static void
2544 _pthread_introspection_hook_callout_thread_terminate(pthread_t t)
2545 {
2546 size_t freesize;
2547 void *freeaddr;
2548 if (t == main_thread()) {
2549 size_t stacksize = t->stackaddr - t->stackbottom;
2550 freesize = stacksize + t->guardsize;
2551 freeaddr = t->stackaddr - freesize;
2552 } else {
2553 freesize = t->freesize - PTHREAD_SIZE;
2554 freeaddr = t->freeaddr;
2555 }
2556 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
2557 freeaddr, freesize);
2558 }
2559
2560 static inline void
2561 _pthread_introspection_thread_terminate(pthread_t t)
2562 {
2563 if (os_fastpath(!_pthread_introspection_hook)) return;
2564 _pthread_introspection_hook_callout_thread_terminate(t);
2565 }
2566
2567 PTHREAD_NOINLINE
2568 static void
2569 _pthread_introspection_hook_callout_thread_destroy(pthread_t t)
2570 {
2571 _pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
2572 PTHREAD_SIZE);
2573 }
2574
2575 static inline void
2576 _pthread_introspection_thread_destroy(pthread_t t)
2577 {
2578 if (os_fastpath(!_pthread_introspection_hook)) return;
2579 _pthread_introspection_hook_callout_thread_destroy(t);
2580 }
2581
2582 #pragma mark libplatform shims
2583
2584 #include <platform/string.h>
2585
2586 // pthread_setup initializes large structures to 0,
2587 // which the compiler turns into a library call to memset.
2588 //
2589 // To avoid linking against Libc, provide a simple wrapper
2590 // that calls through to the libplatform primitives.
2591
2592 #undef memset
2593 PTHREAD_NOEXPORT
2594 void *
2595 memset(void *b, int c, size_t len)
2596 {
2597 return _platform_memset(b, c, len);
2598 }
2599
2600 #undef bzero
2601 PTHREAD_NOEXPORT
2602 void
2603 bzero(void *s, size_t n)
2604 {
2605 _platform_bzero(s, n);
2606 }
2607
2608 #undef memcpy
2609 PTHREAD_NOEXPORT
2610 void *
2611 memcpy(void *dst, const void *src, unsigned long n)
2612 {
2613 return _platform_memmove(dst, src, n); // memmove is a safe superset of memcpy
2614 }
2615